repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
cdrtm/pyDatView
|
[
"fe1acacde27d4eafda0b54e455fadfb2d6199cd1"
] |
[
"pydatview/tools/signal.py"
] |
[
"from __future__ import division\nimport numpy as np\nfrom numpy.random import rand\nimport pandas as pd\n\n\n# --- List of available filters\nFILTERS=[\n {'name':'Moving average','param':100,'paramName':'Window Size','paramRange':[0,100000],'increment':1},\n {'name':'Low pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},\n {'name':'High pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},\n]\n\nSAMPLERS=[\n {'name':'Replace', 'param':[], 'paramName':'New x'},\n {'name':'Insert', 'param':[], 'paramName':'Insert list'},\n {'name':'Remove', 'param':[], 'paramName':'Remove list'},\n {'name':'Every n', 'param':2 , 'paramName':'n'},\n {'name':'Delta x', 'param':0.1, 'paramName':'dx'},\n]\n\n\n\ndef reject_outliers(y, x=None, m = 2., replaceNaN=True):\n \"\"\" Reject outliers:\n If replaceNaN is true: they are replaced by NaN \n Otherwise they are removed\n \"\"\"\n if m==0: \n # No rejection...\n pass\n else:\n dd = np.abs(y - np.nanmedian(y))\n mdev = np.nanmedian(dd)\n if mdev:\n ss = dd/mdev \n b=ss<m\n if replaceNaN:\n y=y.copy()\n y[~b]=np.nan\n else:\n y=y[b]\n if x is not None:\n x= x[b]\n if x is None:\n return y\n else:\n return x, y\n\n\n# --------------------------------------------------------------------------------}\n# --- Resampling \n# --------------------------------------------------------------------------------{\ndef multiInterp(x, xp, fp, extrap='bounded'):\n j = np.searchsorted(xp, x) - 1\n dd = np.zeros(len(x))\n bOK = np.logical_and(j>=0, j< len(xp)-1)\n bLower =j<0\n bUpper =j>=len(xp)-1\n jOK = j[bOK]\n #import pdb; pdb.set_trace()\n dd[bOK] = (x[bOK] - xp[jOK]) / (xp[jOK + 1] - xp[jOK])\n jBef=j \n jAft=j+1\n # \n # Use first and last values for anything beyond xp\n jAft[bUpper] = len(xp)-1\n jBef[bUpper] = len(xp)-1\n jAft[bLower] = 0\n jBef[bLower] = 0\n if extrap=='bounded':\n pass\n # OK\n elif extrap=='nan':\n dd[~bOK] = np.nan\n else:\n raise NotImplementedError()\n\n return (1 - dd) * fp[:,jBef] + fp[:,jAft] * dd\n\ndef resample_interp(x_old, x_new, y_old=None, df_old=None):\n #x_new=np.sort(x_new)\n if df_old is not None:\n # --- Method 1 (pandas)\n #df_new = df_old.copy()\n #df_new = df_new.set_index(x_old)\n #df_new = df_new.reindex(df_new.index | x_new)\n #df_new = df_new.interpolate().loc[x_new]\n #df_new = df_new.reset_index()\n # --- Method 2 interp storing dx\n data_new=multiInterp(x_new, x_old, df_old.values.T)\n df_new = pd.DataFrame(data=data_new.T, columns=df_old.columns.values)\n return x_new, df_new\n\n if y_old is not None:\n return x_new, np.interp(x_new, x_old, y_old)\n\n\ndef applySamplerDF(df_old, x_col, sampDict):\n x_old=df_old[x_col].values\n x_new, df_new =applySampler(x_old, y_old=None, sampDict=sampDict, df_old=df_old)\n df_new[x_col]=x_new\n return df_new\n\n\ndef applySampler(x_old, y_old, sampDict, df_old=None):\n\n param = np.asarray(sampDict['param']).ravel()\n\n if sampDict['name']=='Replace':\n if len(param)==0:\n raise Exception('Error: At least one value is required to resample the x values with')\n x_new = param\n return resample_interp(x_old, x_new, y_old, df_old)\n\n elif sampDict['name']=='Insert':\n if len(param)==0:\n raise Exception('Error: provide a list of values to insert')\n x_new = np.sort(np.concatenate((x_old.ravel(),param)))\n return resample_interp(x_old, x_new, y_old, df_old)\n\n elif sampDict['name']=='Remove':\n I=[]\n if len(param)==0:\n raise Exception('Error: provide a list of values to remove')\n for d in 
param:\n Ifound= np.where(np.abs(x_old-d)<1e-3)[0]\n if len(Ifound)>0:\n I+=list(Ifound.ravel())\n x_new=np.delete(x_old,I)\n return resample_interp(x_old, x_new, y_old, df_old)\n\n elif sampDict['name']=='Delta x':\n if len(param)==0:\n raise Exception('Error: provide value for dx')\n dx = param[0]\n x_new = np.arange(x_old[0], x_old[-1]+dx/2, dx)\n return resample_interp(x_old, x_new, y_old, df_old)\n\n elif sampDict['name']=='Every n':\n if len(param)==0:\n raise Exception('Error: provide value for n')\n n = int(param[0])\n if n==0:\n raise Exception('Error: |n| should be at least 1')\n\n x_new=x_old[::n]\n if df_old is not None:\n return x_new, (df_old.copy()).iloc[::n,:]\n if y_old is not None:\n return x_new, y_old[::n]\n\n else:\n raise NotImplementedError('{}'.format(sampDict))\n pass\n\n# --------------------------------------------------------------------------------}\n# --- Filters\n# --------------------------------------------------------------------------------{\n# def moving_average(x, w):\n# #t_new = np.arange(0,Tmax,dt)\n# #nt = len(t_new)\n# #nw=400\n# #u_new = moving_average(np.floor(np.linspace(0,3,nt+nw-1))*3+3.5, nw)\n# return np.convolve(x, np.ones(w), 'valid') / w\n# def moving_average(x,N,mode='same'):\n# y=np.convolve(x, np.ones((N,))/N, mode=mode)\n# return y\ndef moving_average(a, n=3) :\n \"\"\" \n perform moving average, return a vector of same length as input\n\n NOTE: also in kalman.filters\n \"\"\"\n a = a.ravel()\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\n ret = np.cumsum(a, dtype = float)\n ret[n:] = ret[n:] - ret[:-n]\n ret=ret[n - 1:] / n\n return ret\n\ndef lowpass1(y, dt, fc=3) :\n \"\"\" \n 1st order low pass filter\n \"\"\"\n tau=1/(2*np.pi*fc)\n alpha=dt/(tau+dt)\n y_filt=np.zeros(y.shape)\n y_filt[0]=y[0]\n for i in np.arange(1,len(y)):\n y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]\n return y_filt\n\ndef highpass1(y, dt, fc=3) :\n \"\"\" \n 1st order high pass filter\n \"\"\"\n tau=1/(2*np.pi*fc)\n alpha=tau/(tau+dt)\n y_filt=np.zeros(y.shape)\n y_filt[0]=0\n for i in np.arange(1,len(y)):\n y_filt[i]=alpha*y_filt[i-1] + alpha*(y[i]-y[i-1])\n m0=np.mean(y)\n m1=np.mean(y_filt)\n y_filt+=m0-m1\n return y_filt\n\n\ndef applyFilter(x, y,filtDict):\n if filtDict['name']=='Moving average':\n return moving_average(y, n=np.round(filtDict['param']).astype(int))\n elif filtDict['name']=='Low pass 1st order':\n dt = x[1]-x[0]\n return lowpass1(y, dt=dt, fc=filtDict['param'])\n elif filtDict['name']=='High pass 1st order':\n dt = x[1]-x[0]\n return highpass1(y, dt=dt, fc=filtDict['param'])\n else:\n raise NotImplementedError('{}'.format(filtDict))\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\ndef zero_crossings(y,x=None,direction=None):\n \"\"\"\n Find zero-crossing points in a discrete vector, using linear interpolation.\n\n direction: 'up' or 'down', to select only up-crossings or down-crossings\n\n returns: \n x values xzc such that y(yzc)==0\n indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)\n\n if direction is not provided, also returns:\n sign, equal to 1 for up crossing\n \"\"\"\n if x is None:\n x=np.arange(len(y))\n\n if np.any((x[1:] - x[0:-1]) <= 0.0):\n raise Exception('x values need to be in ascending order')\n\n # Indices before zero-crossing\n iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]\n \n # Find the zero crossing by linear interpolation\n xzc = x[iBef] - 
y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])\n \n # Selecting points that are exactly 0 and where neighbor change sign\n iZero = np.where(y == 0.0)[0]\n iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]\n iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]\n\n # Concatenate \n xzc = np.concatenate((xzc, x[iZero]))\n iBef = np.concatenate((iBef, iZero))\n\n # Sort\n iSort = np.argsort(xzc)\n xzc, iBef = xzc[iSort], iBef[iSort]\n\n # Return up-crossing, down crossing or both\n sign = np.sign(y[iBef+1]-y[iBef])\n if direction == 'up':\n I= np.where(sign==1)[0]\n return xzc[I],iBef[I]\n elif direction == 'down':\n I= np.where(sign==-1)[0]\n return xzc[I],iBef[I]\n elif direction is not None:\n raise Exception('Direction should be either `up` or `down`')\n return xzc, iBef, sign\n\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\ndef correlation(x, nMax=80, dt=1, method='manual'):\n \"\"\" \n Compute auto correlation of a signal\n \"\"\"\n nvec = np.arange(0,nMax)\n sigma2 = np.var(x)\n R = np.zeros(nMax)\n R[0] =1\n for i,nDelay in enumerate(nvec[1:]):\n R[i+1] = np.mean( x[0:-nDelay] * x[nDelay:] ) / sigma2\n\n tau = nvec*dt\n return R, tau\n\n\ndef correlated_signal(coeff, n=1000):\n \"\"\"\n Create a correlated random signal of length `n` based on the correlation coefficient `coeff`\n value[t] = coeff * value[t-1] + (1-coeff) * random\n \"\"\"\n if coeff<0 or coeff>1: \n raise Exception('Correlation coefficient should be between 0 and 1')\n\n x = np.zeros(n)\n rvec = rand(n)\n x[0] = rvec[0]\n for m in np.arange(1,n):\n x[m] = coeff*x[m-1] + (1-coeff)*rvec[m] \n x-=np.mean(x)\n return x\n\n\nif __name__=='__main__':\n import numpy as np\n import matplotlib.pyplot as plt\n\n # Input\n dt = 1\n n = 10000\n coeff = 0.95 # 1:full corr, 00-corr\n nMax = 180\n # Create a correlated time series\n tvec = np.arange(0,n)*dt\n ts = correlated_signal(coeff, n)\n # --- Compute correlation coefficient\n R, tau = correlation(x, nMax=nMax)\n fig,axes = plt.subplots(2, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)\n fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)\n ax=axes[0]\n # Plot time series\n ax.plot(tvec,ts)\n ax.set_xlabel('t [s]')\n ax.set_ylabel('u [m/s]')\n ax.tick_params(direction='in')\n # Plot correlation\n ax=axes[1]\n ax.plot(tau, R ,'b-o', label='computed')\n ax.plot(tau, coeff**(tau/dt) , 'r--' ,label='coeff^{tau/dt}') # analytical coeff^n trend\n ax.set_xlabel(r'$\\tau$ [s]')\n ax.set_ylabel(r'$R(\\tau)$ [-]')\n ax.legend()\n plt.show()\n\n\n\n\n\n\n"
] |
[
[
"numpy.random.rand",
"numpy.mean",
"numpy.sign",
"numpy.where",
"numpy.cumsum",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.interp",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.nanmedian",
"numpy.delete",
"numpy.zeros",
"numpy.round",
"numpy.argsort",
"numpy.searchsorted",
"matplotlib.pyplot.show",
"numpy.asarray",
"numpy.any",
"numpy.abs",
"numpy.var"
]
] |
tedunderwood/fiction
|
[
"33e2986fecaa3d154b5fdd609146b65d97974275"
] |
[
"variation/methodological_experiment.py"
] |
[
"#!/usr/bin/env python3\n\n# methodological_experiment.py\n\nimport sys, os, csv\nimport numpy as np\nimport pandas as pd\nimport versatiletrainer2\nimport metaselector\n\nimport matplotlib.pyplot as plt\n\nfrom scipy import stats\n\ndef first_experiment():\n\n sourcefolder = '../data/'\n metadatapath = '../metadata/mastermetadata.csv'\n vocabpath = '../modeloutput/experimentalvocab.txt'\n tags4positive = {'fantasy_loc', 'fantasy_oclc'}\n tags4negative = {'sf_loc', 'sf_oclc'}\n sizecap = 200\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap)\n\n c_range = [.004, .012, 0.3, 0.8, 2]\n featurestart = 3000\n featureend = 4400\n featurestep = 100\n modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, 'first_experiment', '../modeloutput/first_experiment.csv')\n\n plt.rcParams[\"figure.figsize\"] = [9.0, 6.0]\n plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)\n plt.show()\n\ndef get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000):\n\n ''' Loads metadata, selects instances for the positive\n and negative classes (using a ratio to dilute the positive\n class with negative instances), creates a lexicon if one doesn't\n already exist, and creates a pandas dataframe storing\n texts as rows and words/features as columns. A refactored\n and simplified version of get_data_for_model().\n '''\n\n holdout_authors = True\n freqs_already_normalized = True\n verbose = False\n datecols = ['firstpub']\n indexcol = ['docid']\n extension = '.tsv'\n genrecol = 'tags'\n numfeatures = 8000\n\n sourcefolder = '../data/'\n metadatapath = '../metadata/mastermetadata.csv'\n\n # Get a list of files.\n allthefiles = os.listdir(sourcefolder)\n\n volumeIDsinfolder = list()\n volumepaths = list()\n numchars2trim = len(extension)\n\n for filename in allthefiles:\n\n if filename.endswith(extension):\n volID = filename[0 : -numchars2trim]\n # The volume ID is basically the filename minus its extension.\n volumeIDsinfolder.append(volID)\n\n metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol = indexcol, datecols = datecols, genrecol = genrecol)\n\n # That function returns a pandas dataframe which is guaranteed to be indexed by indexcol,\n # and to contain a numeric column 'std_date' as well as a column 'tagset' which contains\n # sets of genre tags for each row. 
It has also been filtered so it only contains volumes\n # in the folder, and none whose date is below excludebelow or above excludeabove.\n\n orderedIDs, classdictionary = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)\n\n metadata = metadata.loc[orderedIDs]\n # Limits the metadata data frame to rows we are actually using\n # (those selected in select_instances).\n\n # We now create an ordered list of id-path tuples.\n\n volspresent = [(x, sourcefolder + x + extension) for x in orderedIDs]\n print(len(volspresent))\n\n print('Building vocabulary.')\n\n vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n = numfeatures)\n\n numfeatures = len(vocablist)\n\n print()\n print(\"Number of features: \" + str(numfeatures))\n\n # For each volume, we're going to create a list of volumes that should be\n # excluded from the training set when it is to be predicted. More precisely,\n # we're going to create a list of their *indexes*, so that we can easily\n # remove rows from the training matrix.\n\n authormatches = [ [] for x in orderedIDs]\n\n # Now we proceed to enlarge that list by identifying, for each volume,\n # a set of indexes that have the same author. Obvs, there will always be at least one.\n # We exclude a vol from it's own training set.\n\n if holdout_authors:\n for idx1, anid in enumerate(orderedIDs):\n thisauthor = metadata.loc[anid, 'author']\n authormatches[idx1] = list(np.flatnonzero(metadata['author'] == thisauthor))\n\n for alist in authormatches:\n alist.sort(reverse = True)\n\n print()\n print('Authors matched.')\n print()\n\n # I am reversing the order of indexes so that I can delete them from\n # back to front, without changing indexes yet to be deleted.\n # This will become important in the modelingprocess module.\n\n masterdata, classvector = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)\n\n return metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist\n\ndef vary_sf_ratio_against_random():\n if not os.path.isfile('../measuredivergence/modeldata.tsv'):\n with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:\n outline = 'name\\tsize\\tratio\\taccuracy\\tfeatures\\tregularization\\n'\n f.write(outline)\n\n size = 80\n\n for iteration in [5, 6, 7]:\n\n ceiling = 105\n if iteration == 7:\n ceiling = 5\n\n for pct in range(0, ceiling, 5):\n ratio = pct / 100\n name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)\n\n vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'\n tags4positive = {'sf_loc', 'sf_oclc'}\n tags4negative = {'random'}\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)\n\n c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8]\n featurestart = 1000\n featureend = 6000\n featurestep = 300\n modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)\n # It's important not to write fullmodel if you want the csvs\n # to accurately reflect terrible accuracy 
on diluted datasets.\n # write_fullmodel = False forces crossvalidation.\n\n with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:\n outline = name + '\\t' + str(size) + '\\t' + str(ratio) + '\\t' + str(maxaccuracy) + '\\t' + str(features4max) + '\\t' + str(best_regularization_coef) + '\\n'\n f.write(outline)\n\ndef vary_fantasy_ratio_against_sf():\n if not os.path.isfile('../measuredivergence/modeldata.tsv'):\n with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:\n outline = 'name\\tsize\\tratio\\taccuracy\\tfeatures\\tregularization\\n'\n f.write(outline)\n\n size = 80\n\n for iteration in [8, 9, 10]:\n\n ceiling = 105\n if iteration == 10:\n ceiling = 5\n\n for pct in range(0, ceiling, 5):\n ratio = pct / 100\n name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)\n\n vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'\n tags4positive = {'fantasy_loc', 'fantasy_oclc'}\n tags4negative = {'sf_loc', 'sf_oclc'}\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)\n\n c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]\n featurestart = 2000\n featureend = 7500\n featurestep = 400\n modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)\n # write_fullmodel = False forces crossvalidation.\n\n with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:\n outline = name + '\\t' + str(size) + '\\t' + str(ratio) + '\\t' + str(maxaccuracy) + '\\t' + str(features4max) + '\\t' + str(best_regularization_coef) + '\\n'\n f.write(outline)\n\ndef vary_fantasy_ratio_against_random():\n if not os.path.isfile('../measuredivergence/modeldata.tsv'):\n with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:\n outline = 'name\\tsize\\tratio\\taccuracy\\tfeatures\\tregularization\\n'\n f.write(outline)\n\n size = 80\n\n for iteration in [11, 12, 13]:\n\n ceiling = 105\n if iteration == 13:\n ceiling = 5\n\n for pct in range(0, ceiling, 5):\n ratio = pct / 100\n name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)\n\n vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'\n tags4positive = {'fantasy_loc', 'fantasy_oclc'}\n tags4negative = {'random'}\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)\n\n c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]\n featurestart = 1600\n featureend = 6400\n featurestep = 400\n modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)\n # write_fullmodel = False forces 
crossvalidation.\n\n with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:\n outline = name + '\\t' + str(size) + '\\t' + str(ratio) + '\\t' + str(maxaccuracy) + '\\t' + str(features4max) + '\\t' + str(best_regularization_coef) + '\\n'\n f.write(outline)\n\ndef accuracy(df, column):\n totalcount = len(df.realclass)\n tp = sum((df.realclass > 0.5) & (df[column] > 0.5))\n tn = sum((df.realclass <= 0.5) & (df[column] <= 0.5))\n fp = sum((df.realclass <= 0.5) & (df[column] > 0.5))\n fn = sum((df.realclass > 0.5) & (df[column] <= 0.5))\n assert totalcount == (tp + fp + tn + fn)\n\n return (tp + tn) / totalcount\n\ndef accuracy_loss(df):\n\n return accuracy(df, 'probability') - accuracy(df, 'alien_model')\n\ndef kldivergence(p, q):\n \"\"\"Kullback-Leibler divergence D(P || Q) for discrete distributions\n Parameters\n ----------\n p, q : array-like, dtype=float, shape=n\n Discrete probability distributions.\n \"\"\"\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))\n\ndef averagecorr(r1, r2):\n z1 = np.arctanh(r1)\n z2 = np.arctanh(r2)\n themean = (z1 + z2) / 2\n return np.tanh(themean)\n\ndef get_divergences(gold, testname, itera, size, pct):\n '''\n This function gets several possible measures of divergence\n between two models.\n '''\n\n # We start by constructing the paths to the gold\n # standard model criteria (.pkl) and\n # model output (.csv) on the examples\n # originally used to train it.\n\n # We're going to try applying the gold standard\n # criteria to another model's output, and vice-\n # versa.\n\n model1 = '../measuredivergence/modeloutput/' + gold + '.pkl'\n meta1 = '../measuredivergence/modeloutput/' + gold + '.csv'\n\n # Now we construct paths to the test model\n # criteria (.pkl) and output (.csv).\n\n testpath = '../measuredivergence/modeloutput/' + testname\n model2 = testpath + '.pkl'\n meta2 = testpath + '.csv'\n\n model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)\n model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)\n\n pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]\n pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]\n pearson = averagecorr(pearson1on2, pearson2on1)\n\n spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]\n spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]\n spearman = averagecorr(spearman1on2, spearman2on1)\n\n loss1on2 = accuracy_loss(model1on2)\n loss2on1 = accuracy_loss(model2on1)\n loss = (loss1on2 + loss2on1) / 2\n\n kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)\n kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)\n kl = (kl1on2 + kl2on1) / 2\n\n return pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1\n\ndef measure_sf_divergences():\n\n columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']\n\n if not os.path.isfile('../measuredivergence/sf_divergences.tsv'):\n with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writeheader()\n\n goldstandards = ['iter5_size80_ratio0', 'iter6_size80_ratio0', 'iter7_size80_ratio0']\n size = 80\n\n modeldata = 
pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\\t', index_col = 'name')\n\n for gold in goldstandards:\n for itera in [5, 6]:\n for pct in range(0, 105, 5):\n ratio = pct / 100\n\n testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)\n\n if testname == gold:\n continue\n # we don't test a model against itself\n else:\n row = dict()\n row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)\n\n row['name1'] = gold\n row['name2'] = testname\n row['size'] = size\n row['acc1'] = modeldata.loc[gold, 'accuracy']\n row['acc2'] = modeldata.loc[testname, 'accuracy']\n row['ratiodiff'] = ratio\n\n with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writerow(row)\n\ndef measure_fsf_divergences():\n\n columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']\n\n if not os.path.isfile('../measuredivergence/fsf_divergences.tsv'):\n with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writeheader()\n\n goldstandards = ['iter8_size80_ratio0', 'iter9_size80_ratio0', 'iter10_size80_ratio0']\n size = 80\n\n modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\\t', index_col = 'name')\n\n for gold in goldstandards:\n for itera in [8, 9]:\n for pct in range(0, 105, 5):\n ratio = pct / 100\n\n testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)\n\n if testname == gold:\n continue\n # we don't test a model against itself\n else:\n row = dict()\n row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)\n\n row['name1'] = gold\n row['name2'] = testname\n row['size'] = size\n row['acc1'] = modeldata.loc[gold, 'accuracy']\n row['acc2'] = modeldata.loc[testname, 'accuracy']\n row['ratiodiff'] = ratio\n\n with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writerow(row)\n\ndef measure_fantasy_divergences():\n\n columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']\n\n if not os.path.isfile('../measuredivergence/fantasy_divergences.tsv'):\n with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writeheader()\n\n goldstandards = ['iter11_size80_ratio0', 'iter12_size80_ratio0', 'iter13_size80_ratio0']\n size = 80\n\n modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\\t', index_col = 'name')\n\n for gold in goldstandards:\n for itera in [11, 12]:\n for pct in range(0, 105, 5):\n ratio = pct / 100\n\n testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)\n\n if testname == gold:\n continue\n # we don't test a model against itself\n else:\n row = dict()\n row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, 
size, pct)\n\n row['name1'] = gold\n row['name2'] = testname\n row['size'] = size\n row['acc1'] = modeldata.loc[gold, 'accuracy']\n row['acc2'] = modeldata.loc[testname, 'accuracy']\n row['ratiodiff'] = ratio\n\n with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writerow(row)\n\ndef new_experiment():\n\n # The first time I ran this, I used partition 2 to build the\n # mixed data, and partition 1 as a gold standard. Now reversing.\n\n outmodelpath = '../measuredivergence/results/newexperimentmodels.csv'\n columns = ['name', 'size', 'ratio', 'iteration', 'meandate', 'maxaccuracy', 'features', 'regularization']\n if not os.path.isfile(outmodelpath):\n with open(outmodelpath, mode = 'w', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, fieldnames = columns)\n scribe.writeheader()\n\n c_range = [.00001, .0001, .001, .01, 0.1, 1, 10, 100]\n featurestart = 1500\n featureend = 6000\n featurestep = 300\n modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range\n sizecap = 75\n\n for i in range(3, 6):\n for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:\n sourcefolder = '../measuredivergence/mix/' + str(ratio) + '/'\n metadatapath = '../measuredivergence/partitionmeta/meta' + str(ratio) + '.csv'\n name = 'mixeddata_' + str(i) + '_' + str(ratio)\n vocabpath = '../lexica/' + name + '.txt'\n tags4positive = {'fantasy', 'detective'}\n tags4negative = {'random'}\n floor = 1800\n ceiling = 1930\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')\n\n meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))\n\n row = dict()\n row['name'] = name\n row['size'] = sizecap\n row['ratio'] = ratio\n row['iteration'] = i\n row['meandate'] = meandate\n row['maxaccuracy'] = maxaccuracy\n row['features'] = features4max\n row['regularization'] = best_regularization_coef\n\n with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, fieldnames = columns)\n scribe.writerow(row)\n\n os.remove(vocabpath)\n\n sourcefolder = '../data/'\n metadatapath = '../measuredivergence/partitionmeta/part2.csv'\n # note that this is changed if you create mix data with\n # partition 2\n\n name = 'goldfantasy_' + str(i)\n vocabpath = '../lexica/' + name + '.txt'\n tags4positive = {'fantasy'}\n tags4negative = {'random', 'randomB'}\n floor = 1800\n ceiling = 1930\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, 
vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')\n\n meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))\n\n row = dict()\n row['name'] = name\n row['size'] = sizecap\n row['ratio'] = ratio\n row['iteration'] = i\n row['meandate'] = meandate\n row['maxaccuracy'] = maxaccuracy\n row['features'] = features4max\n row['regularization'] = best_regularization_coef\n\n with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, fieldnames = columns)\n scribe.writerow(row)\n\n os.remove(vocabpath)\n\n sourcefolder = '../data/'\n metadatapath = '../measuredivergence/partitionmeta/part2.csv'\n # depending on which partition you used to create mix data;\n # this will be the other one\n\n name = 'golddetective_' + str(i)\n vocabpath = '../lexica/' + name + '.txt'\n tags4positive = {'detective'}\n tags4negative = {'random', 'randomB'}\n floor = 1800\n ceiling = 1930\n\n metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)\n\n matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')\n\n meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))\n\n row = dict()\n row['name'] = name\n row['size'] = sizecap\n row['ratio'] = ratio\n row['iteration'] = i\n row['meandate'] = meandate\n row['maxaccuracy'] = maxaccuracy\n row['features'] = features4max\n row['regularization'] = best_regularization_coef\n\n with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, fieldnames = columns)\n scribe.writerow(row)\n\n os.remove(vocabpath)\n\ndef accuracy(df, column):\n totalcount = len(df.realclass)\n tp = sum((df.realclass > 0.5) & (df[column] > 0.5))\n tn = sum((df.realclass <= 0.5) & (df[column] <= 0.5))\n fp = sum((df.realclass <= 0.5) & (df[column] > 0.5))\n fn = sum((df.realclass > 0.5) & (df[column] <= 0.5))\n assert totalcount == (tp + fp + tn + fn)\n\n return (tp + tn) / totalcount\n\ndef accuracy_loss(df):\n\n return accuracy(df, 'probability') - accuracy(df, 'alien_model')\n\ndef get_divergence(sampleA, sampleB, twodatafolder = '../data/', onedatafolder = '../data/'):\n '''\n This function applies model a to b, and vice versa, and returns\n a couple of measures of divergence: notably lost accuracy and\n z-tranformed spearman correlation.\n '''\n\n # We start by constructing the paths to the sampleA\n # standard model criteria (.pkl) and\n # model output (.csv) on the examples\n # originally used to train it.\n\n # We're going to try applying the sampleA standard\n # criteria to another model's output, and vice-\n # versa.\n\n model1 = '../measuredivergence/newmodeloutput/' + sampleA + '.pkl'\n meta1 = '../measuredivergence/newmodeloutput/' + sampleA + '.csv'\n\n # Now we construct paths to the test model\n # criteria (.pkl) and output (.csv).\n\n model2 = '../measuredivergence/newmodeloutput/' + sampleB + '.pkl'\n meta2 = '../measuredivergence/newmodeloutput/' + sampleB + '.csv'\n\n model1on2 = versatiletrainer2.apply_pickled_model(model1, 
twodatafolder, '.tsv', meta2)\n model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)\n\n spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])\n spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])\n spearman = (spearman1on2 + spearman2on1) / 2\n\n loss1on2 = accuracy_loss(model1on2)\n loss2on1 = accuracy_loss(model2on1)\n loss = (loss1on2 + loss2on1) / 2\n\n alienacc2 = accuracy(model1on2, 'alien_model')\n alienacc1 = accuracy(model2on1, 'alien_model')\n\n acc2 = accuracy(model1on2, 'probability')\n acc1 = accuracy(model2on1, 'probability')\n\n meandate2 = np.mean(model1on2.std_date)\n meandate1 = np.mean(model2on1.std_date)\n\n return spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2\n\ndef write_a_row(r, outfile, columns):\n with open(outfile, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, fieldnames = columns, delimiter = '\\t')\n scribe.writerow(r)\n\ndef new_divergences():\n\n outcomparisons = '../measuredivergence/results/new_comparisons.tsv'\n columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']\n\n if not os.path.isfile(outcomparisons):\n with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writeheader()\n\n # I originally ran this with i and j\n # iterating through range(3). Now trying\n # on models generated with the partitions\n # reversed.\n\n for i in range(3, 6):\n for j in range(3, 6):\n for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:\n\n r = dict()\n r['testype'] = 'fantasy2mixed'\n r['name1'] = 'goldfantasy_' + str(i)\n r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)\n r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')\n r['ratio'] = ratio\n\n write_a_row(r, outcomparisons, columns)\n\n r = dict()\n r['testype'] = 'detective2mixed'\n r['name1'] = 'golddetective_' + str(i)\n r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)\n r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')\n r['ratio'] = 100 - ratio\n # note that distance from detective is the complement\n # of distance from fantasy\n\n write_a_row(r, outcomparisons, columns)\n\ndef new_self_comparisons ():\n\n outcomparisons = '../measuredivergence/results/self_comparisons.tsv'\n columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']\n\n if not os.path.isfile(outcomparisons):\n with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:\n scribe = csv.DictWriter(f, delimiter = '\\t', fieldnames = columns)\n scribe.writeheader()\n\n for i in range(0, 3):\n for j in range(3, 6):\n for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:\n\n r = dict()\n r['testype'] = 'selfmixed'\n 
r['name1'] = 'mixeddata_' + str(i) + '_' + str(ratio)\n r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)\n r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/', onedatafolder = '../measuredivergence/altmix/' + str(ratio) + '/')\n r['ratio'] = ratio\n\n write_a_row(r, outcomparisons, columns)\n\nnew_self_comparisons()\n\n\n\n"
] |
[
[
"numpy.asarray",
"matplotlib.pyplot.matshow",
"numpy.log",
"numpy.sum",
"scipy.stats.spearmanr",
"numpy.mean",
"scipy.stats.pearsonr",
"numpy.tanh",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.arctanh",
"numpy.flatnonzero"
]
] |
ngocpc/Project_Plagiarism_Detection
|
[
"d06216d2aafa71e52c528f3ae451a49638e9785d"
] |
[
"Project_Plagiarism_Detection/source_pytorch/train.py"
] |
[
"import argparse\nimport json\nimport os\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.utils.data\n\n# imports the model in model.py by name\nfrom model import BinaryClassifier\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # set to eval mode, could use no_grad\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\n# Gets training data in batches from the train.csv file\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_x = torch.from_numpy(train_data.drop([0], axis=1).values).float()\n\n train_ds = torch.utils.data.TensorDataset(train_x, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\n# Provided training function\ndef train(model, train_loader, epochs, criterion, optimizer, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n criterion - The loss function used for training. 
\n optimizer - The optimizer to use during training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n \n # training loop is provided\n for epoch in range(1, epochs + 1):\n model.train() # Make sure that the model is in training mode.\n\n total_loss = 0\n\n for batch in train_loader:\n # get data\n batch_x, batch_y = batch\n\n batch_x = batch_x.to(device)\n batch_y = batch_y.to(device)\n\n optimizer.zero_grad()\n\n # get predictions from model\n y_pred = model(batch_x)\n \n # perform backprop\n loss = criterion(y_pred, batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n\n print(\"Epoch: {}, Loss: {}\".format(epoch, total_loss / len(train_loader)))\n\n\n## TODO: Complete the main code\nif __name__ == '__main__':\n \n # All of the model parameters and training parameters are sent as arguments\n # when this script is executed, during a training job\n \n # Here we set up an argument parser to easily access the parameters\n parser = argparse.ArgumentParser()\n\n # SageMaker parameters, like the directories for training data and saving models; set automatically\n # Do not need to change\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n \n # Training Parameters, given\n parser.add_argument('--batch-size', type=int, default=10, metavar='N',\n help='input batch size for training (default: 10)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n \n ## TODO: Add args for the three model parameters: input_features, hidden_dim, output_dim\n # Model Parameters\n parser.add_argument('--input_features', type=int, default=2, metavar='IN',\n help='number of input features to model (default: 2)')\n parser.add_argument('--hidden_dim', type=int, default=10, metavar='H',\n help='hidden dim of model (default: 10)')\n parser.add_argument('--output_dim', type=int, default=1, metavar='OUT',\n help='output dim of model (default: 1)')\n\n \n # args holds all passed-in arguments\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n\n ## --- Your code here --- ##\n \n ## TODO: Build the model by passing in the input params\n # To get params from the parser, call args.argument_name, ex. 
args.epochs or ards.hidden_dim\n # Don't forget to move your model .to(device) to move to GPU , if appropriate\n model = BinaryClassifier(args.input_features, args.hidden_dim, args.output_dim).to(device)\n\n ## TODO: Define an optimizer and loss function for training\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n criterion = nn.BCELoss()\n\n # Trains the model (given line of code, which calls the above training function)\n train(model, train_loader, args.epochs, criterion, optimizer, device)\n\n ## TODO: complete in the model_info by adding three argument names, the first is given\n # Keep the keys of this dictionary as they are \n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'input_features': args.input_features,\n 'hidden_dim': args.hidden_dim,\n 'output_dim': args.output_dim,\n }\n torch.save(model_info, f)\n \n ## --- End of your code --- ##\n \n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n"
] |
[
[
"torch.save",
"torch.from_numpy",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.BCELoss",
"torch.utils.data.TensorDataset"
]
] |
grst/diffxpy
|
[
"8b9ad605cb11d05b58b3e3f4b2c8255c6e98b80c"
] |
[
"diffxpy/unit_test/test_pairwise.py"
] |
[
"import logging\nimport unittest\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\n\nimport diffxpy.api as de\n\n\nclass _TestPairwiseNull:\n\n noise_model: str\n\n def _prepate_data(\n self,\n n_cells: int,\n n_genes: int,\n n_groups: int\n ):\n if self.noise_model == \"nb\":\n from batchglm.api.models.glm_nb import Simulator\n rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)\n rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)\n elif self.noise_model == \"norm\" or self.noise_model is None:\n from batchglm.api.models.glm_norm import Simulator\n rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)\n rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)\n else:\n raise ValueError(\"noise model %s not recognized\" % self.noise_model)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate_params(\n rand_fn_loc=rand_fn_loc,\n rand_fn_scale=rand_fn_scale\n )\n sim.generate_data()\n\n random_sample_description = pd.DataFrame({\n \"condition\": np.random.randint(n_groups, size=sim.nobs)\n })\n return sim, random_sample_description\n\n def _test_null_distribution_basic(\n self,\n test: str,\n lazy: bool,\n quick_scale: bool = False,\n n_cells: int = 3000,\n n_genes: int = 200,\n n_groups: int = 3\n ):\n \"\"\"\n Test if de.wald() generates a uniform p-value distribution\n if it is given data simulated based on the null model. Returns the p-value\n of the two-side Kolmgorov-Smirnov test for equality of the observed \n p-value distriubution and a uniform distribution.\n\n :param n_cells: Number of cells to simulate (number of observations per test).\n :param n_genes: Number of genes to simulate (number of tests).\n \"\"\"\n sim, sample_description = self._prepate_data(\n n_cells=n_cells,\n n_genes=n_genes,\n n_groups=n_groups\n )\n test = de.test.pairwise(\n data=sim.input_data,\n sample_description=sample_description,\n grouping=\"condition\",\n test=test,\n lazy=lazy,\n quick_scale=quick_scale,\n noise_model=self.noise_model\n )\n _ = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n if lazy:\n pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue\n else:\n pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0=%f is <= 0.05!\" % np.round(pval_h0, 5)\n return True\n\n\nclass TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):\n\n def test_null_distribution_ttest(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = None\n self._test_null_distribution_basic(test=\"t-test\", lazy=False)\n\n def test_null_distribution_rank(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = None\n self._test_null_distribution_basic(test=\"rank\", lazy=False)\n\n\nclass TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):\n\n def test_null_distribution_ztest(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n 
logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = \"nb\"\n self._test_null_distribution_basic(test=\"z-test\", lazy=False, quick_scale=False)\n self._test_null_distribution_basic(test=\"z-test\", lazy=False, quick_scale=True)\n\n def test_null_distribution_ztest_lazy(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = \"nb\"\n self._test_null_distribution_basic(test=\"z-test\", lazy=True, quick_scale=False)\n self._test_null_distribution_basic(test=\"z-test\", lazy=True, quick_scale=True)\n\n def test_null_distribution_wald(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = \"nb\"\n self._test_null_distribution_basic(test=\"wald\", lazy=False, quick_scale=False)\n self._test_null_distribution_basic(test=\"wald\", lazy=False, quick_scale=True)\n\n def test_null_distribution_lrt(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n np.random.seed(1)\n self.noise_model = \"nb\"\n self._test_null_distribution_basic(test=\"lrt\", lazy=False, quick_scale=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.seed",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.round"
]
] |
aljubrmj/CS342-Final-Project
|
[
"841bab59ca1311faa550c5fce9327a1e65ff5501"
] |
[
"planner/regressor/models.py"
] |
[
"import torch\nimport torch.nn.functional as F\n\ndef spatial_argmax(logit):\n weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)\n return torch.stack(((weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),\n (weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)\n\n\nclass CNNClassifier(torch.nn.Module):\n class Block(torch.nn.Module):\n def __init__(self, n_input, n_output, kernel_size=3, stride=2):\n super().__init__()\n self.c1 = torch.nn.Conv2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,\n stride=stride, bias=False)\n self.c2 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)\n self.c3 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)\n self.b1 = torch.nn.BatchNorm2d(n_output)\n self.b2 = torch.nn.BatchNorm2d(n_output)\n self.b3 = torch.nn.BatchNorm2d(n_output)\n self.skip = torch.nn.Conv2d(n_input, n_output, kernel_size=1, stride=stride)\n\n def forward(self, x):\n return F.relu(self.b3(self.c3(F.relu(self.b2(self.c2(F.relu(self.b1(self.c1(x)))))))) + self.skip(x))\n\n def __init__(self, layers=[16, 32, 32, 32], n_output_channels=2, kernel_size=3):\n super().__init__()\n\n L = []\n c = 3\n for l in layers:\n L.append(self.Block(c, l, kernel_size, 2))\n c = l\n self.network = torch.nn.Sequential(*L)\n self.classifier = torch.nn.Linear(c, n_output_channels)\n\n def forward(self, x):\n z = self.network(x)\n return self.classifier(z.mean(dim=[2, 3]))\n\nclass Planner_reg(torch.nn.Module):\n def __init__(self, channels=[16, 32, 32, 32]):\n super().__init__()\n\n conv_block = lambda c, h: [torch.nn.BatchNorm2d(h), torch.nn.Conv2d(h, c, 5, 2, 2), torch.nn.ReLU(True)]\n\n h, _conv = 3, []\n for c in channels:\n _conv += conv_block(c, h)\n h = c\n\n self._conv = torch.nn.Sequential(*_conv, torch.nn.Conv2d(h, 1, 1))\n # self.classifier = torch.nn.Linear(h, 2)\n # self.classifier = torch.nn.Conv2d(h, 1, 1)\n\n def forward(self, img):\n \"\"\"\n Your code here\n Predict the aim point in image coordinate, given the supertuxkart image\n @img: (B,3,96,128)\n return (B,2)\n \"\"\"\n x = self._conv(img)\n return spatial_argmax(x[:, 0])\n\nclass FCN(torch.nn.Module):\n class UpBlock(torch.nn.Module):\n def __init__(self, n_input, n_output, kernel_size=3, stride=2):\n super().__init__()\n self.c1 = torch.nn.ConvTranspose2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,\n stride=stride, output_padding=1)\n\n def forward(self, x):\n return F.relu(self.c1(x))\n\n def __init__(self, layers=[16, 32, 64, 128], n_output_channels=5, kernel_size=3, use_skip=True):\n super().__init__()\n self.input_mean = torch.Tensor([0.3521554, 0.30068502, 0.28527516])\n self.input_std = torch.Tensor([0.18182722, 0.18656468, 0.15938024])\n\n c = 3\n self.use_skip = use_skip\n self.n_conv = len(layers)\n skip_layer_size = [3] + layers[:-1]\n for i, l in enumerate(layers):\n self.add_module('conv%d' % i, CNNClassifier.Block(c, l, kernel_size, 2))\n c = l\n for i, l in list(enumerate(layers))[::-1]:\n self.add_module('upconv%d' % i, self.UpBlock(c, l, kernel_size, 2))\n c = l\n if self.use_skip:\n c += skip_layer_size[i]\n self.classifier = torch.nn.Conv2d(c, n_output_channels, 1)\n\n def forward(self, x):\n z = (x - self.input_mean[None, :, None, None].to(x.device)) / self.input_std[None, :, None, None].to(x.device)\n up_activation = []\n for i in range(self.n_conv):\n # Add all the 
information required for skip connections\n up_activation.append(z)\n z = self._modules['conv%d'%i](z)\n\n for i in reversed(range(self.n_conv)):\n z = self._modules['upconv%d'%i](z)\n # Fix the padding\n z = z[:, :, :up_activation[i].size(2), :up_activation[i].size(3)]\n # Add the skip connection\n if self.use_skip:\n z = torch.cat([z, up_activation[i]], dim=1)\n return self.classifier(z)\n\n\nmodel_factory = {\n 'cnn': CNNClassifier,\n 'fcn': FCN,\n 'planner_reg':Planner_reg\n}\n\n\ndef save_model(model):\n from torch import save\n from os import path\n for n, m in model_factory.items():\n if isinstance(model, m):\n return save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), '%s.th' % n))\n raise ValueError(\"model type '%s' not supported!\" % str(type(model)))\n\n\ndef load_model(model):\n from torch import load\n from os import path\n r = model_factory[model]()\n r.load_state_dict(load(path.join(path.dirname(path.abspath(__file__)), '%s.th' % model), map_location='cpu'))\n return r\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.Tensor"
]
] |
Henrynaut/ML
|
[
"47ca3a67948ed8190a31a57d1e9a803ca532938b"
] |
[
"scripts/neural_net_workshop.py"
] |
[
"import numpy as np\nimport random\nrandom.seed(200)\n\n# Create Sigmoid Function\ndef sig(inp):\n return (1/(1+np.exp(-1*inp)))\n\n# For Back Propagation, make Desigmoid function\ndef dsig(inp):\n return (1.0-inp)*inp\n\n# Define class for neuron\nclass Neuron:\n def __init__(self,weights,func,dfunc):\n # member variables for class\n self.weights = weights\n self.output = None\n self.func = func\n # dfunc is the derivative of the function\n self.dfunc = dfunc\n # No delta yet because we haven't defined anything\n self.delta = None\n def agr(self,x):\n bias = self.weights[-1]\n out = np.inner(self.weights.copy()[:-1],x) + bias\n return out\n def activation(self,inp):\n self.output = self.func(inp)\n return self.output\n\n# Definition for weights\ndef gen_weights(dim):\n # Add 1 to the dimension for the bias\n return np.random.uniform(-0.1,0.1,dim+1)\n\n# Definition of the actual network\n# Activations correspond to activation funcitons used\ndef gen_net(structure, activations):\n # Create empty list\n net = []\n for i in range(1,len(structure)):\n layer = []\n for j in range(structure[i]):\n # feed in neuron weights from last layer\n weights = gen_weights(structure[i-1])\n layer.append(Neuron(weights, activations[0][i-1], activations[1][i-1]))\n net.append(layer)\n return net\n\n# Define feed forward\ndef feed_fwd(net, inp):\n # It stores the current input associated with the given layer\n inp_store = inp\n for layer in net:\n out_of_curr_layer = []\n for neuron in layer:\n # Calculate accumulated output value\n accum = neuron.agr(inp_store)\n output = neuron.activation(accum)\n # Store output for later use\n out_of_curr_layer.append(output)\n inp_store = out_of_curr_layer\n return inp_store\n\n# Define back propagation\ndef back_prop(net, target):\n back_len = len(net)\n for i in range(back_len):\n ind = back_len-i-1\n layer = net[ind]\n errors = []\n if ind == back_len-1:\n j=0\n for neuron in layer:\n errors.append(target[j]-neuron.output)\n j+=1\n else:\n for j in range(len(layer)):\n error = 0.0\n # For neuron in front of current neuron, check deltas\n for neuron in net[ind+1]:\n error+=(neuron.weights[j]*neuron.delta)\n errors.append(error)\n j=0\n for neuron in layer:\n neuron.delta = errors[j]*neuron.dfunc(neuron.output)\n j+=1\n return net\n\n\n# Define how much to update the weights by everytime\n# Alpha is the learning rate, but if too high it may overshoot\ndef update_weights(net,inp,alpha):\n for i in range(len(net)):\n if i==0:\n inputs = inp\n else:\n inputs = []\n prev_layer = net[i-1]\n for neuron in prev_layer:\n inputs.append(neuron.output)\n curr_layer = net[i]\n for neuron in curr_layer:\n for j in range(len(inputs)):\n neuron.weights[j] += alpha*neuron.delta*inputs[j]\n neuron.weights[-1]+=alpha*neuron.delta\n\n#Define training approach\ndef train(net,train_data,alpha,epoch):\n for curr_epoch_no in range(epoch):\n sums = 0\n sample_no = 0\n # Accuracy Count (number of samples that are right)\n acc_cnt = 0\n for sample in train_data:\n outputs = feed_fwd(net,sample[0])\n expected = sample[1]\n sums+=sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])\n if expected.index(max(expected) == outputs.index(max(outputs))):\n acc_cnt += 1\n back_prop(net,expected)\n update_weights(net,sample[0],alpha)\n # Metadata on how well it's doing\n print('epoch_no:', curr_epoch_no,'loss:', sums, 'accuracy:', acc_cnt)\n\nnet = gen_net([2,100,100,2],[(sig,sig,sig),[dsig,dsig,dsig]])\ntrain(net,[[[0,0],[0,1]],\n [[0,1],[1,0]],\n [[1,0],[1,0]],\n [[1,1],[0,1]]],\n 2, 100)\n\n# 
Code to test out neural network output\n# net = gen_net([2,2,2],[(sig,sig),[dsig,dsig]])\n# print(feed_fwd(net,[0.2,0.3]))\n# for i in range(len(net)):\n# for j in range(len(net[i])):\n# print(net[i][j].weights)\n\n# print(\"--------------------------\")\n# net = back_prop(net,[1,0])\n# net = update_weights(net,[0.2,0.3],0.2)\n# for i in range(len(net)):\n# for j in range(len(net[i])):\n# print(net[i][j].weights)"
] |
[
[
"numpy.random.uniform",
"numpy.exp"
]
] |
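One detail worth flagging in the MLP training sample above: the accuracy check in train() reads expected.index(max(expected) == outputs.index(max(outputs))), so the equality is evaluated inside index() and the code looks up a boolean in the label list instead of comparing argmax positions. A minimal, self-contained sketch of the presumably intended check (the helper name is invented for illustration and is not part of the dataset row):

    # Hypothetical helper showing the intended argmax comparison.
    def is_correct(expected, outputs):
        # Compare the positions of the largest entries, not a boolean lookup.
        return expected.index(max(expected)) == outputs.index(max(outputs))

    if __name__ == "__main__":
        assert is_correct([0, 1], [0.2, 0.8])        # predicted class matches the label
        assert not is_correct([1, 0], [0.2, 0.8])    # predicted class differs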
metataro/DirectFeedbackAlignment
|
[
"7e2cbc3f001ac2290a15440628bb2b97d4ec52ab"
] |
[
"network/layers/convolution_im2col.py"
] |
[
"import numpy as np\n\nfrom network.activation import Activation\nfrom network.layer import Layer\nfrom network.utils.im2col_cython import im2col_cython, col2im_cython\n\n\nclass Convolution(Layer):\n def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,\n last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:\n assert len(filter_shape) == 4, \\\n \"invalid filter shape: 4-tuple required, {}-tuple given\".format(len(filter_shape))\n super().__init__()\n self.filter_shape = filter_shape\n self.stride = stride\n self.padding = padding\n self.dropout_rate = dropout_rate\n self.activation = activation\n self.last_layer = last_layer\n self.weight_initializer = weight_initializer\n self.fb_weight_initializer = fb_weight_initializer\n\n def initialize(self, input_size, num_classes, train_method) -> tuple:\n assert np.size(input_size) == 3, \\\n \"invalid input size: 3-tuple required for convolution layer\"\n\n c_in, h_in, w_in = input_size\n f, c_f, h_f, w_f = self.filter_shape\n\n assert c_in == c_f, \\\n \"input channel dimension ({}) not compatible with filter channel dimension ({})\".format(c_in, c_f)\n assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \\\n \"filter width ({}) not compatible with input width ({})\".format(h_f, h_in)\n assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \\\n \"filter height ({}) not compatible with input height ({})\".format(h_f, h_in)\n\n self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1\n self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1\n\n # initialize weights\n if self.weight_initializer is None:\n sqrt_fan_in = np.sqrt(c_in * h_in * w_in)\n self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)\n else:\n self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))\n\n # initialize feedback weights\n if self.fb_weight_initializer is None:\n sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)\n # self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out))\n self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))\n else:\n # self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out))\n self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))\n\n # initialize bias units\n self.b = np.zeros(f)\n\n return f, self.h_out, self.w_out\n\n def forward(self, X, mode='predict') -> np.ndarray:\n n_in, c, h_in, w_in = X.shape\n n_f, c, h_f, w_f = self.W.shape\n\n self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride) # <->\n z = self.W.reshape((n_f, -1)).dot(self.x_cols)\n z += self.b.reshape(-1, 1) # +\n z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)\n\n self.a_in = X\n\n if self.activation is None:\n self.a_out = z\n else:\n self.a_out = self.activation.forward(z)\n\n if mode == 'train' and self.dropout_rate > 0:\n # self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate)\n self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)\n self.a_out *= self.dropout_mask\n\n return self.a_out\n\n def dfa(self, E: np.ndarray) -> tuple:\n # E = np.einsum('ij,jklm->iklm', E, self.B)\n\n n_f, c_f, h_f, w_f = self.W.shape\n\n E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))\n if self.dropout_rate > 0:\n E *= 
self.dropout_mask\n\n if self.activation is None:\n E *= self.a_out\n else:\n E *= self.activation.gradient(self.a_out)\n\n dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)\n db = np.sum(E, axis=(0, 2, 3))\n\n return dW, db\n\n def back_prob(self, E: np.ndarray) -> tuple:\n if self.dropout_rate > 0:\n E *= self.dropout_mask\n\n n_in, c_in, h_in, w_in = self.a_in.shape\n n_f, c_f, h_f, w_f = self.W.shape\n\n if self.activation is None:\n E *= self.a_out\n else:\n E *= self.activation.gradient(self.a_out)\n delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)\n\n dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)\n dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)\n dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)\n db = np.sum(E, axis=(0, 2, 3))\n\n return dX, dW, db\n\n def has_weights(self) -> bool:\n return True\n"
] |
[
[
"numpy.dot",
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"numpy.random.uniform",
"numpy.size",
"numpy.sqrt"
]
] |
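The im2col-based convolution layer above derives its spatial output size in initialize() from the usual relation h_out = (h_in - h_f + 2*padding) // stride + 1 (and the analogous expression for the width), after asserting that the division is exact. A stand-alone sketch of that computation, with made-up sizes used purely for illustration:

    # Recomputes the output height/width the way initialize() does;
    # the concrete numbers below are invented examples.
    def conv_output_size(h_in, w_in, h_f, w_f, padding, stride):
        assert (h_in - h_f + 2 * padding) % stride == 0
        assert (w_in - w_f + 2 * padding) % stride == 0
        h_out = (h_in - h_f + 2 * padding) // stride + 1
        w_out = (w_in - w_f + 2 * padding) // stride + 1
        return h_out, w_out

    print(conv_output_size(32, 32, 3, 3, padding=1, stride=1))  # -> (32, 32)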
NingAnMe/snow_cover_of_remote_sensing
|
[
"aabd0f4754eb5200203fc8a90f06b603dcd260a8"
] |
[
"lib/pb_io.py"
] |
[
"# coding: utf-8\nimport errno\nimport os\nimport random\nimport re\nfrom contextlib import contextmanager\n\nimport h5py\nimport numpy as np\nimport time\nimport yaml\nfrom datetime import datetime\n\n\ndef write_yaml_file(yaml_dict, file_yaml):\n path_yaml = os.path.dirname(file_yaml)\n if not os.path.isdir(path_yaml):\n os.makedirs(path_yaml)\n with open(file_yaml, 'w') as stream:\n yaml.dump(yaml_dict, stream, default_flow_style=False)\n\n\ndef make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n\ndef find_file(path, reg):\n \"\"\"\n path: 要遍历的目录\n reg: 符合条件的文件\n \"\"\"\n FileLst = []\n try:\n lst = os.walk(path)\n for root, dirs, files in lst:\n for name in files:\n try:\n m = re.match(reg, name)\n except Exception as e:\n continue\n if m:\n FileLst.append(os.path.join(root, name))\n except Exception as e:\n print(str(e))\n\n return sorted(FileLst)\n\n\ndef path_replace_ymd(path, ymd):\n \"\"\"\n path:替换路径中的日期 ,path中%YYYY%MM%DD%JJJ 等关键字会被ymd日期实例\n ymd: yyyymmdd (20180101)\n \"\"\"\n # 转成datetime类型\n ymd = datetime.strptime(ymd, '%Y%m%d')\n yy = ymd.strftime('%Y')\n mm = ymd.strftime('%m')\n dd = ymd.strftime('%d')\n jj = ymd.strftime('%j')\n path = path.replace('%YYYY', yy)\n path = path.replace('%MM', mm)\n path = path.replace('%DD', dd)\n path = path.replace('%JJJ', jj)\n\n return path\n\n\ndef is_none(*args):\n \"\"\"\n 判断传入的变量中是否有 None\n :param args:\n :return:\n \"\"\"\n has_none = False\n for arg in args:\n if arg is None:\n has_none = True\n return has_none\n\n\ndef copy_attrs_h5py(pre_object, out_object):\n \"\"\"\n 复制 file、dataset 或者 group 的属性\n :param pre_object: 被复制属性的 dataset 或者 group\n :param out_object: 复制属性的 dataset 或者 group\n :return:\n \"\"\"\n for akey in list(pre_object.attrs.keys()):\n out_object.attrs[akey] = pre_object.attrs[akey]\n\n\ndef read_dataset_hdf5(file_path, set_name):\n \"\"\"\n 读取 hdf5 文件,返回一个 numpy 多维数组\n :param file_path: (unicode)文件路径\n :param set_name: (str or list)表的名字\n :return: 如果传入的表名字是一个字符串,返回 numpy.ndarray\n 如果传入的表名字是一个列表,返回一个字典,key 是表名字,\n value 是 numpy.ndarry\n \"\"\"\n if isinstance(set_name, str):\n if os.path.isfile(file_path):\n file_h5py = h5py.File(file_path, 'r')\n data = file_h5py.get(set_name)[:]\n dataset = np.array(data)\n file_h5py.close()\n return dataset\n else:\n raise ValueError('value error: file_path')\n elif isinstance(set_name, list):\n datasets = {}\n if os.path.isfile(file_path):\n file_h5py = h5py.File(file_path, 'r')\n for name in set_name:\n data = file_h5py.get(name)[:]\n dataset = np.array(data)\n datasets[name] = dataset\n file_h5py.close()\n return datasets\n else:\n raise ValueError('value error: file_path')\n else:\n raise ValueError('value error: set_name')\n\n\ndef attrs2dict(attrs):\n \"\"\"\n 将一个 HDF5 attr 类转为 Dict 类\n :return:\n \"\"\"\n d = {}\n for k, v in list(attrs.items()):\n d[k] = v\n return d\n\n\n@contextmanager\ndef progress_lock(max_wait_time=5):\n try:\n sleep_time = 0\n lock = \"progress.lock\"\n while True:\n if os.path.isfile(lock):\n if sleep_time > max_wait_time:\n try:\n os.remove(lock)\n break\n except:\n continue\n else:\n random_number = random.random() * 0.1\n sleep_time += random_number\n\n time.sleep(random_number)\n else:\n break\n with open(lock, \"w\"):\n pass\n yield\n finally:\n try:\n os.remove(lock)\n except:\n pass\n\n\ndef write_txt(in_file, head, bodys, keylens=8):\n \"\"\"\n description: wangpeng add 20180615 (写入或更新txt)\n :in_file 写入文件位置\n :head 文件头信息\n :bodys 文件体\n :keylens 
更新文件使用的第一列关键字长度\n \"\"\"\n allLines = []\n DICT_D = {}\n FilePath = os.path.dirname(in_file)\n if not os.path.exists(FilePath):\n os.makedirs(FilePath)\n\n if os.path.isfile(in_file) and os.path.getsize(in_file) != 0:\n fp = open(in_file, 'r')\n fp.readline()\n Lines = fp.readlines()\n fp.close()\n # 使用字典特性,保证时间唯一,读取数据\n for Line in Lines:\n DICT_D[Line[:keylens]] = Line[keylens:]\n # 添加或更改数据\n for Line in bodys:\n DICT_D[Line[:keylens]] = Line[keylens:]\n # 按照时间排序\n newLines = sorted(\n iter(DICT_D.items()), key=lambda d: d[0], reverse=False)\n\n for i in range(len(newLines)):\n allLines.append(str(newLines[i][0]) + str(newLines[i][1]))\n fp = open(in_file, 'w')\n fp.write(head)\n fp.writelines(allLines)\n fp.close()\n else:\n fp = open(in_file, 'w')\n fp.write(head)\n fp.writelines(bodys)\n fp.close()\n\n\ndef str_format(string, values):\n \"\"\"\n 格式化字符串\n :param string:(str) \"DCC: %sat_sensor_Projection_%ymd(分辨率 %resolution 度)\"\n :param values:(dict) {\"sat_sensor\": sat_sensor, \"resolution\": str(resolution), \"ymd\": ymd}\n :return: DCC: FY3D+MERSI_Projection_201712(分辨率 1 度)\n \"\"\"\n if not isinstance(string, str):\n return\n\n for k, v in values.items():\n string = string.replace(\"%\" + str(k), str(v))\n return string\n\n\ndef get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):\n \"\"\"\n :param dir_path: 文件夹\n :param time_start: 开始时间\n :param time_end: 结束时间\n :param ext: 后缀名, '.hdf5'\n :param pattern_ymd: 匹配时间的模式, 可以是 r\".*(\\d{8})_(\\d{4})_\"\n :return: list\n \"\"\"\n files_found = []\n if pattern_ymd is not None:\n pattern = pattern_ymd\n else:\n pattern = r\".*(\\d{8})\"\n\n for root, dirs, files in os.walk(dir_path):\n for file_name in files:\n if ext is not None:\n if '.' not in ext:\n ext = '.' + ext\n if os.path.splitext(file_name)[1].lower() != ext.lower():\n continue\n re_result = re.match(pattern, file_name)\n if re_result is not None:\n time_file = ''.join(re_result.groups())\n else:\n continue\n if int(time_start) <= int(time_file) <= int(time_end):\n files_found.append(os.path.join(root, file_name))\n files_found.sort()\n return files_found\n\n\nclass ReadOrbitCrossFile(object):\n \"\"\"\n test\n \"\"\"\n\n @staticmethod\n def read_cross_file(in_file, file_type):\n \"\"\"\n :param in_file:\n :param file_type:\n :return:\n \"\"\"\n data = {\n 'ymdhms1': None,\n 'ymdhms2': None,\n 'lon1': None,\n 'lat1': None,\n 'lon2': None,\n 'lat2': None,\n 'fix_name': None # 只有固定点才有\n }\n\n if not os.path.isfile(in_file):\n print('***WARNING***File is not exist: {}'.format(in_file))\n return data\n # with open(in_file, 'r') as fp:\n # lines_10 = fp.readlines()[0: 10]\n #\n # count = 0\n # for line in lines_10:\n # print count, line.split()\n # count += 1\n\n if file_type == 'leo_area':\n\n data_raw = np.loadtxt(in_file, skiprows=10, dtype={\n 'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),\n 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})\n\n if data_raw.size != 0:\n ymd = data_raw['d1']\n hms1 = data_raw['d2']\n hms2 = data_raw['d3']\n ymdhms1 = list(map(ymdhms2date, ymd, hms1))\n ymdhms2 = list(map(ymdhms2date, ymd, hms2))\n\n data['ymdhms1'] = ymdhms1\n data['ymdhms2'] = ymdhms2\n data['lat1'] = data_raw['d4']\n data['lon1'] = data_raw['d5']\n data['lat2'] = data_raw['d6']\n data['lon2'] = data_raw['d7']\n\n elif file_type == 'leo_leo':\n\n data_raw = np.loadtxt(in_file, skiprows=10, dtype={\n 'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'),\n 'formats': ('S8', 'S8', 'f4', 'f4', 'S8', 'f4', 'f4', 'f4', 'f4')})\n\n if 
data_raw.size != 0:\n ymd = data_raw['d1']\n hms1 = data_raw['d2']\n hms2 = data_raw['d5']\n ymdhms1 = list(map(ymdhms2date, ymd, hms1))\n ymdhms2 = list(map(ymdhms2date, ymd, hms2))\n data['ymdhms1'] = ymdhms1\n data['ymdhms2'] = ymdhms2\n data['lat1'] = data_raw['d3']\n data['lon1'] = data_raw['d4']\n data['lat2'] = data_raw['d6']\n data['lon2'] = data_raw['d7']\n\n elif file_type == 'leo_fix':\n\n # 数据\n data_raw = np.loadtxt(in_file, skiprows=10, dtype={\n 'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8',),\n 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4', 'f4')})\n\n if data_raw.size != 0:\n ymd = data_raw['d1']\n hms1 = data_raw['d2']\n hms2 = data_raw['d2']\n ymdhms1 = list(map(ymdhms2date, ymd, hms1))\n ymdhms2 = list(map(ymdhms2date, ymd, hms2))\n\n data['ymdhms1'] = ymdhms1\n data['ymdhms2'] = ymdhms2\n data['lat1'] = data_raw['d6']\n data['lon1'] = data_raw['d7']\n data['lat2'] = data_raw['d4']\n data['lon2'] = data_raw['d5']\n data['fix_name'] = data_raw['d3']\n\n elif file_type == 'geo_leo':\n # 信息\n\n data_raw = np.loadtxt(in_file, skiprows=10, dtype={\n 'names': ('d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7'),\n 'formats': ('S8', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})\n\n if data_raw.size != 0:\n ymd = data_raw['d1']\n hms1 = data_raw['d2']\n hms2 = data_raw['d3']\n ymdhms1 = list(map(ymdhms2date, ymd, hms1))\n ymdhms2 = list(map(ymdhms2date, ymd, hms2))\n\n data['ymdhms1'] = ymdhms1\n data['ymdhms2'] = ymdhms2\n data['lat1'] = data_raw['d4']\n data['lon1'] = data_raw['d5']\n data['lat2'] = data_raw['d6']\n data['lon2'] = data_raw['d7']\n\n else:\n raise KeyError('Cant handle this file type: {}'.format(file_type))\n return data\n\n\ndef ymdhms2date(ymd, hms):\n \"\"\"\n ymd = 20180101\n hms = 04:04:04\n \"\"\"\n ymdhms = ymd + hms\n return datetime.strptime(ymdhms, '%Y%m%d%H:%M:%S')\n\n\ndef CombineTimeList(TimeList):\n # 将时间段list中有重叠的时间段进行融合为新的时间段\n newTimeList = []\n # 默认排序,升序\n TimeList.sort()\n # 标记有时间融合的时间\n stime = TimeList[0][0]\n etime = TimeList[0][1]\n for i in range(1, len(TimeList), 1):\n if TimeList[i][1] <= etime:\n continue\n elif TimeList[i][0] <= etime <= TimeList[i][1]:\n etime = TimeList[i][1]\n elif TimeList[i][0] > etime:\n newTimeList.append([stime, etime])\n stime = TimeList[i][0]\n etime = TimeList[i][1]\n\n newTimeList.append([stime, etime])\n\n return newTimeList\n\n\ndef get_files_by_date(dir_path, time_start=None, time_end=None, ext=None, pattern=None):\n \"\"\"\n :param dir_path: 文件夹\n :param time_start: 开始时间\n :param time_end: 结束时间\n :param ext: 后缀名, '.hdf5'\n :param pattern: 匹配时间的模式\n :return: list\n \"\"\"\n files_found = []\n\n for root, dirs, files in os.walk(dir_path):\n for file_name in files:\n if ext is not None:\n if '.' not in ext:\n ext = '.' 
+ ext\n if os.path.splitext(file_name)[1].lower() != ext.lower():\n continue\n if pattern is not None:\n re_result = re.match(pattern, file_name)\n if re_result is None:\n continue\n if time_start is not None:\n time_file = ''.join(re_result.groups())\n if not int(time_start) <= int(time_file) <= int(time_end):\n continue\n files_found.append(os.path.join(root, file_name))\n files_found.sort()\n return files_found\n\n\nif __name__ == '__main__':\n pass\n path_out_map = str_format('/abc/%YYYY%MM%DD', {\n 'YYYY': '20180101',\n 'MM': '01',\n 'DD': '01',\n })\n print(path_out_map)\n# path1 = \"E:/projects/ocrs/cfg/global.cfg\"\n# path2 = \"E:/projects/ocrs/cfg/FY3B+MERSI.yaml\"\n# c = Config(path1)\n# c = Config(path2)\n# print c.error\n# l = c.__dict__.keys()\n# l = sorted(l)\n# for k in l:\n# print k, \":\", c.__dict__[k]\n# print k\n\n# ################# test ReadOrbitCrossFile ################\n# LEO_AREA\n# leo_area_name = r'C:\\Users\\wangpeng\\Desktop\\tmp\\cross\\AQUA_australia_LEO_AREA_20171221.txt'\n# read_data = ReadOrbitCrossFile.read_cross_file(leo_area_name, 'leo_area')\n\n# LEO_LEO\n# leo_leo_name = r'C:\\Users\\wangpeng\\Desktop\\tmp\\cross\\FENGYUN-3D_NPP_LEO_LEO_20180901.txt'\n# read_data = ReadOrbitCrossFile.read_cross_file(leo_leo_name, 'leo_leo')\n\n# LEO_FIX\n# leo_fix_name = r'C:\\Users\\wangpeng\\Desktop\\tmp\\cross\\AQUA_FIX_LEO_FIX_20181101.txt'\n# read_data = ReadOrbitCrossFile.read_cross_file(leo_fix_name, 'leo_fix')\n\n# GEO_LEO\n# geo_leo_name = r'C:\\Users\\wangpeng\\Desktop\\tmp\\cross\\FENGYUN-2F_METOP-A_GEO_LEO20181101.txt'\n# read_data = ReadOrbitCrossFile.read_cross_file(geo_leo_name, 'geo_leo')\n\n# keys = read_data.keys()\n# keys.sort()\n# for data_name in keys:\n# print data_name, type(read_data[data_name]), read_data[data_name]\n"
] |
[
[
"numpy.array",
"numpy.loadtxt"
]
] |
ekyuho/tensorflow
|
[
"e0b721190502346e5485010c8db78339e08c5951"
] |
[
"tensorflow/contrib/batching/python/ops/batch_ops.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Operations for automatic batching and unbatching.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.batching.ops import gen_batch_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.contrib.batching.ops.gen_batch_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import resource_loader\n\n\n_batch_ops = loader.load_op_library(\n resource_loader.get_path_to_datafile(\"_batch_ops.so\"))\n\n\n@ops.RegisterGradient(\"Batch\")\ndef _BatchGrad(op, *out_grads): # pylint: disable=invalid-name\n \"\"\"Gradient for batch op.\"\"\"\n gradients = []\n for i in range(len(op.inputs)):\n gradients.append(\n gen_batch_ops.unbatch(\n out_grads[i],\n op.outputs[-2],\n op.outputs[-1],\n timeout_micros=op.get_attr(\"grad_timeout_micros\"),\n shared_name=\"batch_gradient_{}_{}\".format(op.name, i)))\n return gradients\n\n\n@ops.RegisterGradient(\"Unbatch\")\ndef _UnbatchGrad(op, grad): # pylint: disable=invalid-name\n return [\n gen_batch_ops.unbatch_grad(\n op.inputs[0],\n op.inputs[1],\n grad,\n op.inputs[2],\n shared_name=\"unbatch_gradient_{}\".format(op.name)), None, None\n ]\n\n\ndef batch_function(num_batch_threads, max_batch_size, batch_timeout_micros,\n allowed_batch_sizes=None,\n grad_timeout_micros=60 * 1000 * 1000,\n unbatch_timeout_micros=60 * 1000 * 1000):\n \"\"\"Batches the computation done by the decorated function.\n\n So, for example, in the following code\n\n ```python\n @batch_function(1, 2, 3)\n def layer(a):\n return tf.matmul(a, a)\n\n b = layer(w)\n ```\n\n if more than one session.run call is simultaneously trying to compute `b`\n the values of `w` will be gathered, non-deterministically concatenated\n along the first axis, and only one thread will run the computation. See the\n documentation of the `Batch` op for more details.\n\n Assumes that all arguments of the decorated function are Tensors which will\n be batched along their first dimension.\n\n SparseTensor is not supported. The return value of the decorated function\n must be a Tensor or a list/tuple of Tensors.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches\n of work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. 
The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n grad_timeout_micros: The timeout to use for the gradient. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n unbatch_timeout_micros: The timeout to use for unbatching. See the\n documentation of the unbatch op for more details. Defaults to 60s.\n\n Returns:\n The decorated function will return the unbatched computation output Tensors.\n \"\"\"\n def decorator(f): # pylint: disable=missing-docstring\n def decorated(*args):\n with ops.name_scope(\"batch\") as name:\n for a in args:\n if not isinstance(a, ops.Tensor):\n raise ValueError(\"All arguments to functions decorated with \"\n \"`batch_function` are supposed to be Tensors; \"\n \"found %s\" % repr(a))\n batched_tensors, batch_index, id_t = gen_batch_ops.batch(\n args,\n num_batch_threads=num_batch_threads,\n max_batch_size=max_batch_size,\n batch_timeout_micros=batch_timeout_micros,\n allowed_batch_sizes=allowed_batch_sizes,\n grad_timeout_micros=grad_timeout_micros,\n shared_name=name)\n outputs = f(*batched_tensors)\n if isinstance(outputs, ops.Tensor):\n outputs_list = [outputs]\n else:\n outputs_list = outputs\n with ops.name_scope(\"unbatch\") as unbatch_name:\n unbatched = [\n gen_batch_ops.unbatch(t, batch_index, id_t,\n timeout_micros=unbatch_timeout_micros,\n shared_name=unbatch_name)\n for t in outputs_list]\n if isinstance(outputs, ops.Tensor):\n return unbatched[0]\n return unbatched\n return decorated\n return decorator\n"
] |
[
[
"tensorflow.contrib.batching.ops.gen_batch_ops.unbatch",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.contrib.batching.ops.gen_batch_ops.batch",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.platform.resource_loader.get_path_to_datafile"
]
] |
xingkongxiaxia/xx
|
[
"ce51d75406592d6bc25bb803f773f0788496fd97",
"ce51d75406592d6bc25bb803f773f0788496fd97"
] |
[
"recbole/model/knowledge_aware_recommender/kgnnls.py",
"recbole/model/knowledge_aware_recommender/kgcn.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Time : 2020/10/3\n# @Author : Changxin Tian\n# @Email : cx.tian@outlook.com\n\nr\"\"\"\nKGNNLS\n################################################\n\nReference:\n Hongwei Wang et al. \"Knowledge-aware Graph Neural Networks with Label Smoothness Regularization\n for Recommender Systems.\" in KDD 2019.\n\nReference code:\n https://github.com/hwwang55/KGNN-LS\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport random\n\nfrom recbole.utils import InputType\nfrom recbole.model.abstract_recommender import KnowledgeRecommender\nfrom recbole.model.loss import BPRLoss, EmbLoss\nfrom recbole.model.init import xavier_normal_initialization\n\n\nclass KGNNLS(KnowledgeRecommender):\n r\"\"\"KGNN-LS is a knowledge-based recommendation model.\n KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then apply a graph neural network to\n compute personalized item embeddings. To provide better inductive bias, KGNN-LS relies on label smoothness\n assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance\n labels/scores. Label smoothness provides regularization over the edge weights and it is equivalent to a label\n propagation scheme on a graph.\n \"\"\"\n input_type = InputType.PAIRWISE\n\n def __init__(self, config, dataset):\n super(KGNNLS, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n self.neighbor_sample_size = config['neighbor_sample_size']\n self.aggregator_class = config['aggregator'] # which aggregator to use\n # number of iterations when computing entity representation\n self.n_iter = config['n_iter']\n self.reg_weight = config['reg_weight'] # weight of l2 regularization\n # weight of label Smoothness regularization\n self.ls_weight = config['ls_weight']\n\n # define embedding\n self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)\n self.entity_embedding = nn.Embedding(\n self.n_entities, self.embedding_size)\n self.relation_embedding = nn.Embedding(\n self.n_relations + 1, self.embedding_size)\n\n # sample neighbors and construct interaction table\n kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')\n adj_entity, adj_relation = self.construct_adj(kg_graph)\n self.adj_entity, self.adj_relation = adj_entity.to(\n self.device), adj_relation.to(self.device)\n\n inter_feat = dataset.dataset.inter_feat.values\n pos_users = torch.from_numpy(inter_feat[:, 0])\n pos_items = torch.from_numpy(inter_feat[:, 1])\n pos_label = torch.ones(pos_items.shape)\n pos_interaction_table, self.offset = self.get_interaction_table(\n pos_users, pos_items, pos_label)\n self.interaction_table = self.sample_neg_interaction(\n pos_interaction_table, self.offset)\n\n # define function\n self.softmax = nn.Softmax(dim=-1)\n self.linear_layers = torch.nn.ModuleList()\n for i in range(self.n_iter):\n self.linear_layers.append(nn.Linear(\n self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,\n self.embedding_size))\n self.ReLU = nn.ReLU()\n self.Tanh = nn.Tanh()\n\n self.bce_loss = nn.BCEWithLogitsLoss()\n self.l2_loss = EmbLoss()\n\n # parameters initialization\n self.apply(xavier_normal_initialization)\n\n def get_interaction_table(self, user_id, item_id, y):\n r\"\"\"Get interaction_table that is used for fetching user-item interaction label in LS regularization.\n\n Args:\n user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]\n 
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]\n y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]\n\n Returns:\n tuple:\n - interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n - offset(int): The offset that is used for calculating the key(index) in interaction_table\n \"\"\"\n offset = len(str(self.n_entities))\n offset = 10 ** offset\n keys = user_id * offset + item_id\n keys = keys.int().cpu().numpy().tolist()\n values = y.float().cpu().numpy().tolist()\n\n interaction_table = dict(zip(keys, values))\n return interaction_table, offset\n\n def sample_neg_interaction(self, pos_interaction_table, offset):\n r\"\"\"Sample neg_interaction to construct train data.\n\n Args:\n pos_interaction_table(dict): the interaction_table that only contains pos_interaction.\n offset(int): The offset that is used for calculating the key(index) in interaction_table\n\n Returns:\n interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}\n \"\"\"\n pos_num = len(pos_interaction_table)\n neg_num = 0\n neg_interaction_table = {}\n while neg_num < pos_num:\n user_id = random.randint(0, self.n_users)\n item_id = random.randint(0, self.n_items)\n keys = user_id * offset + item_id\n if keys not in pos_interaction_table:\n neg_interaction_table[keys] = 0.\n neg_num += 1\n interaction_table = {**pos_interaction_table, **neg_interaction_table}\n return interaction_table\n\n def construct_adj(self, kg_graph):\n r\"\"\"Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,\n shape: [n_entities, neighbor_sample_size]\n - adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,\n shape: [n_entities, neighbor_sample_size]\n \"\"\"\n # self.logger.info('constructing knowledge graph ...')\n # treat the KG as an undirected graph\n kg_dict = dict()\n for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):\n head = triple[0]\n relation = triple[1]\n tail = triple[2]\n if head not in kg_dict:\n kg_dict[head] = []\n kg_dict[head].append((tail, relation))\n if tail not in kg_dict:\n kg_dict[tail] = []\n kg_dict[tail].append((head, relation))\n\n # self.logger.info('constructing adjacency matrix ...')\n # each line of adj_entity stores the sampled neighbor entities for a given entity\n # each line of adj_relation stores the corresponding sampled neighbor relations\n entity_num = kg_graph.shape[0]\n adj_entity = np.zeros(\n [entity_num, self.neighbor_sample_size], dtype=np.int64)\n adj_relation = np.zeros(\n [entity_num, self.neighbor_sample_size], dtype=np.int64)\n for entity in range(entity_num):\n if entity not in kg_dict.keys():\n adj_entity[entity] = np.array(\n [entity] * self.neighbor_sample_size)\n adj_relation[entity] = np.array(\n [0] * self.neighbor_sample_size)\n continue\n\n neighbors = kg_dict[entity]\n n_neighbors = len(neighbors)\n if n_neighbors >= self.neighbor_sample_size:\n sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,\n replace=False)\n else:\n sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,\n replace=True)\n adj_entity[entity] = np.array(\n [neighbors[i][0] for i in sampled_indices])\n adj_relation[entity] = np.array(\n 
[neighbors[i][1] for i in sampled_indices])\n\n return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)\n\n def get_neighbors(self, items):\n r\"\"\"Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n - relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for\n entities. Relations have the same shape as entities.\n \"\"\"\n items = torch.unsqueeze(items, dim=1)\n entities = [items]\n relations = []\n for i in range(self.n_iter):\n index = torch.flatten(entities[i])\n neighbor_entities = torch.reshape(torch.index_select(\n self.adj_entity, 0, index), (self.batch_size, -1))\n neighbor_relations = torch.reshape(torch.index_select(\n self.adj_relation, 0, index), (self.batch_size, -1))\n entities.append(neighbor_entities)\n relations.append(neighbor_relations)\n return entities, relations\n\n def aggregate(self, user_embeddings, entities, relations):\n r\"\"\"For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]\n\n \"\"\"\n entity_vectors = [self.entity_embedding(i) for i in entities]\n relation_vectors = [self.relation_embedding(i) for i in relations]\n\n for i in range(self.n_iter):\n entity_vectors_next_iter = []\n for hop in range(self.n_iter - i):\n shape = (self.batch_size, -1,\n self.neighbor_sample_size, self.embedding_size)\n self_vectors = entity_vectors[hop]\n neighbor_vectors = torch.reshape(\n entity_vectors[hop + 1], shape)\n neighbor_relations = torch.reshape(\n relation_vectors[hop], shape)\n\n # mix_neighbor_vectors\n user_embeddings = torch.reshape(user_embeddings,\n (self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]\n user_relation_scores = torch.mean(user_embeddings * neighbor_relations,\n dim=-1) # [batch_size, -1, n_neighbor]\n user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),\n dim=-1) # [batch_size, -1, n_neighbor, 1]\n neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,\n dim=2) # [batch_size, -1, dim]\n\n if self.aggregator_class == 'sum':\n output = torch.reshape(\n self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]\n elif self.aggregator_class == 'neighbor':\n output = torch.reshape(\n neighbors_agg, (-1, self.embedding_size)) # [-1, dim]\n elif self.aggregator_class == 'concat':\n # [batch_size, -1, dim * 2]\n output = torch.cat([self_vectors, neighbors_agg], dim=-1)\n output = torch.reshape(\n output, (-1, self.embedding_size 
* 2)) # [-1, dim * 2]\n else:\n raise Exception(\"Unknown aggregator: \" +\n self.aggregator_class)\n\n output = self.linear_layers[i](output)\n # [batch_size, -1, dim]\n output = torch.reshape(\n output, [self.batch_size, -1, self.embedding_size])\n\n if i == self.n_iter - 1:\n vector = self.Tanh(output)\n else:\n vector = self.ReLU(output)\n\n entity_vectors_next_iter.append(vector)\n entity_vectors = entity_vectors_next_iter\n\n res = torch.reshape(\n entity_vectors[0], (self.batch_size, self.embedding_size))\n return res\n\n def label_smoothness_predict(self, user_embeddings, user, entities, relations):\n r\"\"\"Predict the label of items by label smoothness.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],\n user(torch.FloatTensor): the index of users, shape: [batch_size*2]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size*2, 1],\n [batch_size*2, n_neighbor],\n [batch_size*2, n_neighbor^2],\n ...,\n [batch_size*2, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]\n \"\"\"\n # calculate initial labels; calculate updating masks for label propagation\n entity_labels = []\n # True means the label of this item is reset to initial value during label propagation\n reset_masks = []\n holdout_item_for_user = None\n\n for entities_per_iter in entities:\n users = torch.unsqueeze(user, dim=1) # [batch_size, 1]\n user_entity_concat = users * self.offset + \\\n entities_per_iter # [batch_size, n_neighbor^i]\n\n # the first one in entities is the items to be held out\n if holdout_item_for_user is None:\n holdout_item_for_user = user_entity_concat\n\n def lookup_interaction_table(x, _):\n x = int(x)\n label = self.interaction_table.setdefault(x, 0.5)\n return label\n\n initial_label = user_entity_concat.clone().cpu().double()\n initial_label.map_(initial_label, lookup_interaction_table)\n initial_label = initial_label.float().to(self.device)\n\n # False if the item is held out\n holdout_mask = (holdout_item_for_user - user_entity_concat).bool()\n # True if the entity is a labeled item\n reset_mask = (initial_label - 0.5).bool()\n reset_mask = torch.logical_and(\n reset_mask, holdout_mask) # remove held-out items\n initial_label = holdout_mask.float() * initial_label + torch.logical_not(\n holdout_mask).float() * 0.5 # label initialization\n\n reset_masks.append(reset_mask)\n entity_labels.append(initial_label)\n # we do not need the reset_mask for the last iteration\n reset_masks = reset_masks[:-1]\n\n # label propagation\n relation_vectors = [self.relation_embedding(i) for i in relations]\n for i in range(self.n_iter):\n entity_labels_next_iter = []\n for hop in range(self.n_iter - i):\n masks = reset_masks[hop]\n self_labels = entity_labels[hop]\n neighbor_labels = torch.reshape(entity_labels[hop + 1],\n [self.batch_size, -1, self.neighbor_sample_size])\n neighbor_relations = torch.reshape(relation_vectors[hop],\n [self.batch_size, -1, self.neighbor_sample_size,\n self.embedding_size])\n\n # mix_neighbor_labels\n user_embeddings = torch.reshape(user_embeddings,\n [self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]\n user_relation_scores = torch.mean(user_embeddings * neighbor_relations,\n dim=-1) # 
[batch_size, -1, n_neighbor]\n user_relation_scores_normalized = self.softmax(\n user_relation_scores) # [batch_size, -1, n_neighbor]\n\n neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,\n dim=2) # [batch_size, -1, dim] # [batch_size, -1]\n output = masks.float() * self_labels + torch.logical_not(masks).float() * \\\n neighbors_aggregated_label\n\n entity_labels_next_iter.append(output)\n entity_labels = entity_labels_next_iter\n\n predicted_labels = entity_labels[0].squeeze(-1)\n return predicted_labels\n\n def forward(self, user, item):\n self.batch_size = item.shape[0]\n # [batch_size, dim]\n user_e = self.user_embedding(user)\n # entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:\n # {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}\n entities, relations = self.get_neighbors(item)\n # [batch_size, dim]\n item_e = self.aggregate(user_e, entities, relations)\n\n return user_e, item_e\n\n def calculate_ls_loss(self, user, item, target):\n r\"\"\"Calculate label smoothness loss.\n\n Args:\n user(torch.FloatTensor): the index of users, shape: [batch_size*2],\n item(torch.FloatTensor): the index of items, shape: [batch_size*2],\n target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],\n\n Returns:\n ls_loss: label smoothness loss\n \"\"\"\n user_e = self.user_embedding(user)\n entities, relations = self.get_neighbors(item)\n\n predicted_labels = self.label_smoothness_predict(\n user_e, user, entities, relations)\n ls_loss = self.bce_loss(predicted_labels, target)\n return ls_loss\n\n def calculate_loss(self, interaction):\n user = interaction[self.USER_ID]\n pos_item = interaction[self.ITEM_ID]\n neg_item = interaction[self.NEG_ITEM_ID]\n target = torch.zeros(\n len(user) * 2, dtype=torch.float32).to(self.device)\n target[:len(user)] = 1\n\n users = torch.cat((user, user))\n items = torch.cat((pos_item, neg_item))\n\n user_e, item_e = self.forward(users, items)\n predict = torch.mul(user_e, item_e).sum(dim=1)\n rec_loss = self.bce_loss(predict, target)\n\n ls_loss = self.calculate_ls_loss(users, items, target)\n l2_loss = self.l2_loss(user_e, item_e)\n\n loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss\n return loss\n\n def predict(self, interaction):\n user = interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n user_e, item_e = self.forward(user, item)\n return torch.mul(user_e, item_e).sum(dim=1)\n\n def full_sort_predict(self, interaction):\n user_index = interaction[self.USER_ID]\n item_index = torch.tensor(range(self.n_items)).to(self.device)\n\n user = torch.unsqueeze(user_index, dim=1).repeat(\n 1, item_index.shape[0])\n user = torch.flatten(user)\n item = torch.unsqueeze(item_index, dim=0).repeat(\n user_index.shape[0], 1)\n item = torch.flatten(item)\n\n user_e, item_e = self.forward(user, item)\n score = torch.mul(user_e, item_e).sum(dim=1)\n\n return score.view(-1)\n",
"# -*- coding: utf-8 -*-\n# @Time : 2020/10/6\n# @Author : Changxin Tian\n# @Email : cx.tian@outlook.com\n\nr\"\"\"\nKGCN\n################################################\n\nReference:\n Hongwei Wang et al. \"Knowledge graph convolution networks for recommender systems.\" in WWW 2019.\n\nReference code:\n https://github.com/hwwang55/KGCN\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom recbole.utils import InputType\nfrom recbole.model.abstract_recommender import KnowledgeRecommender\nfrom recbole.model.loss import BPRLoss, EmbLoss\nfrom recbole.model.init import xavier_normal_initialization\n\n\nclass KGCN(KnowledgeRecommender):\n r\"\"\"KGCN is a knowledge-based recommendation model that captures inter-item relatedness effectively by mining their\n associated attributes on the KG. To automatically discover both high-order structure information and semantic\n information of the KG, we treat KG as an undirected graph and sample from the neighbors for each entity in the KG\n as their receptive field, then combine neighborhood information with bias when calculating the representation of a\n given entity.\n \"\"\"\n input_type = InputType.PAIRWISE\n\n def __init__(self, config, dataset):\n super(KGCN, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n # number of iterations when computing entity representation\n self.n_iter = config['n_iter']\n self.aggregator_class = config['aggregator'] # which aggregator to use\n self.reg_weight = config['reg_weight'] # weight of l2 regularization\n self.neighbor_sample_size = config['neighbor_sample_size']\n\n # define embedding\n self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)\n self.entity_embedding = nn.Embedding(\n self.n_entities, self.embedding_size)\n self.relation_embedding = nn.Embedding(\n self.n_relations + 1, self.embedding_size)\n\n # sample neighbors\n kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')\n adj_entity, adj_relation = self.construct_adj(kg_graph)\n self.adj_entity, self.adj_relation = adj_entity.to(\n self.device), adj_relation.to(self.device)\n\n # define function\n self.softmax = nn.Softmax(dim=-1)\n self.linear_layers = torch.nn.ModuleList()\n for i in range(self.n_iter):\n self.linear_layers.append(nn.Linear(\n self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,\n self.embedding_size))\n self.ReLU = nn.ReLU()\n self.Tanh = nn.Tanh()\n\n self.bce_loss = nn.BCEWithLogitsLoss()\n self.l2_loss = EmbLoss()\n\n # parameters initialization\n self.apply(xavier_normal_initialization)\n\n def construct_adj(self, kg_graph):\n r\"\"\"Get neighbors and corresponding relations for each entity in the KG.\n\n Args:\n kg_graph(scipy.sparse.coo_matrix): an undirected graph\n\n Returns:\n tuple:\n - adj_entity(torch.LongTensor): each line stores the sampled neighbor entities for a given entity,\n shape: [n_entities, neighbor_sample_size]\n - adj_relation(torch.LongTensor): each line stores the corresponding sampled neighbor relations,\n shape: [n_entities, neighbor_sample_size]\n \"\"\"\n # self.logger.info('constructing knowledge graph ...')\n # treat the KG as an undirected graph\n kg_dict = dict()\n for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):\n head = triple[0]\n relation = triple[1]\n tail = triple[2]\n if head not in kg_dict:\n kg_dict[head] = []\n kg_dict[head].append((tail, relation))\n if tail not in kg_dict:\n kg_dict[tail] = []\n 
kg_dict[tail].append((head, relation))\n\n # self.logger.info('constructing adjacency matrix ...')\n # each line of adj_entity stores the sampled neighbor entities for a given entity\n # each line of adj_relation stores the corresponding sampled neighbor relations\n entity_num = kg_graph.shape[0]\n adj_entity = np.zeros(\n [entity_num, self.neighbor_sample_size], dtype=np.int64)\n adj_relation = np.zeros(\n [entity_num, self.neighbor_sample_size], dtype=np.int64)\n for entity in range(entity_num):\n if entity not in kg_dict.keys():\n adj_entity[entity] = np.array(\n [entity] * self.neighbor_sample_size)\n adj_relation[entity] = np.array(\n [0] * self.neighbor_sample_size)\n continue\n\n neighbors = kg_dict[entity]\n n_neighbors = len(neighbors)\n if n_neighbors >= self.neighbor_sample_size:\n sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,\n replace=False)\n else:\n sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,\n replace=True)\n adj_entity[entity] = np.array(\n [neighbors[i][0] for i in sampled_indices])\n adj_relation[entity] = np.array(\n [neighbors[i][1] for i in sampled_indices])\n\n return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)\n\n def get_neighbors(self, items):\n r\"\"\"Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.\n\n Args:\n items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n Returns:\n tuple:\n - entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n - relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for\n entities. 
Relations have the same shape as entities.\n \"\"\"\n items = torch.unsqueeze(items, dim=1)\n entities = [items]\n relations = []\n for i in range(self.n_iter):\n index = torch.flatten(entities[i])\n neighbor_entities = torch.reshape(torch.index_select(\n self.adj_entity, 0, index), (self.batch_size, -1))\n neighbor_relations = torch.reshape(torch.index_select(\n self.adj_relation, 0, index), (self.batch_size, -1))\n entities.append(neighbor_entities)\n relations.append(neighbor_relations)\n return entities, relations\n\n def mix_neighbor_vectors(self, neighbor_vectors, neighbor_relations, user_embeddings):\n r\"\"\"Mix neighbor vectors on user-specific graph.\n\n Args:\n neighbor_vectors(torch.FloatTensor): The embeddings of neighbor entities(items),\n shape: [batch_size, -1, neighbor_sample_size, embedding_size]\n neighbor_relations(torch.FloatTensor): The embeddings of neighbor relations,\n shape: [batch_size, -1, neighbor_sample_size, embedding_size]\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n\n Returns:\n neighbors_aggregated(torch.FloatTensor): The neighbors aggregated embeddings,\n shape: [batch_size, -1, embedding_size]\n\n \"\"\"\n avg = False\n if not avg:\n user_embeddings = torch.reshape(user_embeddings,\n (self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]\n user_relation_scores = torch.mean(user_embeddings * neighbor_relations,\n dim=-1) # [batch_size, -1, n_neighbor]\n user_relation_scores_normalized = self.softmax(\n user_relation_scores) # [batch_size, -1, n_neighbor]\n\n user_relation_scores_normalized = torch.unsqueeze(user_relation_scores_normalized,\n dim=-1) # [batch_size, -1, n_neighbor, 1]\n neighbors_aggregated = torch.mean(user_relation_scores_normalized * neighbor_vectors,\n dim=2) # [batch_size, -1, dim]\n else:\n neighbors_aggregated = torch.mean(\n neighbor_vectors, dim=2) # [batch_size, -1, dim]\n return neighbors_aggregated\n\n def aggregate(self, user_embeddings, entities, relations):\n r\"\"\"For each item, aggregate the entity representation and its neighborhood representation into a single vector.\n\n Args:\n user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]\n entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.\n dimensions of entities: {[batch_size, 1],\n [batch_size, n_neighbor],\n [batch_size, n_neighbor^2],\n ...,\n [batch_size, n_neighbor^n_iter]}\n relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.\n relations have the same shape as entities.\n\n Returns:\n item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]\n\n \"\"\"\n entity_vectors = [self.entity_embedding(i) for i in entities]\n relation_vectors = [self.relation_embedding(i) for i in relations]\n\n for i in range(self.n_iter):\n entity_vectors_next_iter = []\n for hop in range(self.n_iter - i):\n shape = (self.batch_size, -1,\n self.neighbor_sample_size, self.embedding_size)\n self_vectors = entity_vectors[hop]\n neighbor_vectors = torch.reshape(\n entity_vectors[hop + 1], shape)\n neighbor_relations = torch.reshape(\n relation_vectors[hop], shape)\n\n neighbors_agg = self.mix_neighbor_vectors(neighbor_vectors, neighbor_relations,\n user_embeddings) # [batch_size, -1, dim]\n\n if self.aggregator_class == 'sum':\n output = torch.reshape(\n self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]\n elif 
self.aggregator_class == 'neighbor':\n output = torch.reshape(\n neighbors_agg, (-1, self.embedding_size)) # [-1, dim]\n elif self.aggregator_class == 'concat':\n # [batch_size, -1, dim * 2]\n output = torch.cat([self_vectors, neighbors_agg], dim=-1)\n output = torch.reshape(\n output, (-1, self.embedding_size * 2)) # [-1, dim * 2]\n else:\n raise Exception(\"Unknown aggregator: \" +\n self.aggregator_class)\n\n output = self.linear_layers[i](output)\n # [batch_size, -1, dim]\n output = torch.reshape(\n output, [self.batch_size, -1, self.embedding_size])\n\n if i == self.n_iter - 1:\n vector = self.Tanh(output)\n else:\n vector = self.ReLU(output)\n\n entity_vectors_next_iter.append(vector)\n entity_vectors = entity_vectors_next_iter\n\n item_embeddings = torch.reshape(\n entity_vectors[0], (self.batch_size, self.embedding_size))\n\n return item_embeddings\n\n def forward(self, user, item):\n self.batch_size = item.shape[0]\n # [batch_size, dim]\n user_e = self.user_embedding(user)\n # entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:\n # {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}\n entities, relations = self.get_neighbors(item)\n # [batch_size, dim]\n item_e = self.aggregate(user_e, entities, relations)\n\n return user_e, item_e\n\n def calculate_loss(self, interaction):\n user = interaction[self.USER_ID]\n pos_item = interaction[self.ITEM_ID]\n neg_item = interaction[self.NEG_ITEM_ID]\n\n user_e, pos_item_e = self.forward(user, pos_item)\n user_e, neg_item_e = self.forward(user, neg_item)\n\n pos_item_score = torch.mul(user_e, pos_item_e).sum(dim=1)\n neg_item_score = torch.mul(user_e, neg_item_e).sum(dim=1)\n\n predict = torch.cat((pos_item_score, neg_item_score))\n target = torch.zeros(\n len(user) * 2, dtype=torch.float32).to(self.device)\n target[:len(user)] = 1\n rec_loss = self.bce_loss(predict, target)\n\n l2_loss = self.l2_loss(user_e, pos_item_e, neg_item_e)\n loss = rec_loss + self.reg_weight * l2_loss\n\n return loss\n\n def predict(self, interaction):\n user = interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n user_e, item_e = self.forward(user, item)\n return torch.mul(user_e, item_e).sum(dim=1)\n\n def full_sort_predict(self, interaction):\n user_index = interaction[self.USER_ID]\n item_index = torch.tensor(range(self.n_items)).to(self.device)\n\n user = torch.unsqueeze(user_index, dim=1).repeat(\n 1, item_index.shape[0])\n user = torch.flatten(user)\n item = torch.unsqueeze(item_index, dim=0).repeat(\n user_index.shape[0], 1)\n item = torch.flatten(item)\n\n user_e, item_e = self.forward(user, item)\n score = torch.mul(user_e, item_e).sum(dim=1)\n\n return score.view(-1)\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.ones",
"torch.logical_and",
"torch.nn.BCEWithLogitsLoss",
"torch.reshape",
"torch.mul",
"torch.nn.Softmax",
"torch.unsqueeze",
"torch.logical_not",
"torch.index_select",
"torch.nn.Embedding",
"numpy.array",
"numpy.zeros",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.flatten",
"torch.mean"
],
[
"torch.nn.Linear",
"torch.index_select",
"torch.cat",
"numpy.array",
"torch.mul",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"numpy.zeros",
"torch.nn.Tanh",
"torch.unsqueeze",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.mean",
"torch.nn.BCEWithLogitsLoss",
"torch.flatten",
"torch.nn.Embedding",
"torch.reshape"
]
] |
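In the KGNN-LS sample above, get_interaction_table() packs each user-item pair into a single integer key as user_id * offset + item_id, where offset = 10 ** len(str(n_entities)) is a power of ten larger than the entity-id range, so both ids stay recoverable from the key. A toy illustration (n_entities and the ids below are invented, not taken from the dataset row):

    # Toy illustration of the composite key used by get_interaction_table.
    n_entities = 2048
    offset = 10 ** len(str(n_entities))    # 10 ** 4 == 10000
    user_id, item_id = 7, 355
    key = user_id * offset + item_id       # 70355
    assert (key // offset, key % offset) == (user_id, item_id)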
a414351664/Bert-THUCNews
|
[
"a20749225091533b530f0e539bfaacbd3524fe99"
] |
[
"data/cnews_loader_bert.py"
] |
[
"# coding: utf-8\n\nimport sys\nfrom collections import Counter\n\nimport numpy as np\nimport tensorflow.contrib.keras as kr\nimport tensorflow as tf\nif sys.version_info[0] > 2:\n is_py3 = True\nelse:\n # reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n is_py3 = False\n\n\ndef native_word(word, encoding='utf-8'):\n \"\"\"如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码\"\"\"\n if not is_py3:\n return word.encode(encoding)\n else:\n return word\n\n\ndef native_content(content):\n if not is_py3:\n return content.decode('utf-8')\n else:\n return content\n\n\ndef open_file(filename, mode='r'):\n \"\"\"\n 常用文件操作,可在python2和python3间切换.\n mode: 'r' or 'w' for read or write\n \"\"\"\n if is_py3:\n return open(filename, mode, encoding='utf-8', errors='ignore')\n else:\n return open(filename, mode)\n\n\ndef read_file(filename):\n \"\"\"读取文件数据\"\"\"\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n # while True:\n # line = f.readline()\n try:\n label, content = line.strip().split('\\t')\n contents.append(content)\n if content:\n # contents.append(list(native_content(content)))\n labels.append(native_content(label))\n except:\n pass\n # if not line:\n # break\n return contents, labels\n\n\ndef build_vocab(train_dir, vocab_dir, vocab_size=5000):\n \"\"\"根据训练集构建词汇表,存储, x, y\"\"\"\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size - 1)\n words, _ = list(zip(*count_pairs))\n # 添加一个 <PAD> 来将所有文本pad为同一长度\n words = ['<PAD>'] + list(words)\n open_file(vocab_dir, mode='w').write('\\n'.join(words) + '\\n')\n\n\ndef read_vocab(vocab_dir):\n \"\"\"读取词汇表\"\"\"\n # words = open_file(vocab_dir).read().strip().split('\\n')\n with open_file(vocab_dir) as fp:\n # 如果是py2 则每个值都转化为unicode\n words = [native_content(_.strip()) for _ in fp.readlines()]\n word_to_id = dict(zip(words, range(len(words))))\n return words, word_to_id\n\n\ndef read_category():\n \"\"\"读取分类目录,固定\"\"\"\n categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']\n\n categories = [native_content(x) for x in categories]\n\n cat_to_id = dict(zip(categories, range(len(categories))))\n\n return categories, cat_to_id\n\n\ndef to_words(content, words):\n \"\"\"将id表示的内容转换为文字\"\"\"\n return ''.join(words[x] for x in content)\n\n\ndef process_file(filename, word_to_id, cat_to_id, max_length=600):\n \"\"\"将文件转换为id表示\"\"\"\n contents, labels = read_file(filename)\n # np.save('./train_x.npy', contents)\n # np.savetxt('./train_x.txt', contents, fmt='%s')\n data_id, label_id = [], []\n for i in range(len(contents)):\n # data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])\n label_id.append(cat_to_id[labels[i]])\n\n # 使用keras提供的pad_sequences来将文本pad为固定长度\n # x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)\n y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id)) # 将标签转换为one-hot表示\n\n return contents, y_pad\n\n\ndef batch_iter(x, y, batch_size=64):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n # 区别在于shuffle直接在原来的数组上进行操作,改变原来数组的顺序,无返回值。\n # 而permutation不直接在原来的数组上进行操作,而是返回一个新的打乱顺序的数组,并不改变原来的数组。\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = np.array(x)[indices]\n y_shuffle = y[indices]\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n # yield x[start_id:end_id], y[start_id:end_id]\n yield 
x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\ndef attention(inputs, attention_size, l2_reg_lambda):\n \"\"\"\n Attention mechanism layer.\n :param inputs: outputs of RNN/Bi-RNN layer (not final state)\n :param attention_size: linear size of attention weights\n :return: outputs of the passed RNN/Bi-RNN reduced with attention vector\n \"\"\"\n # In case of Bi-RNN input we need to concatenate outputs of its forward and backward parts\n if isinstance(inputs, tuple):\n inputs = tf.concat(2, inputs)\n\n sequence_length = inputs.get_shape()[1].value # the length of sequences processed in the antecedent RNN layer\n hidden_size = inputs.get_shape()[2].value # hidden size of the RNN layer\n\n # Attention mechanism W,b 相当于对RNN的输出做一个非线性变化,得到的结果在和u做内积\n W_omega = tf.get_variable(\"W_omega\", initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))\n b_omega = tf.get_variable(\"b_omega\", initializer=tf.random_normal([attention_size], stddev=0.1))\n u_omega = tf.get_variable(\"u_omega\", initializer=tf.random_normal([attention_size], stddev=0.1))\n\n v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))\n vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))\n exps = tf.reshape(tf.exp(vu), [-1, sequence_length])\n alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])\n\n # Output of Bi-RNN is reduced with attention vector\n output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)\n #if l2_reg_lambda > 0:\n # l2_loss += tf.nn.l2_loss(W_omega)\n # l2_loss += tf.nn.l2_loss(b_omega)\n # l2_loss += tf.nn.l2_loss(u_omega)\n # tf.add_to_collection('losses', l2_loss)\n\n return output\n"
] |
[
[
"tensorflow.exp",
"numpy.array",
"tensorflow.concat",
"tensorflow.reshape",
"numpy.arange",
"tensorflow.reduce_sum",
"tensorflow.random_normal"
]
] |
alexeypechorin/tibetan-transductive
|
[
"e2356d5c0a7cbc2f2359d9cf5b6b18729fecd8de"
] |
[
"train.py"
] |
[
"import os\nimport click\nimport numpy as np\nfrom tqdm import tqdm\nfrom models.model_loader import load_model\nfrom torchvision.transforms import Compose\nfrom dataset.data_transform import Resize, Rotation, ElasticAndSine, ColorGradGausNoise, AddWidth, Normalize, ToGray, OnlyElastic, OnlySine, ColorGrad, ColorGausNoise\nfrom dataset.text_data import TextDataset, TextDatasetRandomFont\nfrom dataset.collate_fn import text_collate\nfrom utils.data_visualization import TbSummary\nfrom lr_policy import StepLR, DannLR\nimport pickle as pkl\nimport glob\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom warpctc_pytorch import CTCLoss\n\nfrom test import test\nfrom models.new_vat import VATLoss, VATLossSign, LabeledATLoss, LabeledAtAndUnlabeledTestVatLoss, VATonRnnSign, VATonRnnCnnSign, VATonCnnSign\n\nfrom dataset.dataset_metadata import SynthDataInfo\n\n@click.command()\n@click.option('--base-data-dir', type=str,\n default=os.path.expandvars ('../Data/'),\n help='Path to base data directory (all other data paths are relative to this one).')\n@click.option('--train-data-path', type=str,\n default=os.path.expandvars ('Synthetic/Prepared/data_train.txt'),\n help='Path to training dataset (image path to line text) text file (relative to base-data-dir)')\n@click.option('--train-base-dir', type=str,\n default=os.path.expandvars(\n 'Synthetic/Prepared/Images'),\n help='Path to directory containing training images (relative to base-data-dir)')\n@click.option('--orig-eval-data-path', type=str,\n default=os.path.expandvars(\n 'Test/Prepared/im2line.txt'),\n help='Path to original test dataset (image path to line text) text file (relative to base-data-dir)')\n@click.option('--orig-eval-base-dir', type=str,\n default=os.path.expandvars(\n 'Test/Prepared/LineImages'),\n help='Path to directory containing original test images (relative to base-data-dir)')\n@click.option('--synth-eval-data-path', type=str,\n default=os.path.expandvars ('Synthetic/Prepared/data_val.txt'),\n help='Path to synthetic evaluation dataset (image path to line text) text file (relative to base-data-dir)')\n@click.option('--synth-eval-base-dir', type=str,\n default=os.path.expandvars(\n 'Synthetic/Prepared/Images'),\n help='Path to directory containing synthetic evaluation images (relative to base-data-dir)')\n@click.option('--lexicon-path', type=str,\n default=os.path.expandvars('char_to_class.pkl'),\n help='Path to alphabet lexicon (letter to id), relative to base-data-dir.')\n@click.option('--seq-proj', type=str, default=\"10x20\", help='Projection of sequence')\n@click.option('--backend', type=str, default=\"resnet18\", help='Backend network to use (default is resnet18)')\n@click.option('--snapshot', type=str, default=None, help='Path to pre-trained weights')\n@click.option('--input-height', type=int, default=64, help='Height of input images to network')\n@click.option('--base-lr', type=float, default=1e-4, help='Base learning rate.') # was e-3\n#@click.option('--lr-decay', type=float, default=1e-4, help='Base learning rate') # was 0.0001\n@click.option('--elastic-alpha', type=float, default=34, help='Elastic augmentation parameter alpha.')\n@click.option('--elastic-sigma', type=float, default=3, help='Elastic augmentation parameter sigma.')\n@click.option('--step-size', type=int, default=500, help='Step size for step lr change.')\n@click.option('--max-iter', type=int, default=6000, help='Max iterations for 
taining')\n@click.option('--batch-size', type=int, default=8, help='Batch size for training')\n@click.option('--output-dir', type=str,\n default='../Output/exp1',\n help='Path to save output snapshot')\n@click.option('--test-iter', type=int, default=1000, help='Number of iterations between test evaluation.')\n@click.option('--show-iter', type=int, default=1000, help='Number of iterations between showing images in tensorboard.')\n@click.option('--test-init', type=bool, default=False, help='Wether to test after network initialization initialization')\n@click.option('--use-gpu', type=bool, default=True, help='Whether to use the gpu')\n@click.option('--use-no-font-repeat-data', type=bool, default=True, help='Parameter to remove (always true) - whether to use random training data.')\n@click.option('--do-vat', type=bool, default=False, help='Whether to do VAT on synthetic trainig data')\n@click.option('--do-at', type=bool, default=False, help='Whether to do AT on synthetic trainig data')\n@click.option('--vat-ratio', type=float, default=1, help='Ratio of vat on train data loss vs base loss')\n@click.option('--test-vat-ratio', type=float, default=1, help='Ratio on vat on test data loss vs base loss')\n@click.option('--vat-epsilon', type=float, default=2.5, help='VAT on train hyperparameter - epsilon')\n@click.option('--vat-ip', type=int, default=1, help='VAT on train hyperparameter - number of power iterations')\n@click.option('--vat-xi', type=float, default=10., help='VAT on train hyperparameter - xi')\n@click.option('--vat-sign', type=bool, default=False, help='VAT on train hyperparameter - whether to do sign on vat loss')\n@click.option('--do-remove-augs', type=bool, default=False, help='Whether to remove some of the augmentations (for ablation study)')\n@click.option('--aug-to-remove', type=str,\n default='',\n help=\"with augmentation to remover out of ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']\")\n@click.option('--do-beam-search', type=bool, default=False, help='whether to do beam search inference in evaluation')\n@click.option('--dropout-conv', type=bool, default=False, help='Whether to do dropout between convolution and rnn.')\n@click.option('--dropout-rnn', type=bool, default=False, help='Whether to do dropout in rnn.')\n@click.option('--dropout-output', type=bool, default=False, help='Whether to do dropout after rnn.')\n@click.option('--do-ema', type=bool, default=False, help='Whether to do exponential moving average on weights')\n@click.option('--do-gray', type=bool, default=False, help='whether to use grayscale instread of rgb')\n@click.option('--do-test-vat', type=bool, default=False, help='Whether to do VAT loss on original test data')\n@click.option('--do-test-entropy', type=bool, default=False, help='Whether to do entropy loss on original test data')\n@click.option('--do-test-vat-cnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for cnn part')\n@click.option('--do-test-vat-rnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for rnn part')\n@click.option('--ada-after-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptaion on rnn part')\n@click.option('--ada-before-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptaion on cnn part')\n@click.option('--do-ada-lr', type=bool, default=False, help='Whether to do lr rule suitable of adversarial domain adaptaion (from article)')\n@click.option('--ada-ratio', type=float, 
default=1, help='Ratio of ADA loss vs base loss')\n@click.option('--rnn-hidden-size', type=int, default=128, help='Size of rnn hidden layer')\n@click.option('--do-lr-step', type=bool, default=False, help='Visualize output')\n@click.option('--dataset-name', type=str, default='tibetan', help='Dataset name, currently wiener or tibetan')\n\n\ndef main(base_data_dir, train_data_path, train_base_dir,\n orig_eval_data_path, orig_eval_base_dir,\n synth_eval_data_path, synth_eval_base_dir,\n lexicon_path, seq_proj, backend, snapshot, input_height, base_lr, elastic_alpha, elastic_sigma,\n step_size, max_iter,\n batch_size, output_dir, test_iter, show_iter, test_init, use_gpu, use_no_font_repeat_data,\n do_vat, do_at, vat_ratio, test_vat_ratio, vat_epsilon, vat_ip, vat_xi, vat_sign,\n do_remove_augs, aug_to_remove, do_beam_search,\n dropout_conv, dropout_rnn, dropout_output, do_ema, do_gray, do_test_vat, do_test_entropy, do_test_vat_cnn,\n do_test_vat_rnn,\n ada_after_rnn, ada_before_rnn, do_ada_lr, ada_ratio, rnn_hidden_size,\n do_lr_step,\n dataset_name\n ):\n if not do_lr_step and not do_ada_lr:\n raise NotImplementedError('learning rate should be either step or ada.')\n train_data_path = os.path.join(base_data_dir, train_data_path)\n train_base_dir = os.path.join(base_data_dir, train_base_dir)\n synth_eval_data_path = os.path.join(base_data_dir, synth_eval_data_path)\n synth_eval_base_dir = os.path.join(base_data_dir, synth_eval_base_dir)\n orig_eval_data_path = os.path.join(base_data_dir, orig_eval_data_path)\n orig_eval_base_dir = os.path.join(base_data_dir, orig_eval_base_dir)\n lexicon_path = os.path.join(base_data_dir, lexicon_path)\n\n all_parameters = locals()\n cuda = use_gpu\n #print(train_base_dir)\n if output_dir is not None:\n os.makedirs(output_dir, exist_ok=True)\n tb_writer = TbSummary(output_dir)\n output_dir = os.path.join(output_dir, 'model')\n os.makedirs(output_dir, exist_ok=True)\n\n with open(lexicon_path, 'rb') as f:\n lexicon = pkl.load(f)\n #print(sorted(lexicon.items(), key=operator.itemgetter(1)))\n\n with open(os.path.join(output_dir, 'params.txt'),'w') as f:\n f.writelines(str(all_parameters))\n print(all_parameters)\n print('new vat')\n\n sin_magnitude = 4\n rotate_max_angle = 2\n dataset_info = SynthDataInfo(None, None, None, dataset_name.lower())\n train_fonts = dataset_info.font_names\n\n all_args = locals()\n\n allowed_removals = ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']\n if do_remove_augs and aug_to_remove not in allowed_removals:\n raise Exception('augmentation removal value is not allowed.')\n\n\n if do_remove_augs:\n rand_trans = []\n if aug_to_remove == 'elastic':\n print('doing sine transform :)')\n rand_trans.append(OnlySine(sin_magnitude=sin_magnitude))\n elif aug_to_remove in ['sine', 'sine_rotate']:\n print('doing elastic transform :)')\n rand_trans.append(OnlyElastic(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma))\n if aug_to_remove not in ['elastic', 'sine', 'sine_rotate']:\n print('doing elastic transform :)')\n print('doing sine transform :)')\n rand_trans.append(ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude))\n if aug_to_remove not in ['rotation', 'sine_rotate']:\n print('doing rotation transform :)')\n rand_trans.append(Rotation(angle=rotate_max_angle, fill_value=255))\n if aug_to_remove not in ['color_aug', 'color_gaus', 'color_sine']:\n print('doing color_aug transform :)')\n rand_trans.append(ColorGradGausNoise())\n elif 
aug_to_remove == 'color_gaus':\n print('doing color_sine transform :)')\n rand_trans.append(ColorGrad())\n elif aug_to_remove == 'color_sine':\n print('doing color_gaus transform :)')\n rand_trans.append(ColorGausNoise())\n else:\n print('doing all transforms :)')\n rand_trans = [\n ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude),\n Rotation(angle=rotate_max_angle, fill_value=255),\n ColorGradGausNoise()]\n if do_gray:\n rand_trans = rand_trans + [Resize(hight=input_height),\n AddWidth(),\n ToGray(),\n Normalize()]\n else:\n rand_trans = rand_trans + [Resize(hight=input_height),\n AddWidth(),\n Normalize()]\n\n transform_random = Compose(rand_trans)\n if do_gray:\n transform_simple = Compose([\n Resize(hight=input_height),\n AddWidth(),\n ToGray(),\n Normalize()\n ])\n else:\n transform_simple = Compose([\n Resize(hight=input_height),\n AddWidth(),\n Normalize()\n ])\n\n if use_no_font_repeat_data:\n print('creating dataset')\n train_data = TextDatasetRandomFont(data_path=train_data_path, lexicon=lexicon,\n base_path=train_base_dir, transform=transform_random, fonts=train_fonts)\n print('finished creating dataset')\n else:\n print('train data path:\\n{}'.format(train_data_path))\n print('train_base_dir:\\n{}'.format(train_base_dir))\n train_data = TextDataset(data_path=train_data_path, lexicon=lexicon,\n base_path=train_base_dir, transform=transform_random, fonts=train_fonts)\n synth_eval_data = TextDataset(data_path=synth_eval_data_path, lexicon=lexicon,\n base_path=synth_eval_base_dir, transform=transform_random, fonts=train_fonts)\n orig_eval_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,\n base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)\n if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:\n orig_vat_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,\n base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)\n\n if ada_after_rnn or ada_before_rnn:\n orig_ada_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,\n base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)\n\n #else:\n # train_data = TestDataset(transform=transform, abc=abc).set_mode(\"train\")\n # synth_eval_data = TestDataset(transform=transform, abc=abc).set_mode(\"test\")\n # orig_eval_data = TestDataset(transform=transform, abc=abc).set_mode(\"test\")\n seq_proj = [int(x) for x in seq_proj.split('x')]\n net = load_model(lexicon=train_data.get_lexicon(), seq_proj=seq_proj, backend=backend,\n snapshot=snapshot, cuda=cuda, do_beam_search=do_beam_search,\n dropout_conv=dropout_conv,\n dropout_rnn=dropout_rnn,\n dropout_output=dropout_output,\n do_ema=do_ema,\n ada_after_rnn=ada_after_rnn, ada_before_rnn=ada_before_rnn,\n rnn_hidden_size=rnn_hidden_size\n )\n optimizer = optim.Adam(net.parameters(), lr = base_lr, weight_decay=0.0001)\n if do_ada_lr:\n print('using ada lr')\n lr_scheduler = DannLR(optimizer, max_iter=max_iter)\n elif do_lr_step:\n print('using step lr')\n lr_scheduler = StepLR(optimizer, step_size=step_size, max_iter=max_iter)\n loss_function = CTCLoss()\n\n synth_avg_ed_best = float(\"inf\")\n orig_avg_ed_best = float(\"inf\")\n epoch_count = 0\n\n if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:\n collate_vat = lambda x: text_collate(x, do_mask=True)\n vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_vat)\n vat_len = len(vat_load)\n cur_vat = 0\n vat_iter = iter(vat_load)\n if ada_after_rnn 
or ada_before_rnn:\n collate_ada = lambda x: text_collate(x, do_mask=True)\n ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_ada)\n ada_len = len(ada_load)\n cur_ada = 0\n ada_iter = iter(ada_load)\n\n loss_domain = torch.nn.NLLLoss()\n\n while True:\n collate = lambda x: text_collate(x, do_mask=(do_vat or ada_before_rnn or ada_after_rnn))\n data_loader = DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate)\n\n loss_mean_ctc = []\n loss_mean_vat = []\n loss_mean_at = []\n loss_mean_comp = []\n loss_mean_total = []\n loss_mean_test_vat = []\n loss_mean_test_pseudo = []\n loss_mean_test_rand = []\n loss_mean_ada_rnn_s = []\n loss_mean_ada_rnn_t = []\n loss_mean_ada_cnn_s = []\n loss_mean_ada_cnn_t = []\n iterator = tqdm(data_loader)\n iter_count = 0\n for iter_num, sample in enumerate(iterator):\n total_iter = (epoch_count * len(data_loader)) + iter_num\n if ((total_iter > 1) and total_iter % test_iter == 0) or (test_init and total_iter == 0):\n # epoch_count != 0 and\n\n print(\"Test phase\")\n net = net.eval()\n if do_ema:\n net.start_test()\n\n synth_acc, synth_avg_ed, synth_avg_no_stop_ed, synth_avg_loss = test(net, synth_eval_data,\n synth_eval_data.get_lexicon(),\n cuda, visualize=False,\n dataset_info=dataset_info,\n batch_size=batch_size,\n tb_writer=tb_writer,\n n_iter=total_iter,\n initial_title='val_synth',\n loss_function=loss_function,\n output_path=os.path.join(\n output_dir, 'results'),\n do_beam_search=False)\n\n\n orig_acc, orig_avg_ed, orig_avg_no_stop_ed, orig_avg_loss = test(net, orig_eval_data,\n orig_eval_data.get_lexicon(), cuda,\n visualize=False,\n dataset_info=dataset_info,\n batch_size=batch_size,\n tb_writer=tb_writer, n_iter=total_iter,\n initial_title='test_orig',\n loss_function=loss_function,\n output_path=os.path.join(output_dir,\n 'results'),\n do_beam_search=do_beam_search)\n\n\n net = net.train()\n #save periodic\n if output_dir is not None and total_iter // 30000:\n periodic_save = os.path.join(output_dir, 'periodic_save')\n os.makedirs(periodic_save, exist_ok=True)\n old_save = glob.glob(os.path.join(periodic_save,'*'))\n\n torch.save(net.state_dict(), os.path.join(output_dir, \"crnn_\" + backend + \"_\" + str(total_iter)))\n\n if orig_avg_no_stop_ed < orig_avg_ed_best:\n orig_avg_ed_best = orig_avg_no_stop_ed\n if output_dir is not None:\n torch.save(net.state_dict(), os.path.join(output_dir, \"crnn_\" + backend + \"_best\"))\n\n if synth_avg_no_stop_ed < synth_avg_ed_best:\n synth_avg_ed_best = synth_avg_no_stop_ed\n if do_ema:\n net.end_test()\n print(\"synth: avg_ed_best: {}\\t avg_ed: {}; avg_nostop_ed: {}; acc: {}\".format(synth_avg_ed_best,\n synth_avg_ed,\n synth_avg_no_stop_ed,\n synth_acc))\n print(\"orig: avg_ed_best: {}\\t avg_ed: {}; avg_nostop_ed: {}; acc: {}\".format(orig_avg_ed_best,\n orig_avg_ed,\n orig_avg_no_stop_ed,\n orig_acc))\n tb_writer.get_writer().add_scalars('data/test',\n {'synth_ed_total': synth_avg_ed,\n 'synth_ed_no_stop': synth_avg_no_stop_ed,\n 'synth_avg_loss': synth_avg_loss,\n 'orig_ed_total': orig_avg_ed,\n 'orig_ed_no_stop': orig_avg_no_stop_ed,\n 'orig_avg_loss': orig_avg_loss\n }, total_iter)\n if len(loss_mean_ctc) > 0:\n train_dict = {'mean_ctc_loss': np.mean(loss_mean_ctc)}\n if do_vat:\n train_dict = {**train_dict, **{'mean_vat_loss':np.mean(loss_mean_vat)}}\n if do_at:\n train_dict = {**train_dict, **{'mean_at_loss':np.mean(loss_mean_at)}}\n if do_test_vat:\n train_dict = {**train_dict, 
**{'mean_test_vat_loss': np.mean(loss_mean_test_vat)}}\n if do_test_vat_rnn and do_test_vat_cnn:\n train_dict = {**train_dict, **{'mean_test_vat_crnn_loss': np.mean(loss_mean_test_vat)}}\n elif do_test_vat_rnn:\n train_dict = {**train_dict, **{'mean_test_vat_rnn_loss': np.mean(loss_mean_test_vat)}}\n elif do_test_vat_cnn:\n train_dict = {**train_dict, **{'mean_test_vat_cnn_loss': np.mean(loss_mean_test_vat)}}\n if ada_after_rnn:\n train_dict = {**train_dict,\n **{'mean_ada_rnn_s_loss': np.mean(loss_mean_ada_rnn_s),\n 'mean_ada_rnn_t_loss': np.mean(loss_mean_ada_rnn_t)}}\n if ada_before_rnn:\n train_dict = {**train_dict,\n **{'mean_ada_cnn_s_loss': np.mean(loss_mean_ada_cnn_s),\n 'mean_ada_cnn_t_loss': np.mean(loss_mean_ada_cnn_t)}}\n print(train_dict)\n tb_writer.get_writer().add_scalars('data/train',\n train_dict,\n total_iter)\n '''\n # for multi-gpu support\n if sample[\"img\"].size(0) % len(gpu.split(',')) != 0:\n continue\n '''\n optimizer.zero_grad()\n imgs = Variable(sample[\"img\"])\n #print(\"images sizes are:\")\n #print(sample[\"img\"].shape)\n if do_vat or ada_after_rnn or ada_before_rnn:\n mask = sample['mask']\n labels_flatten = Variable(sample[\"seq\"]).view(-1)\n label_lens = Variable(sample[\"seq_len\"].int())\n #print(\"image sequence length is:\")\n #print(sample[\"im_seq_len\"])\n #print(\"label sequence length is:\")\n #print(sample[\"seq_len\"].view(1,-1))\n img_seq_lens = sample[\"im_seq_len\"]\n if cuda:\n imgs = imgs.cuda()\n if do_vat or ada_after_rnn or ada_before_rnn:\n mask = mask.cuda()\n\n if do_ada_lr:\n ada_p = float(iter_count) / max_iter\n lr_scheduler.update(ada_p)\n\n if ada_before_rnn or ada_after_rnn:\n if not do_ada_lr:\n ada_p = float(iter_count) / max_iter\n ada_alpha = 2. / (1. + np.exp(-10. * ada_p)) - 1\n\n\n if cur_ada >= ada_len:\n ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True,\n collate_fn=collate_ada)\n ada_len = len(ada_load)\n cur_ada = 0\n ada_iter = iter(ada_load)\n ada_batch = next(ada_iter)\n cur_ada += 1\n ada_imgs = Variable(ada_batch[\"img\"])\n ada_img_seq_lens = ada_batch[\"im_seq_len\"]\n ada_mask = ada_batch['mask'].byte()\n if cuda:\n ada_imgs = ada_imgs.cuda()\n\n _, ada_cnn, ada_rnn = net(ada_imgs, ada_img_seq_lens,\n ada_alpha=ada_alpha, mask=ada_mask)\n if ada_before_rnn:\n ada_num_features = ada_cnn.size(0)\n else:\n ada_num_features = ada_rnn.size(0)\n domain_label = torch.zeros(ada_num_features)\n domain_label = domain_label.long()\n if cuda:\n domain_label = domain_label.cuda()\n domain_label = Variable(domain_label)\n\n if ada_before_rnn:\n err_ada_cnn_t = loss_domain(ada_cnn, domain_label)\n if ada_after_rnn:\n err_ada_rnn_t = loss_domain(ada_rnn, domain_label)\n\n if do_test_vat and do_at:\n # test part!\n if cur_vat >= vat_len:\n vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,\n collate_fn=collate_vat)\n vat_len = len(vat_load)\n cur_vat = 0\n vat_iter = iter(vat_load)\n test_vat_batch = next(vat_iter)\n cur_vat += 1\n test_vat_mask = test_vat_batch['mask']\n test_vat_imgs = Variable(test_vat_batch[\"img\"])\n test_vat_img_seq_lens = test_vat_batch[\"im_seq_len\"]\n if cuda:\n test_vat_imgs = test_vat_imgs.cuda()\n test_vat_mask = test_vat_mask.cuda()\n # train part\n at_test_vat_loss = LabeledAtAndUnlabeledTestVatLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n\n at_loss, test_vat_loss = at_test_vat_loss(model=net, train_x=imgs, train_labels_flatten=labels_flatten,\n train_img_seq_lens=img_seq_lens, train_label_lens=label_lens, 
batch_size=batch_size,\n test_x=test_vat_imgs, test_seq_len=test_vat_img_seq_lens, test_mask=test_vat_mask)\n elif do_test_vat or do_test_vat_rnn or do_test_vat_cnn:\n if cur_vat >= vat_len:\n vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,\n collate_fn=collate_vat)\n vat_len = len(vat_load)\n cur_vat = 0\n vat_iter = iter(vat_load)\n vat_batch = next(vat_iter)\n cur_vat += 1\n vat_mask = vat_batch['mask']\n vat_imgs = Variable(vat_batch[\"img\"])\n vat_img_seq_lens = vat_batch[\"im_seq_len\"]\n if cuda:\n vat_imgs = vat_imgs.cuda()\n vat_mask = vat_mask.cuda()\n if do_test_vat:\n if do_test_vat_rnn or do_test_vat_cnn:\n raise \"can only do one of do_test_vat | (do_test_vat_rnn, do_test_vat_cnn)\"\n if vat_sign == True:\n test_vat_loss = VATLossSign(do_test_entropy=do_test_entropy, xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n else:\n test_vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n elif do_test_vat_rnn and do_test_vat_cnn:\n test_vat_loss = VATonRnnCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n elif do_test_vat_rnn:\n test_vat_loss = VATonRnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n elif do_test_vat_cnn:\n test_vat_loss = VATonCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n if do_test_vat_cnn and do_test_vat_rnn:\n test_vat_loss, cnn_lds, rnn_lds = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)\n elif do_test_vat:\n test_vat_loss = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)\n elif do_vat:\n vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n vat_loss = vat_loss(net, imgs, img_seq_lens, mask)\n elif do_at:\n at_loss = LabeledATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)\n at_loss = at_loss(net, imgs, labels_flatten, img_seq_lens, label_lens, batch_size)\n\n\n if ada_after_rnn or ada_before_rnn:\n preds, ada_cnn, ada_rnn = net(imgs, img_seq_lens, ada_alpha=ada_alpha, mask=mask)\n\n if ada_before_rnn:\n ada_num_features = ada_cnn.size(0)\n else:\n ada_num_features = ada_rnn.size(0)\n\n domain_label = torch.ones(ada_num_features)\n domain_label = domain_label.long()\n if cuda:\n domain_label = domain_label.cuda()\n domain_label = Variable(domain_label)\n\n if ada_before_rnn:\n err_ada_cnn_s = loss_domain(ada_cnn, domain_label)\n if ada_after_rnn:\n err_ada_rnn_s = loss_domain(ada_rnn, domain_label)\n\n else:\n preds = net(imgs, img_seq_lens)\n\n '''\n if output_dir is not None:\n if (show_iter is not None and iter_num != 0 and iter_num % show_iter == 0):\n print_data_visuals(net, tb_writer, train_data.get_lexicon(), sample[\"img\"], labels_flatten, label_lens,\n preds, ((epoch_count * len(data_loader)) + iter_num))\n '''\n loss_ctc = loss_function(preds, labels_flatten,\n Variable(torch.IntTensor(np.array(img_seq_lens))), label_lens) / batch_size\n\n if loss_ctc.data[0] in [float(\"inf\"), -float(\"inf\")]:\n print(\"warnning: loss should not be inf.\")\n continue\n total_loss = loss_ctc\n\n\n if do_vat:\n #mask = sample['mask']\n #if cuda:\n # mask = mask.cuda()\n #vat_loss = virtual_adversarial_loss(net, imgs, img_seq_lens, mask, is_training=True, do_entropy=False, epsilon=vat_epsilon, num_power_iterations=1,\n # xi=1e-6, average_loss=True)\n total_loss = total_loss + vat_ratio * vat_loss.cpu()\n if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:\n total_loss = total_loss + test_vat_ratio * test_vat_loss.cpu()\n\n if ada_before_rnn:\n total_loss = total_loss + ada_ratio * err_ada_cnn_s.cpu() + ada_ratio * err_ada_cnn_t.cpu()\n if ada_after_rnn:\n total_loss = total_loss + ada_ratio * 
err_ada_rnn_s.cpu() + ada_ratio * err_ada_rnn_t.cpu()\n\n total_loss.backward()\n nn.utils.clip_grad_norm(net.parameters(), 10.0)\n if -400 < loss_ctc.data[0] < 400:\n loss_mean_ctc.append(loss_ctc.data[0])\n if -1000 < total_loss.data[0] < 1000:\n loss_mean_total.append(total_loss.data[0])\n if len(loss_mean_total) > 100:\n loss_mean_total = loss_mean_total[-100:]\n status = \"epoch: {0:5d}; iter_num: {1:5d}; lr: {2:.2E}; loss_mean: {3:.3f}; loss: {4:.3f}\".format(epoch_count,\n lr_scheduler.last_iter,\n lr_scheduler.get_lr(),\n np.mean(loss_mean_total),\n loss_ctc.data[0])\n if ada_after_rnn:\n loss_mean_ada_rnn_s.append(err_ada_rnn_s.data[0])\n loss_mean_ada_rnn_t.append(err_ada_rnn_t.data[0])\n status += \"; ladatrnns: {0:.3f}; ladatrnnt: {1:.3f}\".format(\n err_ada_rnn_s.data[0], err_ada_rnn_t.data[0]\n )\n if ada_before_rnn:\n loss_mean_ada_cnn_s.append(err_ada_cnn_s.data[0])\n loss_mean_ada_cnn_t.append(err_ada_cnn_t.data[0])\n status += \"; ladatcnns: {0:.3f}; ladatcnnt: {1:.3f}\".format(\n err_ada_cnn_s.data[0], err_ada_cnn_t.data[0]\n )\n if do_vat:\n loss_mean_vat.append(vat_loss.data[0])\n status += \"; lvat: {0:.3f}\".format(\n vat_loss.data[0]\n )\n if do_at:\n loss_mean_at.append(at_loss.data[0])\n status += \"; lat: {0:.3f}\".format(\n at_loss.data[0]\n )\n if do_test_vat:\n loss_mean_test_vat.append(test_vat_loss.data[0])\n status += \"; l_tvat: {0:.3f}\".format(\n test_vat_loss.data[0]\n )\n if do_test_vat_rnn or do_test_vat_cnn:\n loss_mean_test_vat.append(test_vat_loss.data[0])\n if do_test_vat_rnn and do_test_vat_cnn:\n status += \"; l_tvatc: {}\".format(\n cnn_lds.data[0]\n )\n status += \"; l_tvatr: {}\".format(\n rnn_lds.data[0]\n )\n else:\n status += \"; l_tvat: {}\".format(\n test_vat_loss.data[0]\n )\n\n iterator.set_description(status)\n optimizer.step()\n if do_lr_step:\n lr_scheduler.step()\n if do_ema:\n net.udate_ema()\n iter_count += 1\n if output_dir is not None:\n torch.save(net.state_dict(), os.path.join(output_dir, \"crnn_\" + backend + \"_last\"))\n epoch_count += 1\n\n return\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.nn.NLLLoss",
"torch.zeros",
"numpy.array",
"torch.autograd.Variable",
"numpy.exp",
"numpy.mean",
"torch.ones",
"torch.utils.data.DataLoader"
]
] |
davideganna/NBA_Bet
|
[
"dba00542b8ed63a5a7290f25209270b32d18fb86"
] |
[
"NBABet/Telegram.py"
] |
[
"# --------------------- Telegram.py --------------------------------- #\n# Allows the integration with Telegram Bot.\n# ------------------------------------------------------------------- #\nfrom numpy.core.fromnumeric import around, std\nimport requests\nimport Elo\nfrom Models import Models\nimport Helper\nimport pandas as pd\nimport numpy as np\n\nclass TelegramBot():\n \"\"\"\n Allows integration with the Telegram Bot.\n \"\"\"\n def __init__(self):\n self.url = 'https://api.telegram.org/'\n with open('secrets/telegram_secrets') as f:\n lines = f.readlines()\n self.bot_token = lines[0].strip()\n self.chat_id = lines[1].strip()\n \n\n def send_message(self, d:dict):\n df = pd.read_csv('past_data/2021_2022/split_stats_per_game.csv')\n df = Helper.add_features_to_df(df)\n\n n = 3\n \n train_df = pd.read_csv('past_data/average_seasons/average_NSeasons_prod.csv')\n # Standardize the DataFrame\n std_df, scaler = Helper.standardize_DataFrame(train_df)\n\n clf = Models.build_RF_classifier(std_df)\n\n text = \"🏀 Tonight's Games: Home vs. Away 🏀\\n\\n\"\n for home, away in d.items():\n last_N_games_away = df.loc[df['Team_away'] == away].tail(n)\n last_N_games_home = df.loc[df['Team_home'] == home].tail(n)\n\n to_predict = pd.concat(\n [\n last_N_games_away[Models.away_features].mean(), \n last_N_games_home[Models.home_features].mean()\n ],\n axis=0)[Models.features]\n\n prob_home_rf, prob_away_rf = clf.predict_proba(scaler.transform(to_predict.values.reshape(1,-1)))[0]\n\n prob_away_elo, prob_home_elo = Elo.get_probas(away, home)\n\n if ((prob_home_rf > 0.5) and (prob_home_elo > 0.5)):\n prob_home = str(around((prob_home_rf + prob_home_elo)/2, decimals=3))\n odds_home = str(around(1/float(prob_home), decimals=2))\n if float(prob_home) >= 0.6:\n text = text + home + '(' + prob_home + ' --> ' + odds_home + ') vs. ' + away + '\\n\\\n RF Prob.: ' + str(around(prob_home_rf, decimals=3)) + '\\n\\\n Elo Prob.: ' + str(around(prob_home_elo, decimals=3)) + '\\n\\n'\n\n if ((prob_away_rf > 0.5) and (prob_away_elo > 0.5)):\n prob_away = str(around((prob_away_rf + prob_away_elo)/2, decimals=3))\n odds_away = str(around(1/float(prob_away), decimals=2))\n if float(prob_away) >= 0.6:\n text = text + home + ' vs. ' + away + '(' + prob_away + ' --> ' + odds_away + ')' + '\\n\\\n RF Prob.: ' + str(around(prob_away_rf, decimals=3)) + '\\n\\\n Elo Prob.: ' + str(around(prob_away_elo, decimals=3)) + '\\n\\n'\n\n query = self.url + self.bot_token + '/sendMessage?' + self.chat_id + '&text=' + text\n requests.request(\"POST\", query)"
] |
[
[
"numpy.core.fromnumeric.around",
"pandas.read_csv"
]
] |
Struth-Rourke/twitter_flask_app
|
[
"f73ad147f216ad77f8010ef6c02da4784dbfa9c8"
] |
[
"twitter_app/iris_classifier.py"
] |
[
"# twitter_app/iris_classifier.py\n\nimport os\nimport pickle\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression \n\nMODEL_FILEPATH = os.path.join(os.path.dirname(__file__), \"..\", \"models\", \"latest_model.pkl\")\n\ndef train_and_save_model():\n print(\"TRAINING THE MODEL...\")\n X, y = load_iris(return_X_y=True)\n #print(type(X), X.shape) #> <class 'numpy.ndarray'> (150, 4)\n #print(type(y), y.shape) #> <class 'numpy.ndarray'> (150,)\n classifier = LogisticRegression() # for example\n classifier.fit(X, y)\n\n print(\"SAVING THE MODEL...\")\n with open(MODEL_FILEPATH, \"wb\") as model_file:\n pickle.dump(classifier, model_file)\n\n return classifier\n\ndef load_model():\n print(\"LOADING THE MODEL...\")\n with open(MODEL_FILEPATH, \"rb\") as model_file:\n saved_model = pickle.load(model_file)\n return saved_model\n\nif __name__ == \"__main__\":\n\n #train_and_save_model()\n\n clf = load_model()\n print(\"CLASSIFIER:\", clf)\n\n X, y = load_iris(return_X_y=True) # just to have some data to use when predicting\n inputs = X[:2, :]\n print(type(inputs), inputs)\n\n result = clf.predict(inputs)\n print(\"RESULT:\", result)"
] |
[
[
"sklearn.linear_model.LogisticRegression",
"sklearn.datasets.load_iris"
]
] |
patrickctrf/scikit-learn
|
[
"d6735f4851d828984a0517de954b9b88c74919fe"
] |
[
"examples/applications/plot_cyclical_feature_engineering.py"
] |
[
"\"\"\"\n================================\nTime-related feature engineering\n================================\n\nThis notebook introduces different strategies to leverage time-related features\nfor a bike sharing demand regression task that is highly dependent on business\ncycles (days, weeks, months) and yearly season cycles.\n\nIn the process, we introduce how to perform periodic feature engineering using\nthe :class:`sklearn.preprocessing.SplineTransformer` class and its\n`extrapolation=\"periodic\"` option.\n\n\"\"\"\n# %%\n# Data exploration on the Bike Sharing Demand dataset\n# ---------------------------------------------------\n#\n# We start by loading the data from the OpenML repository.\nfrom sklearn.datasets import fetch_openml\n\nbike_sharing = fetch_openml(\"Bike_Sharing_Demand\", version=2, as_frame=True)\ndf = bike_sharing.frame\n\n# %%\n# To get a quick understanding of the periodic patterns of the data, let us\n# have a look at the average demand per hour during a week.\n#\n# Note that the week starts on a Sunday, during the weekend. We can clearly\n# distinguish the commute patterns in the morning and evenings of the work days\n# and the leisure use of the bikes on the weekends with a more spread peak\n# demand around the middle of the days:\nimport matplotlib.pyplot as plt\n\n\nfig, ax = plt.subplots(figsize=(12, 4))\naverage_week_demand = df.groupby([\"weekday\", \"hour\"]).mean()[\"count\"]\naverage_week_demand.plot(ax=ax)\n_ = ax.set(\n title=\"Average hourly bike demand during the week\",\n xticks=[i * 24 for i in range(7)],\n xticklabels=[\"Sun\", \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\"],\n xlabel=\"Time of the week\",\n ylabel=\"Number of bike rentals\",\n)\n\n# %%\n#\n# The target of the prediction problem is the absolute count of bike rentals on\n# a hourly basis:\ndf[\"count\"].max()\n\n# %% [markdown]\n#\n# Let us rescale the target variable (number of hourly bike rentals) to predict\n# a relative demand so that the mean absolute error is more easily interpreted\n# as a fraction of the maximum demand.\n#\n# .. note::\n#\n# The fit method of the models used in this notebook all minimize the\n# mean squared error to estimate the conditional mean instead of the mean\n# absolute error that would fit an estimator of the conditional median.\n#\n# When reporting performance measure on the test set in the discussion, we\n# instead choose to focus on the mean absolute error that is more\n# intuitive than the (root) mean squared error. Note however that the best\n# models for one metric are also the best for the other in this study.\ny = df[\"count\"] / 1000\n\n# %%\nfig, ax = plt.subplots(figsize=(12, 4))\ny.hist(bins=30, ax=ax)\n_ = ax.set(\n xlabel=\"Fraction of rented fleet demand\",\n ylabel=\"Number of hours\",\n)\n\n# %%\n# The input feature data frame is a time annotated hourly log of variables\n# describing the weather conditions. It includes both numerical and categorical\n# variables. Note that the time information has already been expanded into\n# several complementary columns.\n#\nX = df.drop(\"count\", axis=\"columns\")\nX\n\n# %%\n# .. 
note::\n#\n# If the time information was only present as a date or datetime column, we\n# could have expanded it into hour-in-the-day, day-in-the-week,\n# day-in-the-month, month-in-the-year using pandas:\n# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components\n#\n# We now introspect the distribution of the categorical variables, starting\n# with `\"weather\"`:\n#\nX[\"weather\"].value_counts()\n\n# %%\n# Since there are only 3 `\"heavy_rain\"` events, we cannot use this category to\n# train machine learning models with cross validation. Instead, we simplify the\n# representation by collapsing those into the `\"rain\"` category.\n#\nX[\"weather\"].replace(to_replace=\"heavy_rain\", value=\"rain\", inplace=True)\n# %%\nX[\"weather\"].value_counts()\n\n# %%\n# As expected, the `\"season\"` variable is well balanced:\n#\nX[\"season\"].value_counts()\n\n# %%\n# Time-based cross-validation\n# ---------------------------\n#\n# Since the dataset is a time-ordered event log (hourly demand), we will use a\n# time-sensitive cross-validation splitter to evaluate our demand forecasting\n# model as realistically as possible. We use a gap of 2 days between the train\n# and test side of the splits. We also limit the training set size to make the\n# performance of the CV folds more stable.\n#\n# 1000 test datapoints should be enough to quantify the performance of the\n# model. This represents a bit less than a month and a half of contiguous test\n# data:\n\nfrom sklearn.model_selection import TimeSeriesSplit\n\nts_cv = TimeSeriesSplit(\n n_splits=5,\n gap=48,\n max_train_size=10000,\n test_size=1000,\n)\n\n# %%\n# Let us manually inspect the various splits to check that the\n# `TimeSeriesSplit` works as we expect, starting with the first split:\nall_splits = list(ts_cv.split(X, y))\ntrain_0, test_0 = all_splits[0]\n\n# %%\nX.iloc[test_0]\n\n# %%\nX.iloc[train_0]\n\n# %%\n# We now inspect the last split:\ntrain_4, test_4 = all_splits[4]\n\n# %%\nX.iloc[test_4]\n\n# %%\nX.iloc[train_4]\n\n# %%\n# All is well. We are now ready to do some predictive modeling!\n#\n# Gradient Boosting\n# -----------------\n#\n# Gradient Boosting Regression with decision trees is often flexible enough to\n# efficiently handle heteorogenous tabular data with a mix of categorical and\n# numerical features as long as the number of samples is large enough.\n#\n# Here, we do minimal ordinal encoding for the categorical variables and then\n# let the model know that it should treat those as categorical variables by\n# using a dedicated tree splitting rule. Since we use an ordinal encoder, we\n# pass the list of categorical values explicitly to use a logical order when\n# encoding the categories as integer instead of the lexicographical order. 
This\n# also has the added benefit of preventing any issue with unknown categories\n# when using cross-validation.\n#\n# The numerical variable need no preprocessing and, for the sake of simplicity,\n# we only try the default hyper-parameters for this model:\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.model_selection import cross_validate\n\n\ncategorical_columns = [\n \"weather\",\n \"season\",\n \"holiday\",\n \"workingday\",\n]\ncategories = [\n [\"clear\", \"misty\", \"rain\"],\n [\"spring\", \"summer\", \"fall\", \"winter\"],\n [\"False\", \"True\"],\n [\"False\", \"True\"],\n]\nordinal_encoder = OrdinalEncoder(categories=categories)\n\n\ngbrt_pipeline = make_pipeline(\n ColumnTransformer(\n transformers=[\n (\"categorical\", ordinal_encoder, categorical_columns),\n ],\n remainder=\"passthrough\",\n ),\n HistGradientBoostingRegressor(\n categorical_features=range(4),\n ),\n)\n\n# %%\n#\n# Lets evaluate our gradient boosting model with the mean absolute error of the\n# relative demand averaged accross our 5 time-based cross-validation splits:\n\n\ndef evaluate(model, X, y, cv):\n cv_results = cross_validate(\n model,\n X,\n y,\n cv=ts_cv,\n scoring=[\"neg_mean_absolute_error\", \"neg_root_mean_squared_error\"],\n )\n mae = -cv_results[\"test_neg_mean_absolute_error\"]\n rmse = -cv_results[\"test_neg_root_mean_squared_error\"]\n print(\n f\"Mean Absolute Error: {mae.mean():.3f} +/- {mae.std():.3f}\\n\"\n f\"Root Mean Squared Error: {rmse.mean():.3f} +/- {rmse.std():.3f}\"\n )\n\n\nevaluate(gbrt_pipeline, X, y, cv=ts_cv)\n\n# %%\n# This model has an average error around 4 to 5% of the maximum demand. This is\n# quite good for a first trial without any hyper-parameter tuning! We just had\n# to make the categorical variables explicit. Note that the time related\n# features are passed as is, i.e. without processing them. But this is not much\n# of a problem for tree-based models as they can learn a non-monotonic\n# relationship between ordinal input features and the target.\n#\n# This is not the case for linear regression model as we will see in the\n# following.\n#\n# Naive linear regression\n# -----------------------\n#\n# As usual for linear models, categorical variables need to be one-hot encoded.\n# For consistency, we scale the numerical features to the same 0-1 range using\n# class:`sklearn.preprocessing.MinMaxScaler`, although in this case it does not\n# impact the results much because they are already on comparable scales:\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.linear_model import RidgeCV\nimport numpy as np\n\n\none_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\", sparse=False)\nalphas = np.logspace(-6, 6, 25)\nnaive_linear_pipeline = make_pipeline(\n ColumnTransformer(\n transformers=[\n (\"categorical\", one_hot_encoder, categorical_columns),\n ],\n remainder=MinMaxScaler(),\n ),\n RidgeCV(alphas=alphas),\n)\n\n\nevaluate(naive_linear_pipeline, X, y, cv=ts_cv)\n\n\n# %%\n#\n# The performance is not good: the average error is around 14% of the maximum\n# demand. This is more than three times higher than the average error of the\n# gradient boosting model. 
We can suspect that the naive original encoding of\n# the periodic time-related features might prevent the linear regression model\n# to properly leverage the time information: linear regression does not model\n# non-monotonic relationships between the input features and the target.\n# Non-linear terms have to be engineered in the input.\n#\n# For example, the raw numerical encoding of the `\"hour\"` feature prevents the\n# linear model from recognizing that an increase of hour in the morning from 6\n# to 8 should have a strong positive impact on the number of bike rentals while\n# a increase of similar magnitude in the evening from 18 to 20 should have a\n# strong negative impact on the predicted number of bike rentals.\n#\n# Time-steps as categories\n# ------------------------\n#\n# Since the time features are encoded in a discrete manner using integers (24\n# unique values in the \"hours\" feature), we could decide to treat those as\n# categorical variables and ignore any assumption implied by the ordering of\n# the hour values using a one-hot encoding.\n#\n# Using one-hot encoding for the time features gives the linear model a lot\n# more flexibility as we introduce one additional feature per discrete time\n# level.\none_hot_linear_pipeline = make_pipeline(\n ColumnTransformer(\n transformers=[\n (\"categorical\", one_hot_encoder, categorical_columns),\n (\"one_hot_time\", one_hot_encoder, [\"hour\", \"weekday\", \"month\"]),\n ],\n remainder=MinMaxScaler(),\n ),\n RidgeCV(alphas=alphas),\n)\n\nevaluate(one_hot_linear_pipeline, X, y, cv=ts_cv)\n\n# %%\n# The average error rate of this model is 10% which is much better than using\n# the original ordinal encoding of the time feature, confirming our intuition\n# that the linear regression model benefit from the added flexibility to not\n# treat time progression in a monotonic manner.\n#\n# However, this introduces a very large number of new features. If the time of\n# the day was represented in minutes since the start of the day instead of\n# hours, one-hot encoding would have introduced 1440 features instead of 24.\n# This could cause some significant overfitting. To avoid this we could use\n# :func:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number\n# of levels of fine-grained ordinal or numerical variables while still\n# benefitting from the non-monotonic expressivity advantages of one-hot\n# encoding.\n#\n# Finally, we also observe than one-hot encoding completely ignores the\n# ordering of the hour levels while this could be an interesting inductive bias\n# to preserve to some level. 
In the following we try to explore smooth,\n# non-monotonic encoding that locally preserves the relative ordering of time\n# features.\n#\n# Trigonometric features\n# ----------------------\n#\n# As a first attempt, we can try to encode each of those periodic features\n# using a sine and cosine transform with the matching period.\n#\n# Each ordinal time feature is transformed into 2 features that together encode\n# equivalent information in a non-monotonic way, and more importantly without\n# any jump between the first and the last value of the periodic range.\nfrom sklearn.preprocessing import FunctionTransformer\n\n\ndef sin_transformer(period):\n return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi))\n\n\ndef cos_transformer(period):\n return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi))\n\n\n# %%\n#\n# Let us visualize the effect of this feature expansion on some synthetic hour\n# data with a bit of extrapolation beyond hour=23:\nimport pandas as pd\n\nhour_df = pd.DataFrame(\n np.arange(26).reshape(-1, 1),\n columns=[\"hour\"],\n)\nhour_df[\"hour_sin\"] = sin_transformer(24).fit_transform(hour_df)[\"hour\"]\nhour_df[\"hour_cos\"] = cos_transformer(24).fit_transform(hour_df)[\"hour\"]\nhour_df.plot(x=\"hour\")\n_ = plt.title(\"Trigonometric encoding for the 'hour' feature\")\n\n# %%\n#\n# Let's use a 2D scatter plot with the hours encoded as colors to better see\n# how this representation maps the 24 hours of the day to a 2D space, akin to\n# some sort of 24 hour version of an analog clock. Note that the \"25th\" hour is\n# mapped back to the 1st hour because of the periodic nature of the sine/cosine\n# representation.\nfig, ax = plt.subplots(figsize=(7, 5))\nsp = ax.scatter(hour_df[\"hour_sin\"], hour_df[\"hour_cos\"], c=hour_df[\"hour\"])\nax.set(\n xlabel=\"sin(hour)\",\n ylabel=\"cos(hour)\",\n)\n_ = fig.colorbar(sp)\n\n# %%\n#\n# We can now build a feature extraction pipeline using this strategy:\ncyclic_cossin_transformer = ColumnTransformer(\n transformers=[\n (\"categorical\", one_hot_encoder, categorical_columns),\n (\"month_sin\", sin_transformer(12), [\"month\"]),\n (\"month_cos\", cos_transformer(12), [\"month\"]),\n (\"weekday_sin\", sin_transformer(7), [\"weekday\"]),\n (\"weekday_cos\", cos_transformer(7), [\"weekday\"]),\n (\"hour_sin\", sin_transformer(24), [\"hour\"]),\n (\"hour_cos\", cos_transformer(24), [\"hour\"]),\n ],\n remainder=MinMaxScaler(),\n)\ncyclic_cossin_linear_pipeline = make_pipeline(\n cyclic_cossin_transformer,\n RidgeCV(alphas=alphas),\n)\nevaluate(cyclic_cossin_linear_pipeline, X, y, cv=ts_cv)\n\n\n# %%\n#\n# The performance of our linear regression model with this simple feature\n# engineering is a bit better than using the original ordinal time features but\n# worse than using the one-hot encoded time features. 
We will further analyze\n# possible reasons for this disappointing outcome at the end of this notebook.\n#\n# Periodic spline features\n# ------------------------\n#\n# We can try an alternative encoding of the periodic time-related features\n# using spline transformations with a large enough number of splines, and as a\n# result a larger number of expanded features:\nfrom sklearn.preprocessing import SplineTransformer\n\n\ndef periodic_spline_transformer(period, n_splines=None, degree=3):\n if n_splines is None:\n n_splines = period\n n_knots = n_splines + 1 # periodic and include_bias is True\n return SplineTransformer(\n degree=degree,\n n_knots=n_knots,\n knots=np.linspace(0, period, n_knots).reshape(n_knots, 1),\n extrapolation=\"periodic\",\n include_bias=True,\n )\n\n\n# %%\n#\n# Again, let us visualize the effect of this feature expansion on some\n# synthetic hour data with a bit of extrapolation beyond hour=23:\nhour_df = pd.DataFrame(\n np.linspace(0, 26, 1000).reshape(-1, 1),\n columns=[\"hour\"],\n)\nsplines = periodic_spline_transformer(24, n_splines=12).fit_transform(hour_df)\nsplines_df = pd.DataFrame(\n splines,\n columns=[f\"spline_{i}\" for i in range(splines.shape[1])],\n)\npd.concat([hour_df, splines_df], axis=\"columns\").plot(x=\"hour\", cmap=plt.cm.tab20b)\n_ = plt.title(\"Periodic spline-based encoding for the 'hour' feature\")\n\n\n# %%\n# Thanks to the use of the `extrapolation=\"periodic\"` parameter, we observe\n# that the feature encoding stays smooth when extrapolating beyond midnight.\n#\n# We can now build a predictive pipeline using this alternative periodic\n# feature engineering strategy.\n#\n# It is possible to use fewer splines than discrete levels for those ordinal\n# values. This makes spline-based encoding more efficient than one-hot encoding\n# while preserving most of the expressivity:\ncyclic_spline_transformer = ColumnTransformer(\n transformers=[\n (\"categorical\", one_hot_encoder, categorical_columns),\n (\"cyclic_month\", periodic_spline_transformer(12, n_splines=6), [\"month\"]),\n (\"cyclic_weekday\", periodic_spline_transformer(7, n_splines=3), [\"weekday\"]),\n (\"cyclic_hour\", periodic_spline_transformer(24, n_splines=12), [\"hour\"]),\n ],\n remainder=MinMaxScaler(),\n)\ncyclic_spline_linear_pipeline = make_pipeline(\n cyclic_spline_transformer,\n RidgeCV(alphas=alphas),\n)\nevaluate(cyclic_spline_linear_pipeline, X, y, cv=ts_cv)\n\n# %%\n# Spline features make it possible for the linear model to successfully\n# leverage the periodic time-related features and reduce the error from ~14% to\n# ~10% of the maximum demand, which is similar to what we observed with the\n# one-hot encoded features.\n#\n# Qualitative analysis of the impact of features on linear models predictions\n# ---------------------------------------------------------------------------\n#\n# Here, we want to visualize the impact of the feature engineering choices on\n# the time related shape of the predictions.\n#\n# To do so we consider an arbitrary time-based split to compare the predictions\n# on a range of held out data points.\nnaive_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\nnaive_linear_predictions = naive_linear_pipeline.predict(X.iloc[test_0])\n\none_hot_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\none_hot_linear_predictions = one_hot_linear_pipeline.predict(X.iloc[test_0])\n\ncyclic_cossin_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\ncyclic_cossin_linear_predictions = 
cyclic_cossin_linear_pipeline.predict(X.iloc[test_0])\n\ncyclic_spline_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\ncyclic_spline_linear_predictions = cyclic_spline_linear_pipeline.predict(X.iloc[test_0])\n\n# %%\n# We visualize those predictions by zooming on the last 96 hours (4 days) of\n# the test set to get some qualitative insights:\nlast_hours = slice(-96, None)\nfig, ax = plt.subplots(figsize=(12, 4))\nfig.suptitle(\"Predictions by linear models\")\nax.plot(\n y.iloc[test_0].values[last_hours],\n \"x-\",\n alpha=0.2,\n label=\"Actual demand\",\n color=\"black\",\n)\nax.plot(naive_linear_predictions[last_hours], \"x-\", label=\"Ordinal time features\")\nax.plot(\n cyclic_cossin_linear_predictions[last_hours],\n \"x-\",\n label=\"Trigonometric time features\",\n)\nax.plot(\n cyclic_spline_linear_predictions[last_hours],\n \"x-\",\n label=\"Spline-based time features\",\n)\nax.plot(\n one_hot_linear_predictions[last_hours],\n \"x-\",\n label=\"One-hot time features\",\n)\n_ = ax.legend()\n\n# %%\n# We can draw the following conclusions from the above plot:\n#\n# - the **raw ordinal time-related features** are problematic because they do\n# not capture the natural periodicity: we observe a big jump in the\n# predictions at the end of each day when the hour features goes from 23 back\n# to 0. We can expect similar artifacts at the end of each week or each year.\n#\n# - as expected, the **trigonometric features** (sine and cosine) do not have\n# these discontinuities at midnight but the linear regression model fails to\n# leverage those features to properly model intra-day variations.\n# Using trigonometric features for higher harmonics or additional\n# trigonometric features for the natural period with different phases could\n# potentially fix this problem.\n#\n# - the **periodic spline-based features** fix those two problems at once: they\n# give more expressivity to the linear model by making it possible to focus\n# on specific hours thanks to the use of 12 splines. Furthermore the\n# `extrapolation=\"periodic\"` option enforces a smooth representation between\n# `hour=23` and `hour=0`.\n#\n# - the **one-hot encoded features** behave similarly to the periodic\n# spline-based features but are more spiky: for instance they can better\n# model the morning peak during the week days since this peak lasts shorter\n# than an hour. 
However, we will see in the following that what can be an\n# advantage for linear models is not necessarily one for more expressive\n# models.\n\n# %%\n# We can also compare the number of features extracted by each feature\n# engineering pipeline:\nnaive_linear_pipeline[:-1].transform(X).shape\n\n# %%\none_hot_linear_pipeline[:-1].transform(X).shape\n\n# %%\ncyclic_cossin_linear_pipeline[:-1].transform(X).shape\n\n# %%\ncyclic_spline_linear_pipeline[:-1].transform(X).shape\n\n# %%\n# This confirms that the one-hot encoding and the spline encoding strategies\n# create a lot more features for the time representation than the alternatives,\n# which in turn gives the downstream linear model more flexibility (degrees of\n# freedom) to avoid underfitting.\n#\n# Finally, we observe that none of the linear models can approximate the true\n# bike rentals demand, especially for the peaks that can be very sharp at rush\n# hours during the working days but much flatter during the week-ends: the most\n# accurate linear models based on splines or one-hot encoding tend to forecast\n# peaks of commuting-related bike rentals even on the week-ends and\n# under-estimate the commuting-related events during the working days.\n#\n# These systematic prediction errors reveal a form of under-fitting and can be\n# explained by the lack of non-additive modeling of the interactions between\n# features (in this case \"workingday\" and features derived from \"hours\"). This\n# issue will be addressed in the following section.\n\n# %%\n# Modeling pairwise interactions with splines and polynomial features\n# -------------------------------------------------------------------\n#\n# Linear models alone cannot model interaction effects between input features.\n# It does not help that some features are marginally non-linear as is the case\n# with features constructed by `SplineTransformer` (or one-hot encoding or\n# binning).\n#\n# However, it is possible to use the `PolynomialFeatures` class on coarse\n# grained spline-encoded hours to model the \"workingday\"/\"hours\" interaction\n# explicitly without introducing too many new variables:\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import FeatureUnion\n\n\nhour_workday_interaction = make_pipeline(\n ColumnTransformer(\n [\n (\"cyclic_hour\", periodic_spline_transformer(24, n_splines=8), [\"hour\"]),\n (\"workingday\", FunctionTransformer(lambda x: x == \"True\"), [\"workingday\"]),\n ]\n ),\n PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),\n)\n\n# %%\n# Those features are then combined with the ones already computed in the\n# previous spline-based pipeline. We can observe a nice performance improvement\n# by modeling this pairwise interaction explicitly:\n\ncyclic_spline_interactions_pipeline = make_pipeline(\n FeatureUnion(\n [\n (\"marginal\", cyclic_spline_transformer),\n (\"interactions\", hour_workday_interaction),\n ]\n ),\n RidgeCV(alphas=alphas),\n)\nevaluate(cyclic_spline_interactions_pipeline, X, y, cv=ts_cv)\n\n# %%\n# Modeling non-linear feature interactions with kernels\n# -----------------------------------------------------\n#\n# The previous analysis highlighted the need to model the interactions between\n
# `\"workingday\"` and `\"hours\"`. Another example of such a non-linear\n# interaction that we would like to model could be the impact of the rain that\n# might not be the same during the working days and the week-ends and holidays\n# for instance.\n#\n# To model all such interactions, we could either use a polynomial expansion on\n# all marginal features at once, after their spline-based expansion. However\n# this would create a quadratic number of features which can cause overfitting\n# and computational tractability issues.\n#\n# Alternatively we can use the Nyström method to compute an approximate\n# polynomial kernel expansion. Let us try the latter:\nfrom sklearn.kernel_approximation import Nystroem\n\n\ncyclic_spline_poly_pipeline = make_pipeline(\n cyclic_spline_transformer,\n Nystroem(kernel=\"poly\", degree=2, n_components=300, random_state=0),\n RidgeCV(alphas=alphas),\n)\nevaluate(cyclic_spline_poly_pipeline, X, y, cv=ts_cv)\n\n# %%\n#\n# We observe that this model can almost rival the performance of the gradient\n# boosted trees with an average error around 6% of the maximum demand.\n#\n# Note that while the final step of this pipeline is a linear regression model,\n# the intermediate steps such as the spline feature extraction and the Nyström\n# kernel approximation are highly non-linear. As a result the compound pipeline\n# is much more expressive than a simple linear regression model with raw features.\n#\n# For the sake of completeness, we also evaluate the combination of one-hot\n# encoding and kernel approximation:\n\none_hot_poly_pipeline = make_pipeline(\n ColumnTransformer(\n transformers=[\n (\"categorical\", one_hot_encoder, categorical_columns),\n (\"one_hot_time\", one_hot_encoder, [\"hour\", \"weekday\", \"month\"]),\n ],\n remainder=\"passthrough\",\n ),\n Nystroem(kernel=\"poly\", degree=2, n_components=300, random_state=0),\n RidgeCV(alphas=alphas),\n)\nevaluate(one_hot_poly_pipeline, X, y, cv=ts_cv)\n\n\n# %%\n# While one-hot features were competitive with spline-based features when using\n# linear models, this is no longer the case when using a low-rank approximation\n# of a non-linear kernel: this can be explained by the fact that spline\n# features are smoother and allow the kernel approximation to find a more\n# expressive decision function.\n#\n# Let us now have a qualitative look at the predictions of the kernel models\n# and of the gradient boosted trees that should be able to better model\n# non-linear interactions between features:\ngbrt_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\ngbrt_predictions = gbrt_pipeline.predict(X.iloc[test_0])\n\none_hot_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\none_hot_poly_predictions = one_hot_poly_pipeline.predict(X.iloc[test_0])\n\ncyclic_spline_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])\ncyclic_spline_poly_predictions = cyclic_spline_poly_pipeline.predict(X.iloc[test_0])\n\n# %%\n# Again, we zoom in on the last 4 days of the test set:\n\nlast_hours = slice(-96, None)\nfig, ax = plt.subplots(figsize=(12, 4))\nfig.suptitle(\"Predictions by non-linear regression models\")\nax.plot(\n y.iloc[test_0].values[last_hours],\n \"x-\",\n alpha=0.2,\n label=\"Actual demand\",\n color=\"black\",\n)\nax.plot(\n gbrt_predictions[last_hours],\n \"x-\",\n label=\"Gradient Boosted Trees\",\n)\nax.plot(\n one_hot_poly_predictions[last_hours],\n \"x-\",\n label=\"One-hot + polynomial kernel\",\n)\nax.plot(\n cyclic_spline_poly_predictions[last_hours],\n \"x-\",\n label=\"Splines + polynomial kernel\",\n)\n_ = ax.legend()\n\n\n# %%\n
# First, note that trees can naturally model non-linear feature interactions\n# since, by default, decision trees are allowed to grow beyond a depth of 2\n# levels.\n#\n# Here we can observe that the combination of spline features and non-linear\n# kernels works quite well and can almost rival the accuracy of the gradient\n# boosting regression trees.\n#\n# On the contrary, one-hot time features do not perform that well with the low\n# rank kernel model. In particular they significantly over-estimate the low\n# demand hours more than the competing models.\n#\n# We also observe that none of the models can successfully predict some of the\n# peak rentals at the rush hours during the working days. It is possible that\n# access to additional features would be required to further improve the\n# accuracy of the predictions. For instance, it could be useful to have access\n# to the geographical repartition of the fleet at any point in time or the\n# fraction of bikes that are immobilized because they need servicing.\n#\n# Let us finally get a more quantitative look at the prediction errors of those\n# three models using the true vs predicted demand scatter plots:\nfig, axes = plt.subplots(ncols=3, figsize=(12, 4), sharey=True)\nfig.suptitle(\"Non-linear regression models\")\npredictions = [\n one_hot_poly_predictions,\n cyclic_spline_poly_predictions,\n gbrt_predictions,\n]\nlabels = [\n \"One hot + polynomial kernel\",\n \"Splines + polynomial kernel\",\n \"Gradient Boosted Trees\",\n]\nfor ax, pred, label in zip(axes, predictions, labels):\n ax.scatter(y.iloc[test_0].values, pred, alpha=0.3, label=label)\n ax.plot([0, 1], [0, 1], \"--\", label=\"Perfect model\")\n ax.set(\n xlim=(0, 1),\n ylim=(0, 1),\n xlabel=\"True demand\",\n ylabel=\"Predicted demand\",\n )\n ax.legend()\n\n\n# %%\n# This visualization confirms the conclusions we drew from the previous plot.\n#\n# All models under-estimate the high demand events (working days rush hours),\n# but gradient boosting a bit less so. The low demand events are well predicted\n# on average by gradient boosting while the one-hot polynomial regression\n# pipeline seems to systematically over-estimate demand in that regime. Overall\n# the predictions of the gradient boosted trees are closer to the diagonal than\n# for the kernel models.\n#\n# Concluding remarks\n# ------------------\n#\n# We note that we could have obtained slightly better results for kernel models\n# by using more components (higher rank kernel approximation) at the cost of\n# longer fit and prediction durations. For large values of `n_components`, the\n# performance of the one-hot features would even match the spline features.\n#\n# The `Nystroem` + `RidgeCV` regressor could also have been replaced by\n# :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers\n# and we would have obtained quite similar results.\n#\n# The dataset we used in this case study is sampled on an hourly basis. However\n# cyclic spline-based features could model time-within-day or time-within-week\n# very efficiently with finer-grained time resolutions (for instance with\n# measurements taken every minute instead of every hour) without introducing\n# more features. One-hot encoding time representations would not offer this\n# flexibility.\n#\n# Finally, in this notebook we used `RidgeCV` because it is very efficient from\n# a computational point of view. However it models the target variable as a\n
# Gaussian random variable with constant variance. For positive regression\n# problems, it is likely that using a Poisson or Gamma distribution would make\n# more sense. This could be achieved by using\n# `GridSearchCV(TweedieRegressor(power=2), param_grid={\"alpha\": alphas})`\n# instead of `RidgeCV`.\n"
] |
[
[
"sklearn.linear_model.RidgeCV",
"sklearn.preprocessing.FunctionTransformer",
"sklearn.model_selection.cross_validate",
"sklearn.compose.ColumnTransformer",
"sklearn.preprocessing.MinMaxScaler",
"pandas.concat",
"numpy.cos",
"numpy.logspace",
"numpy.sin",
"matplotlib.pyplot.subplots",
"numpy.arange",
"sklearn.preprocessing.OrdinalEncoder",
"sklearn.datasets.fetch_openml",
"matplotlib.pyplot.title",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.kernel_approximation.Nystroem",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.pipeline.FeatureUnion",
"sklearn.model_selection.TimeSeriesSplit",
"numpy.linspace"
]
] |
arminbahl/mutant_zebrafish_behavior
|
[
"17bee04b35c23b0f93fcecac9758e6ba19872be1"
] |
[
"armin_analysis/model_tests.py"
] |
[
"import pylab as pl\nfrom get_fish_info import get_fish_info\nfrom fit_integrator_model import get_model_result, get_target_result\nimport numpy as np\nfrom pathlib import Path\nimport gmm_model_fit\nimport pandas as pd\nfrom pymoo.factory import get_problem, get_visualization, get_decomposition\n\n# import random\n#\n# for dt in [0.001, 0.002, 0.005, 0.01, 0.1]:\n#\n# tau = 4\n# Is = np.arange(0, 30, dt)\n# xs = np.empty_like(Is)\n# xs[0]\n#\n# for i in range(1, len(Is)):\n# dx = random.gauss(0.2, 5) - xs[i - 1]\n# xs[i] = xs[i - 1] + dx * dt / tau\n# pl.plot(Is, xs)\n# pl.show()\n# sdf\n\nroot_path = Path(\"/Users/arminbahl/Desktop/mutant_behavior_data/surrogate_fish1\")\n#root_path = Path(\"/Users/arminbahl/Desktop/mutant_behavior_data/scn1lab_NIBR\")\n#root_path = Path(\"/Users/arminbahl/Desktop/mutant_behavior_data/disc1_hetinx\")\ndf = pd.read_hdf(root_path / \"all_data.h5\", key=\"all_bouts\")\n#\n# df_extracted_features, df_extracted_binned_features, \\\n# df_extracted_binned_features_same_direction, \\\n# df_extracted_binned_features_heading_angle_change_histograms, \\\n# df_extracted_binned_features_inter_bout_interval_histograms = get_mean_fish_info(df)\n#\n# print(df_extracted_features)\n# pl.plot(df_extracted_features.loc[\"wt\", :][\"correctness\"])\n# pl.plot(df_extracted_features.loc[\"het\", :][\"correctness\"])\n# pl.plot(df_extracted_features.loc[\"hom\", :][\"correctness\"])\n#\n# pl.figure()\n# pl.plot(df_extracted_features.loc[\"wt\", :][\"inter_bout_interval\"])\n# pl.plot(df_extracted_features.loc[\"het\", :][\"inter_bout_interval\"])\n# pl.plot(df_extracted_features.loc[\"hom\", :][\"inter_bout_interval\"])\n#\n# pl.figure()\n# pl.plot(df_extracted_binned_features.loc[\"wt\", 0])\n# pl.plot(df_extracted_binned_features.loc[\"wt\", 1])\n# pl.plot(df_extracted_binned_features.loc[\"wt\", 2])\n# pl.plot(df_extracted_binned_features.loc[\"wt\", 3])\n#\n# pl.figure()\n# pl.plot(df_extracted_binned_features_same_direction.loc[\"wt\"])\n# pl.plot(df_extracted_binned_features_same_direction.loc[\"het\"])\n# pl.plot(df_extracted_binned_features_same_direction.loc[\"hom\"])\n#\n#\n# pl.figure()\n# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc[\"wt\", 0])\n# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc[\"wt\", 1])\n# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc[\"wt\", 2])\n# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc[\"wt\", 3])\n#\n# pl.show()\n#\n#\n# pl.show()\n#\n#\n# print(df_extracted_features)\n# gg\n# sdf\n\ngenotype = \"hom\"\n\ntarget_df_correctness_as_function_of_coherence, \\\ntarget_df_inter_bout_interval_as_function_of_coherence, \\\ntarget_df_binned_correctness, \\\ntarget_df_binned_same_direction, \\\ntarget_df_binned_features_heading_angle_change_histograms, \\\ntarget_df_binned_features_inter_bout_interval_histograms, \\\ntarget_df_gmm_fitting_results = get_target_result(root_path, genotype)\n\n\n# colors = [\"#000000\", \"#330000\", \"#990000\", \"#CC3333\"]\n#\n# for i in range(4):\n# pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel(\"stim\"), label=f\"Coherence {i*25}%\", color=colors[i], linewidth=2)\n#\n# pl.xlabel(\"Heading angle change (deg)\")\n# pl.ylabel(\"Probability\")\n# pl.legend()\n#\n# fig = pl.figure()\n# fig.suptitle(\"Target functions\")\n# pl.subplot(211)\n# pl.plot(target_df_correctness_as_function_of_coherence, 'o-', color='black')\n# pl.xlabel(\"Coherence (%)\")\n# 
pl.ylabel(\"Correctness (%)\")\n# pl.subplot(212)\n# pl.plot(target_df_inter_bout_interval_as_function_of_coherence, 'o-', color='black')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n#\n\nmedianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')\n\nerrornames = [\"Error: 'Correctness as function of coherence'\",\n \"Error: 'Inter-bout interval as function of coherence'\",\n \"Error: 'Binned correctness at 25, 50, 100 %'\",\n \"Error: 'Binned same direction'\",\n \"Error: 'Histogram weights'\"]\n\n#errornames = [\"Mixed\"]\n\nrepeat = 1\nX = np.load(root_path / f\"leaky_integrator_model2_X_{genotype}_{repeat}.npy\")\nF = np.load(root_path / f\"leaky_integrator_model2_F_{genotype}_{repeat}.npy\")\n#\n#\n# for i in range(7):\n# F[-1, :, i] = F[-1, :, i] / np.max(F[-1, :, i])\n# print(F.shape)\n#\n# i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4] + F[-1, :, 5] + F[-1, :, 6])\n# print(F[-1, i6, 0])\n# dd\n#get_decomposition(\"asf\").do(F[-1], [1, 1, 1, 1, 1, 1, 1]).argmin()\n#print(I)\n#sdfsdf\n#X = np.load(root_path / f\"leaky_integrator_model2_X_{genotype}_{repeat}_single_error.npy\")\n#F = np.load(root_path / f\"leaky_integrator_model2_F_{genotype}_{repeat}_single_error.npy\")\n\n# from pymoo.factory import get_decision_making, get_reference_directions\n#\n# ref_dirs = get_reference_directions(\"das-dennis\", 4, n_partitions=12)\n# F = get_problem(\"dtlz1\").pareto_front(ref_dirs)\n#\n# weights = np.array([10.25, 10.25, 0.25, 0.25])\n# a, pseudo_weights = get_decision_making(\"pseudo-weights\", weights).do(F, return_pseudo_weights=True)\n# pl.plot(F[:, 0], F[:,1], 'o')\n# pl.plot(F[a, 0], F[a,1], 'o')\n# pl.show()\n#\n# print(a, pseudo_weights, F.shape)\n# ghj\nfrom pymoo.factory import get_decision_making, get_reference_directions\n\n\n#weights = [1000, 1000, 1000, 0, 0, 0, 0]\n#a, pseudo_weights = get_decision_making(\"pseudo-weights\", weights).do(F[-1], return_pseudo_weights=True)\n#print(pseudo_weights[0])\n#print(a, pseudo_weights)\n#dfg\nfor i in range(5):\n #pl.hist(F[-1, :, i])\n #pl.show()\n\n #print(np.percentile(F[-1, :, i], 75))\n #print(np.max(F[-1, :, i]) - np.min(F[-1, :, i]))\n F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)\n# print(F.shape)\n#\n\n#i6 = a\n\n#i1 = np.argmin(F[-1, :, 0])\n# i2 = np.argmin(F[-1, :, 1])\n# i3 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500)\n# i4 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3])\n# i5 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25)\n# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25 + F[-1, :, 6]*5800)\n# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 6800)\n# i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)\n\ni6 = np.argmin(F[-1, :, 0] + 3*F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])\n\n\n#from pymoo.factory import get_decision_making\n#dm = get_decision_making(\"high-tradeoff\")\n\n#I = dm.do(pf)\n\n# print(F.shape)\n# np.set_printoptions(precision=4, suppress=True)\n# print((X[-1, i]))\n# #gdfgh\n# for error_i in range(len(errornames)):\n# pl.figure()\n# pl.title(errornames[error_i])\n# bp = pl.boxplot(F[:, :, error_i].T, whis=[5, 95], showfliers=False, medianprops=medianprops)\n# for gen in range(50):\n# sc = pl.scatter([gen+1], [F[gen, :, error_i].min()], s=5, marker='.', c='firebrick')\n# pl.yscale(\"log\")\n# pl.xlabel(\"Generation\")\n# pl.ylabel(\"Log 
Error\")\n# pl.show()\n# dd\n#\n# pl.figure()\n# pl.title(\"Compromise between all error functions\")\n# #error = F[:, :, 0] + F[:, :, 1]*500 + F[:, :, 3] + F[:, :, 5]*0.25 + F[:, :, 6]*500\n# error = F[:, :, 0] + F[:, :, 1]*2500 + F[:, :, 3]*5 + F[:, :, 5]*0.5 + F[:, :, 6]*1500\n#\n# bp = pl.boxplot(error.T, whis=[5, 95], showfliers=False, medianprops=medianprops)\n# for gen in range(50):\n# sc = pl.scatter([gen + 1], [error[gen].min()], s=10, marker='.', c='firebrick')\n# pl.yscale(\"log\")\n# pl.xlabel(\"Generation\")\n# pl.ylabel(\"Log Error\")\n# pl.show()\n\n# pl.figure()\n# pl.scatter(F[-1, :, 0], F[-1, :, 1], s=10, marker='.', c='C0', label='Individual')\n# pl.scatter(F[-1, i1, 0], F[-1, i1, 1], s=15, marker='o', c='C1', label=\"Best for 'Correctness as function of coherence'\")\n# pl.scatter(F[-1, i2, 0], F[-1, i2, 1], s=15, marker='o', c='C2', label=\"Best for 'Inter-bout interval as function of coherence'\")\n# pl.scatter(F[-1, i3, 0], F[-1, i3, 1], s=15, marker='o', c='C3', label=\"Compromise\")\n# pl.legend()\n# pl.xlabel(errornames[0])\n# pl.ylabel(errornames[1])\n#\n#\n# pl.figure()\n# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500, F[-1, :, 3], s=10, marker='.', c='C0', label='Individual')\n# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500, F[-1, i1, 3], s=15, marker='o', c='C1', label=\"Best for 'Correctness as function of coherence'\")\n# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500, F[-1, i2, 3], s=15, marker='o', c='C2', label=\"Best for 'Inter-bout interval as function of coherence'\")\n# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500, F[-1, i3, 3], s=15, marker='o', c='C3', label=\"Compromise between 1 and 2\")\n# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500, F[-1, i4, 3], s=15, marker='o', c='C4', label=\"Compromise between all\")\n# pl.legend()\n# pl.xlabel(\"Compromise between 1 and 2\")\n# pl.ylabel(errornames[3])\n#\n# pl.figure()\n# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3], F[-1, :, 5], s=10, marker='.', c='C0', label='Individual')\n# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3], F[-1, i1, 5], s=15, marker='o', c='C1', label=\"Best for 'Correctness as function of coherence'\")\n# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3], F[-1, i2, 5], s=15, marker='o', c='C2', label=\"Best for 'Inter-bout interval as function of coherence'\")\n# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3], F[-1, i3, 5], s=15, marker='o', c='C3', label=\"Compromise between 1 and 2\")\n# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3], F[-1, i4, 5], s=15, marker='o', c='C4', label=\"Compromise between 1, 2, and 3\")\n# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3], F[-1, i5, 5], s=15, marker='o', c='C5', label=\"Compromise between all\")\n# pl.legend()\n# pl.xlabel(\"Compromise between 1, 2, and 3\")\n# pl.ylabel(errornames[5])\n#\n#\n# pl.figure()\n# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25, F[-1, :, 6], s=10, marker='.', c='C0', label='Individual')\n# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3] + F[-1, i1, 5]*0.25, F[-1, i1, 6], s=15, marker='o', c='C1', label=\"Best for 'Correctness as function of coherence'\")\n# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3] + F[-1, i2, 5]*0.25, F[-1, i2, 6], s=15, marker='o', c='C2', label=\"Best for 'Inter-bout interval as function of coherence'\")\n# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3] + F[-1, i3, 5]*0.25, F[-1, i3, 6], s=15, marker='o', c='C3', label=\"Compromise between 1 and 2\")\n# pl.scatter(F[-1, i4, 0] + F[-1, 
i4, 1]*500 + F[-1, i4, 3] + F[-1, i4, 5]*0.25, F[-1, i4, 6], s=15, marker='o', c='C4', label=\"Compromise between 1, 2, and 3\")\n# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3] + F[-1, i5, 5]*0.25, F[-1, i5, 6], s=15, marker='o', c='C5', label=\"Compromise between 1, 2, 3, and 4\")\n# pl.scatter(F[-1, i6, 0] + F[-1, i6, 1]*500 + F[-1, i6, 3] + F[-1, i6, 5]*0.25, F[-1, i6, 6], s=15, marker='o', c='C6', label=\"Compromise between all\")\n# pl.legend()\n# pl.xlabel(\"Compromise between 1, 2, 3, and 4\")\n# pl.ylabel(errornames[6])\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i1])\n# fig.suptitle(\"Best for 'Correctness as function of coherence'\")\n# pl.subplot(211)\n# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C1')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(212)\n# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C1')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i2])\n# fig.suptitle(\"Best for 'Inter-bout interval as function of coherence'\")\n# pl.subplot(211)\n# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C2')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(212)\n# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C2')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i3])\n# fig.suptitle(\"Compromise between 'Correctness and inter-bout interval as function of coherence'\")\n# pl.subplot(211)\n# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(212)\n# pl.plot([0, 25, 50, 100], 
target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i3])\n# fig.suptitle(\"Compromise between 'Correctness and inter-bout interval as function of coherence'\")\n# pl.subplot(221)\n# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(222)\n# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n# pl.subplot(223)\n# for i in range(4):\n# pl.plot(target_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o-', color='black')\n# pl.plot(model_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o--', color='C3')\n# pl.xlabel(\"Correctness (%)\")\n# pl.ylabel(\"Time (s)\")\n#\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i4])\n# fig.suptitle(\"Compromise between all three error functions\")\n# pl.subplot(221)\n# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C4')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(222)\n# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C4')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n# pl.subplot(223)\n# for i in range(4):\n# pl.plot(target_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o-', color='black')\n# pl.plot(model_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o--', color='C4')\n# pl.xlabel(\"Correctness (%)\")\n# pl.ylabel(\"Time (s)\")\n#\n#\n# fig = pl.figure()\n# model_df_correctness_as_function_of_coherence, \\\n# model_df_inter_bout_interval_as_function_of_coherence, \\\n# model_df_binned_correctness, \\\n# model_df_binned_same_direction, \\\n# model_df_binned_features_heading_angle_change_histograms, \\\n# model_df_binned_features_inter_bout_interval_histograms, \\\n# model_df_gmm_fitting_results = get_model_result(X[-1, i5])\n# fig.suptitle(\"Compromise between all four error functions\")\n# pl.subplot(221)\n# pl.plot([0, 25, 50, 100], 
target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C5')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Correctness (%)\")\n# pl.subplot(222)\n# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\n# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C5')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Inter-bout interval (s)\")\n# pl.subplot(223)\n# for i in range(4):\n# pl.plot(target_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o-', color='black')\n# pl.plot(model_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o--', color='C5')\n# pl.xlabel(\"Correctness (%)\")\n# pl.ylabel(\"Time (s)\")\n# pl.subplot(224)\n# pl.plot(target_df_binned_same_direction, 'o-', color='black')\n# pl.plot(model_df_binned_same_direction, 'o--', color='C5')\n# pl.xlabel(\"Time since last bout (s)\")\n# pl.ylabel(\"Correctness (%)\")\n\nfig = pl.figure()\nmodel_df_correctness_as_function_of_coherence, \\\nmodel_df_inter_bout_interval_as_function_of_coherence, \\\nmodel_df_binned_correctness, \\\nmodel_df_binned_same_direction, \\\nmodel_df_binned_features_heading_angle_change_histograms, \\\nmodel_df_binned_features_inter_bout_interval_histograms, \\\nmodel_df_gmm_fitting_results = get_model_result(X[-1, i6])\nfig.suptitle(\"Compromise between all five error functions\")\npl.subplot(231)\npl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')\npl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C6')\npl.xlabel(\"Coherence (%)\")\npl.ylabel(\"Correctness (%)\")\npl.subplot(232)\npl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')\npl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C6')\npl.xlabel(\"Coherence (%)\")\npl.ylabel(\"Inter-bout interval (s)\")\npl.subplot(233)\nfor i in range(4):\n pl.plot(target_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o-', color='black')\n pl.plot(model_df_binned_correctness.loc[i, :].droplevel(\"stim\"), 'o--', color='C6')\npl.xlabel(\"Time (s)\")\npl.ylabel(\"Correctness (%)\")\npl.subplot(234)\npl.plot(target_df_binned_same_direction, 'o-', color='black')\npl.plot(model_df_binned_same_direction, 'o--', color='C6')\npl.xlabel(\"Time since last bout (s)\")\npl.ylabel(\"Correctness (%)\")\n# pl.subplot(235)\n# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results[\"w_left\"].values, '-o', color='black', label='s_left')\n# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results[\"w_center\"].values, '-o', color='black', label='s_center')\n# pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results[\"w_right\"].values, '-o', color='black', label='s_right')\n#\n# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results[\"w_left\"].values, '--o', color='C6', label='s_left')\n# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results[\"w_center\"].values, '--o', color='C6', label='s_center')\n# pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results[\"w_right\"].values, '--o', color='C6', label='s_right')\n# pl.xlabel(\"Coherence (%)\")\n# pl.ylabel(\"Weight\")\n# pl.legend()\npl.subplot(235)\nfor i in range(4):\n 
pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel(\"stim\"), color=f\"black\")\n pl.plot(model_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel(\"stim\"), color=f\"C6\", linestyle='--')\npl.xlabel(\"Heading angle change\")\npl.ylabel(\"Probability\")\npl.show()\n\n\nfound_parameters = []\nfor repeat in range(12):\n for genotype in [\"wt\", \"het\", \"hom\"]:\n X = np.load(root_path / f\"leaky_integrator_model2_X_{genotype}_{repeat}.npy\")\n F = np.load(root_path / f\"leaky_integrator_model2_F_{genotype}_{repeat}.npy\")\n\n for i in range(5):\n #F[-1, :, i] = F[-1, :, i] / np.median(F[-1, :, i])\n F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)\n #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + 5 * F[-1, :, 3] + F[-1, :, 5] + 5 * F[-1, :, 6])\n #i6 = np.argmin(F[-1, :, 0] + 5 * F[-1, :, 1] + 20 * F[-1, :, 4] + F[-1, :, 5] + 5 * F[-1, :, 6])\n i6 = np.argmin(F[-1, :, 0] + 3 * F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])\n #i6 = np.argmin(F[-1, :, 0] + 2 * F[-1, :, 1] + F[-1, :, 2] + 3 * F[-1, :, 3] + F[-1, :, 5] + F[-1, :, 6])\n #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 500 + F[-1, :, 3] + F[-1, :, 5] * 0.25 + F[-1, :, 6] * 500)\n #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 1500)\n #i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)\n\n found_parameters.append([genotype, repeat, 49] + list(X[-1, i6, :]))\n\ndf = pd.DataFrame(found_parameters,\n columns=[\"genotype\",\n \"repeat\",\n \"gen\",\n \"tau\",\n \"sigma\",\n \"T\",\n \"p_below\",\n \"p_above\"]).astype(dtype={\"repeat\": \"int64\", \"gen\": \"int64\"}, copy=False)\n\ndf.set_index([\"genotype\", 'repeat', 'gen'], inplace=True)\ndf.sort_index(inplace=True)\ndf.to_hdf(root_path / \"found_parameters.h5\", key=\"parameters\", complevel=9)\n"
] |
[
[
"numpy.argmin",
"numpy.percentile",
"pandas.DataFrame",
"numpy.load",
"pandas.read_hdf"
]
] |
pjshu/QQZoneMood
|
[
"bc949855271a4d9944e1501599755cfdfdb8cfd6"
] |
[
"src/analysis/TrainMood.py"
] |
[
"from src.analysis.QQZoneAnalysis import QQZoneAnalysis\nimport json\n\nfrom src.util.constant import BASE_DIR\nfrom src.util.util import get_mktime2\nimport pandas as pd\nimport re\nfrom src.analysis.SentimentClassify import SentimentClassify\n\n\nclass TrainMood(QQZoneAnalysis):\n \"\"\"\n 生成各种训练需要的数据集\n \"\"\"\n\n def __init__(self, use_redis=False, debug=True, file_name_head=''):\n QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)\n\n TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'\n\n self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'\n self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'\n self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'\n self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'\n\n self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'\n self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'\n self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'\n self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)\n self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'\n self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'\n\n\n self.sc = SentimentClassify()\n\n self.mood_data_df['score'] = '-1'\n self.label_dict = {'1': '旅游与运动',\n '2': '爱情与家庭',\n '3': '学习与工作',\n '4': '广告',\n '5': '生活日常',\n '6': '其他',\n '7': '人生感悟'}\n self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}\n\n def calculate_score_for_each_mood(self):\n \"\"\"\n 利用谷歌nima模型对图片进行评分\n paper: https://arxiv.org/abs/1709.05424\n pytorch model: https://github.com/truskovskiyk/nima.pytorch.git\n\n 计算每条说说中图片的平均分\n 对于没有图片的按均值进行填充\n :return:\n \"\"\"\n # nima模型预测结果文件\n self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'\n with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:\n self.image_score_dict = json.load(r)\n self.image_score_df = pd.DataFrame(self.image_score_dict)\n\n mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]\n self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score\n tid_list = self.mood_data_df['tid'].values\n for tid in tid_list:\n scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score\n if len(scores) > 0:\n self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)\n self.mood_data_df.fillna(mean_score)\n print(\"score shape:\", self.mood_data_df.shape)\n self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)\n\n def calculate_send_time(self):\n \"\"\"\n 计算每条说说的发送时间\n 分为以下五种类型:\n 0.午夜:0点-4点\n 1.凌晨:4点-8点\n 2.上午:8点-12点\n 3.下午:12点-16点\n 4.傍晚:16点-20点\n 5.晚上:20点-24点\n :return:\n \"\"\"\n day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))\n day_time_stamp = self.mood_data_df['time_stamp']\n time_diff = day_time_stamp - day_begin_time\n # 四个小时的时间差\n time_step = 60 * 60 * 4\n time_state = time_diff.apply(lambda x: x // time_step)\n self.mood_data_df['time_state'] = time_state\n print('send time:', self.mood_data_df.shape)\n\n def export_df_after_clean(self):\n try:\n self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)\n except BaseException as e:\n print(e)\n self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)\n\n def export_train_text(self):\n train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')\n train_text = 
train_text[['type', 'content']]\n train_text.columns = ['Y', 'content']\n train_text.fillna('空', inplace=True)\n train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])\n train_text.content = train_text.content.apply(lambda x: str(x).replace('\\n', ''))\n train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))\n train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))\n train_text.fillna('空', inplace=True)\n train_dataset = train_text.sample(frac=0.8)\n val_dataset = train_text.sample(frac=0.3)\n test_dataset = train_text.sample(frac=0.3)\n\n self.print_label_dict(train_text)\n self.print_label_dict(train_dataset)\n self.print_label_dict(val_dataset)\n self.print_label_dict(test_dataset)\n\n train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\\t', index=None, header=None)\n val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\\t', index=None, header=None)\n test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\\t', index=None, header=None)\n self.calculate_avg_length(train_text)\n # train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)\n\n def calculate_avg_length(self, data_df):\n num = data_df.shape[0]\n content_list = data_df.content.sum()\n print(len(content_list) / num)\n\n def calculate_sentiment(self):\n print(\"Begin to calculate sentiment...\")\n self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\\n', ''))\n self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))\n self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))\n # 使用apply会导致超过qps限额\n # sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))\n # self.mood_data_df['sentiment'] = sentiments\n self.mood_data_df['sentiments'] = -1\n for i in range(self.mood_data_df.shape[0]):\n content = self.mood_data_df.loc[i, 'content']\n sentiment = self.sc.get_sentiment_for_text(content)\n print('content:', content, 'senti:', sentiment)\n self.mood_data_df.loc[i, 'sentiments'] = sentiment\n\n self.mood_data_df = self.re_do_sentiment(self.mood_data_df)\n try:\n self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)\n except BaseException as e:\n print(e)\n self.mood_data_df.to_csv('after_sentiment.csv')\n print(\"text sentiment:\", self.mood_data_df.shape)\n\n def print_label_dict(self, data_df):\n for item in self.label_dict.values():\n print(item, data_df.loc[data_df.Y == item, :].shape[0])\n print('==========')\n\n def re_do_sentiment(self, data_df):\n # data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)\n for i in range(data_df.shape[0]):\n sentiment = data_df.loc[i, 'sentiments']\n content = data_df.loc[i, 'content']\n if sentiment == -1:\n content = content.replace('\\u2207', '')\n content = content.replace('\\ue40c', '')\n content = content.replace('\\ue412', '')\n content = content.replace('\\ue056', '')\n sentiment = self.sc.get_sentiment_for_text(str(content))\n data_df.loc[i, 'sentiments'] = sentiment\n data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)\n return data_df\n\n def export_classification_data(self):\n \"\"\"\n 导出待分类待的数据\n :return:\n \"\"\"\n data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)\n data_df = data[['content']]\n data_df['Y'] = '旅游与运动'\n data_df.fillna('空', inplace=True)\n columns = ['Y', 'content']\n data_df = data_df.ix[:, columns]\n print(data_df.shape)\n 
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\\t')\n\n def combine_text_type_data(self):\n data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)\n print('mood_after_object_data:', data.shape)\n label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)\n print('label data:', label.shape)\n label_y = label['Y']\n data['type'] = label_y\n data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)\n\n def attach_image_object_for_each_mood(self):\n with open('qq_big_image.json', 'r', encoding='utf-8') as r:\n data = json.load(r)\n\n with open('category.json', 'r', encoding='utf-8') as r:\n category = json.load(r)\n\n category_df = pd.DataFrame(category)\n image_object_df = pd.DataFrame(\n columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',\n 'furniture',\n 'electronic', 'appliance', 'indoor'])\n i = 0\n for key, value in data.items():\n tid = key.split('--')[0].split('/')[-1]\n if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:\n image_object_df.loc[i, 'tid'] = tid\n i +=1\n for item in value:\n item = item.split(' ')[0]\n super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']\n if len(super_cate) > 0:\n print(super_cate)\n image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1\n image_object_df.fillna(0, inplace=True)\n image_object_df['vector'] = 0\n image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])\n image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)\n\n def combine_image_object(self):\n image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)\n mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)\n try:\n mood_data_df.drop(['vector'], axis=1, inplace=True)\n except BaseException as e:\n print(e)\n image_object = image_object_df[['tid', 'vector']]\n print(image_object_df.shape, mood_data_df.shape)\n result = pd.merge(mood_data_df, image_object, on='tid', how='left')\n print(result.shape)\n result.to_csv(self.MOOD_DATA_AFTER_OBJECT)\n\n def export_final_train_data(self):\n data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)\n train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]\n train = train.loc[6:, :]\n self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]\n train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))\n train.type = train['type'].map(self.label_dict_reverse)\n train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)\n train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))\n train.sort_values(by='n_E', inplace=True, ascending=False)\n train.to_csv(self.FINAL_RESULT_TRAIN_DATA)\n\n def change_neg_image_score(self, score):\n if score == -1:\n return self.mean_score\n else:\n return score\n\n def change_vector_to_int(self, vector):\n vector = re.findall(re.compile('[0-9]'), vector)\n str_vector = \"\".join(vector)\n sum = 0\n length = len(str_vector)\n for i in range(length):\n sum += int(str_vector[i]) **(length - 1)\n return sum\n\n\ndef remove_waste_emoji(text):\n text = re.subn(re.compile('\\[em\\].*?\\[\\/em\\]'), '', text)[0]\n text = re.subn(re.compile('@\\{.*?\\}'), '', text)[0]\n return text\n\n\nif __name__ == '__main__':\n train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')\n # train.calculate_score_for_each_mood()\n # train.calculate_send_time()\n # train.calculate_sentiment()\n # train.export_df_after_clean()\n train.export_train_text()\n # 
train.export_classification_data()\n # train.attach_image_object_for_each_mood()\n\n # train.combine_text_type_data()\n # train.combine_image_object()\n # train.export_final_train_data()\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.merge"
]
] |
deepkashiwa/DeepUrbanEvent
|
[
"3356ee3030893e2806d23541b2650ec73dab3075"
] |
[
"meshdynamic/meshDynamic-Density.py"
] |
[
"import csv\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport time\r\nimport jismesh.utils as ju\r\nimport pandas as pd\r\n\r\ncurPath = os.path.abspath(os.path.dirname(__file__))\r\nrootPath = os.path.split(curPath)[0]\r\nsys.path.append(rootPath)\r\n\r\nfrom common.datastructure.Point import Point\r\nfrom common.datastructure.Mesh import Mesh\r\n\r\n# meshTokyo = Mesh('tokyo','500m')\r\n# GRIDNUMBER = meshTokyo.lonNum * meshTokyo.latNum\r\n# print(meshTokyo.size, GRIDNUMBER)\r\n# InterpolatedStep = 12\r\n\r\n\r\ndef getTimestamps(fileName):\r\n last_tid = ''\r\n D = []\r\n with open(fileName, \"r\") as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n tid = line[0]\r\n if last_tid != '' and last_tid != tid:\r\n break\r\n timestamp = line[1]\r\n D.append(timestamp)\r\n last_tid = tid\r\n return D\r\n\r\ndef getMesh(mesh, readFileName, writeFileName):\r\n cnt = 0\r\n wf = open(writeFileName, 'w')\r\n with open(readFileName, 'r') as rf:\r\n for line in csv.reader(rf):\r\n if cnt % 1000000 == 0:\r\n print(cnt)\r\n tid = line[0]\r\n timestamp = line[1]\r\n p = Point(float(line[2]), float(line[3]))\r\n meshid = mesh.inWhichGrid(p)\r\n wf.write(','.join([tid, timestamp, str(meshid)])+'\\n')\r\n cnt += 1\r\n wf.close()\r\n\r\ndef genMeshDynamic(mesh, fileName, meshFileName):\r\n MD = {}\r\n with open(fileName, \"r\") as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n tid = line[0]\r\n timestamp = line[1]\r\n meshid = line[2]\r\n key = (timestamp, meshid)\r\n if key in MD:\r\n MD[key].add(tid)\r\n else:\r\n MD[key] = set(tid)\r\n\r\n wf = open(meshFileName, 'w')\r\n Timestamps = getTimestamps(fileName)\r\n for ts in Timestamps:\r\n for meshid in range(mesh.lonNum * mesh.latNum):\r\n key = (ts, str(meshid))\r\n if key in MD:\r\n value = len(MD[key])\r\n else:\r\n value = 0\r\n wf.write(','.join([key[0], key[1], str(value)]) + '\\n')\r\n wf.close()\r\n\r\ndef getGrids(fileName):\r\n last_tid = ''\r\n G = []\r\n with open(fileName, \"r\") as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n tid = line[0]\r\n if last_tid != '' and last_tid != tid:\r\n break\r\n grid = line[1]\r\n G.append(grid)\r\n last_tid = tid\r\n return G\r\n\r\ndef getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):\r\n Timestamps = getTimestamps(trajFileName)\r\n TIMENUMBER = len(Timestamps)\r\n TS = {}\r\n for i in range(TIMENUMBER):\r\n TS[Timestamps[i]] = i\r\n print('getDynamicMesh Started : ', time.ctime())\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append({})\r\n with open(trajFileName, 'r') as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n # tid = line[0]\r\n timestamp = line[1]\r\n lon = float(line[2])\r\n lat = float(line[3])\r\n meshcode = ju.to_meshcode(lat, lon, meshcode_level)\r\n if meshcode in R[TS[timestamp]]:\r\n R[TS[timestamp]][meshcode] += 1\r\n else:\r\n R[TS[timestamp]][meshcode] = 1\r\n\r\n print('getDynamicMesh Count Ended : ', time.ctime())\r\n with open(dynamicFileName, 'w') as wf:\r\n wf.write(\"@dynamic-mesh\\n\")\r\n wf.write(\"@use-mesh-code,\" + str(meshcode_level))\r\n for i in range(len(R)):\r\n timestamp = Timestamps[i]\r\n for key in R[i]:\r\n meshcode = key\r\n meshpop = R[i][meshcode]\r\n wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\\n')\r\n\r\n print('getDynamicMesh Ended : ', time.ctime())\r\n\r\ndef getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):\r\n Timestamps = getTimestamps(trajFileName)\r\n TIMENUMBER = len(Timestamps)\r\n TS = {}\r\n for i in range(TIMENUMBER):\r\n 
TS[Timestamps[i]] = i\r\n print('getDynamicMesh Started : ', time.ctime())\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append({})\r\n with open(trajFileName, 'r') as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n # tid = line[0]\r\n timestamp = line[1]\r\n lon = float(line[2])\r\n lat = float(line[3])\r\n meshcode = ju.to_meshcode(lat, lon, meshcode_level)\r\n if meshcode in R[TS[timestamp]]:\r\n R[TS[timestamp]][meshcode] += 1\r\n else:\r\n R[TS[timestamp]][meshcode] = 1\r\n\r\n with open(dynamicFileName, 'w') as wf:\r\n wf.write(\"@dynamic-mesh\\n\")\r\n wf.write(\"@use-mesh-code,\" + str(meshcode_level))\r\n for i in range(len(R)):\r\n timestamp = Timestamps[i]\r\n for key in R[i]:\r\n meshcode = key\r\n meshpop = R[i][meshcode]\r\n wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\\n')\r\n\r\n print('getDynamicMesh Ended : ', time.ctime())\r\n\r\n\r\ndef getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):\r\n df1 = pd.read_csv(dynamicFileName, header=None, skiprows=2)\r\n df1.iloc[:,2] = np.log10(df1.iloc[:,2]+1) * 100\r\n\r\n df2 = pd.read_csv(dynamicFileName, header=None, skiprows=2)\r\n df2.iloc[:, 2] = np.log(df2.iloc[:,2]+1) * 100\r\n\r\n with open(dynamicFileName1, 'w') as wf:\r\n wf.write(\"@dynamic-mesh\\n\")\r\n wf.write(\"@use-mesh-code,\" + str(meshcode_level) + '\\n')\r\n\r\n with open(dynamicFileName2, 'w') as wf:\r\n wf.write(\"@dynamic-mesh\\n\")\r\n wf.write(\"@use-mesh-code,\" + str(meshcode_level) + '\\n')\r\n\r\n df1.to_csv(dynamicFileName1, header=False, index=False, mode='a')\r\n df2.to_csv(dynamicFileName2, header=False, index=False, mode='a')\r\n\r\ndef getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):\r\n Timestamps = getTimestamps(trajFileName)\r\n print('getDynamicMesh Count Ended : ', time.ctime())\r\n with open(dynamicFileName, 'w') as wf:\r\n wf.write(\"@dynamic-mesh\\n\")\r\n wf.write(\"@use-mesh-code,\" + str(meshcode_level))\r\n for i in range(len(R)):\r\n timestamp = Timestamps[i]\r\n for key in R[i]:\r\n meshcode = key\r\n meshpop = R[i][meshcode]\r\n wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\\n')\r\n\r\n print('getDynamicMesh Ended : ', time.ctime())\r\n\r\n\r\ndef genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):\r\n Timestamps = getTimestamps(fileName)\r\n startIndex = Timestamps.index(startTimestamp)\r\n endIndex = Timestamps.index(endTimestamp)\r\n Interval = [Timestamps[t] for t in range(startIndex, endIndex)]\r\n\r\n def strHH(timestamp):\r\n return timestamp[11:13] + timestamp[14:16]\r\n\r\n wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')\r\n with open(meshFileName, 'r') as rf:\r\n for line in csv.reader(rf):\r\n if line[0] in Interval:\r\n wf.write(','.join(line) + '\\n')\r\n else:\r\n pass\r\n wf.close()\r\n\r\ndef genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):\r\n Timestamps = getTimestamps(fileName)\r\n startIndex = Timestamps.index(startTimestamp)\r\n endIndex = Timestamps.index(endTimestamp)\r\n Interval = [Timestamps[t] for t in range(startIndex, endIndex)]\r\n\r\n def strHH(timestamp):\r\n return timestamp[11:13] + timestamp[14:16]\r\n\r\n wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')\r\n with open(meshFileName, 'r') as rf:\r\n for line in csv.reader(rf):\r\n if line[0] == '@dynamic-mesh' or '\"@use-mesh-code':\r\n wf.write(line + '\\n')\r\n if 
line[0] in Interval:\r\n wf.write(','.join(line) + '\\n')\r\n else:\r\n pass\r\n wf.close()\r\n\r\ndef genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):\r\n wf = open(mobmapFile, 'w')\r\n wf.write('@static-mesh' + '\\n')\r\n wf.write(','.join([str(x) for x in\r\n [mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]]) + '\\n')\r\n with open(meshFileName, 'r') as rf:\r\n for line in csv.reader(rf):\r\n if timestamp != line[0]:\r\n continue\r\n else:\r\n meshid = line[1]\r\n number = line[2]\r\n xi, yi = mesh.Index[int(meshid)]\r\n wf.write(','.join([str(item) for item in [yi, xi, number]]) + '\\n')\r\n wf.close()\r\n\r\ndef loadGTrajectory(fileName):\r\n print('loadTrajectory Started : ', time.ctime())\r\n TDB = {}\r\n with open(fileName, 'r') as rf:\r\n reader = csv.reader(rf)\r\n for line in reader:\r\n tid = line[0]\r\n # timestamp = line[1]\r\n meshid = line[2]\r\n if tid in TDB:\r\n TDB[tid].append(meshid)\r\n else:\r\n TDB[tid] = [meshid]\r\n print('loadTrajectory Ended : ', time.ctime())\r\n return TDB\r\n\r\ndef getINDEX(mesh, gTrajFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n print('getTrajectoryINDEX Started : ', time.ctime())\r\n Timestamps = getTimestamps(gTrajFileName)\r\n print('timestamps...', len(Timestamps))\r\n TDB = loadGTrajectory(gTrajFileName)\r\n INDEX = []\r\n for i in range(len(Timestamps)):\r\n INDEX.append([])\r\n for G in range(GRIDNUMBER):\r\n INDEX[i].append(set()) # set().add\r\n\r\n # print(np.array(INDEX).shape)\r\n for tid in TDB:\r\n traj = TDB[tid]\r\n for i in range(len(traj)):\r\n HH = i\r\n if traj[i] == 'None':\r\n pass\r\n else:\r\n gid = int(traj[i])\r\n INDEX[HH][gid].add(tid) # set().add\r\n return INDEX\r\n\r\ndef getGridImageIndex(mesh, window=15):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n IMG = []\r\n for g in range(GRIDNUMBER):\r\n R = np.zeros((window, window), dtype='int32')\r\n current_x, current_y = mesh.Index[g]\r\n start = 0 - window // 2\r\n end = window + start\r\n for i, dx in enumerate(list(range(start, end))):\r\n for j, dy in enumerate(list(range(start, end))):\r\n x = current_x + dx\r\n y = current_y + dy\r\n if mesh.inMesh(x, y):\r\n grid = mesh.ReverseIndex[(x, y)]\r\n R[j][i] = grid\r\n else:\r\n R[j][i] = -1\r\n R = R[::-1, :]\r\n IMG.append(R)\r\n return IMG\r\n\r\ndef genGridTransit(mesh, gTrajFileName, transitFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n print('genGridTransit Started : ', time.ctime())\r\n transitWriteFile = open(transitFileName, 'w')\r\n INDEX = getINDEX(mesh, gTrajFileName)\r\n Timestamps = getTimestamps(gTrajFileName)\r\n GridImageIndex = getGridImageIndex(mesh)\r\n print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())\r\n\r\n for i in range(len(Timestamps) - 1):\r\n for j in range(GRIDNUMBER):\r\n cur_time = i\r\n next_time = i + 1\r\n cur_grid = j\r\n transitgrids = GridImageIndex[cur_grid]\r\n Transit = np.zeros(transitgrids.shape, dtype='int32')\r\n for ii in range(transitgrids.shape[0]):\r\n for jj in range(transitgrids.shape[1]):\r\n next_grid = transitgrids[ii][jj]\r\n if next_grid != -1:\r\n trajfirst = INDEX[cur_time][cur_grid]\r\n trajsecond = INDEX[next_time][next_grid]\r\n transit_num = len(trajfirst & trajsecond)\r\n Transit[ii][jj] = transit_num\r\n else:\r\n pass\r\n FlattedTransit = Transit.reshape(-1).tolist()\r\n lineitem = [str(i), str(j)]\r\n lineitem.extend([str(t) for t in FlattedTransit])\r\n line = ','.join(lineitem) + '\\n'\r\n transitWriteFile.write(line)\r\n print('genGridTransit timestamp: ', i)\r\n 
transitWriteFile.close()\r\n print('genGridTransit Ended: ', time.ctime())\r\n\r\n# This grid transit version is for 1minutes trajectory, more accurate, not for 5minutes.\r\n# !!!!!!!!!!!!!!!!!!!! 1 minute trajectory data.\r\n# TT is supposed to be 288 not 289 because it is interval.\r\ndef genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n print('genGridTransit Started : ', time.ctime())\r\n transitWriteFile = open(transitFileName, 'w')\r\n INDEX = getINDEX(mesh, gTrajFileName)\r\n # Timestamps = getTimestamps(gTrajFileName)\r\n GridImageIndex = getGridImageIndex(mesh)\r\n print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())\r\n\r\n TT, SPAN = 24 * 12, 5\r\n for i in range(TT):\r\n for j in range(GRIDNUMBER):\r\n cur_time = i\r\n cur_grid = j\r\n transitgrids = GridImageIndex[cur_grid]\r\n Transit = np.zeros(transitgrids.shape, dtype='int32')\r\n for ii in range(transitgrids.shape[0]):\r\n for jj in range(transitgrids.shape[1]):\r\n next_grid = transitgrids[ii][jj]\r\n if next_grid != -1:\r\n cur_time_start = cur_time * SPAN\r\n cur_time_end = (cur_time + 1) * SPAN + 1\r\n SS = set()\r\n for pp in range(cur_time_start, cur_time_end):\r\n trajfirst = INDEX[pp][cur_grid]\r\n for qq in range(pp, cur_time_end):\r\n trajsecond = INDEX[qq][next_grid]\r\n SS.update(trajfirst & trajsecond)\r\n transit_num = len(SS)\r\n Transit[ii][jj] = transit_num\r\n else:\r\n pass\r\n FlattedTransit = Transit.reshape(-1).tolist()\r\n lineitem = [str(i), str(j)]\r\n lineitem.extend([str(t) for t in FlattedTransit])\r\n line = ','.join(lineitem) + '\\n'\r\n transitWriteFile.write(line)\r\n print('genGridTransit timestamp: ', i)\r\n transitWriteFile.close()\r\n print('genGridTransit Ended: ', time.ctime())\r\n\r\ndef getGridTransit(mesh, gTrajFileName, transitFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n Timestamps = getTimestamps(gTrajFileName)\r\n TIMENUMBER = len(Timestamps) - 1 # -1 is because of transit\r\n print('getGridTransit Started : ', time.ctime())\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append([])\r\n for j in range(GRIDNUMBER):\r\n R[i].append([])\r\n with open(transitFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = int(line[0])\r\n grid = int(line[1])\r\n R[timestamp][grid] = line[2:]\r\n R = np.array(R, dtype='int32') # 144, 6000, 225\r\n R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])\r\n R = np.swapaxes(R, 2, 1)\r\n R = R[:, ::-1, :, :] # 144, 75, 80, 225\r\n return R\r\n\r\ndef getGridPop(mesh, gTrajFileName, popFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n Timestamps = getTimestamps(gTrajFileName)\r\n TIMENUMBER = len(Timestamps)\r\n TS = {}\r\n for i in range(TIMENUMBER):\r\n TS[Timestamps[i]] = i\r\n print('getGridPop Started : ', time.ctime())\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append([])\r\n for j in range(GRIDNUMBER):\r\n R[i].append([])\r\n with open(popFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = TS[line[0]]\r\n grid = int(line[1])\r\n R[timestamp][grid] = int(line[2])\r\n R = np.array(R, dtype='int32') # shape 145, 6000\r\n R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)\r\n R = np.swapaxes(R, 2, 1)\r\n R = R[:, ::-1, :, :] # shape 145, 80, 80, 1\r\n return R\r\n\r\ndef getGridPopPartition(R, M, K):\r\n # Original 8*8 matrix N = 8 = M*K\r\n # M = 4 # M*M sub matrix\r\n # K = 2 # each sub 
matrix has the size of K * K\r\n P = []\r\n for i in range(M):\r\n for j in range(M):\r\n P.append(R[:, i*K:i*K+K, j*K:j*K+K, :])\r\n return np.array(P)\r\n\r\ndef getGridPop2DNumpy(mesh, gTrajFileName, popFileName):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n Timestamps = getTimestamps(gTrajFileName)\r\n TIMENUMBER = len(Timestamps)\r\n TS = {}\r\n for i in range(TIMENUMBER):\r\n TS[Timestamps[i]] = i\r\n print('getGridPop Started : ', time.ctime())\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append([])\r\n for j in range(GRIDNUMBER):\r\n R[i].append([])\r\n with open(popFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = TS[line[0]]\r\n grid = int(line[1])\r\n R[timestamp][grid] = int(line[2])\r\n R = np.array(R, dtype='int32') # shape 145, 6000\r\n return R\r\n\r\ndef getGridPopTimeInterval(mesh, popFileName):\r\n print('getGridPop', popFileName, time.ctime())\r\n\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n Timestamps = []\r\n lastTimestamp = ''\r\n with open(popFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = line[0]\r\n if timestamp != lastTimestamp:\r\n Timestamps.append(timestamp)\r\n lastTimestamp = timestamp\r\n\r\n TIMENUMBER = len(Timestamps)\r\n TS = {}\r\n for i in range(TIMENUMBER):\r\n TS[Timestamps[i]] = i\r\n\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append([])\r\n for j in range(GRIDNUMBER):\r\n R[i].append([])\r\n with open(popFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = TS[line[0]]\r\n grid = int(line[1])\r\n R[timestamp][grid] = int(line[2])\r\n R = np.array(R, dtype='int32') # shape 145, 6000\r\n R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)\r\n R = np.swapaxes(R, 2, 1)\r\n R = R[:, ::-1, :, :] # shape 145, 75, 80, 1\r\n\r\n return R\r\n\r\ndef getGridTransitTimeInterval(mesh, transitFileName):\r\n print('getGridTransit Started : ', transitFileName, time.ctime())\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n\r\n # Timestamps = []\r\n # lastTimestamp = ''\r\n # with open(transitFileName, 'r') as rf:\r\n # tansistReader = csv.reader(rf)\r\n # for line in tansistReader:\r\n # timestamp = line[0]\r\n # if timestamp != lastTimestamp:\r\n # Timestamps.append(timestamp)\r\n # lastTimestamp = timestamp\r\n # TIMENUMBER = len(Timestamps)\r\n\r\n TIMENUMBER = 24 * 12\r\n\r\n R = []\r\n for i in range(TIMENUMBER):\r\n R.append([])\r\n for j in range(GRIDNUMBER):\r\n R[i].append([])\r\n with open(transitFileName, 'r') as rf:\r\n tansistReader = csv.reader(rf)\r\n for line in tansistReader:\r\n timestamp = int(line[0])\r\n grid = int(line[1])\r\n R[timestamp][grid] = line[2:]\r\n R = np.array(R, dtype='int32') # 144, 6000, 225\r\n R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])\r\n R = np.swapaxes(R, 2, 1)\r\n R = R[:, ::-1, :, :] # 144, 75, 80, 225\r\n return R\r\n\r\ndef shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):\r\n TIMESTEP = InterpolatedStep * 2\r\n Sequence = []\r\n for i in range(R.shape[0] - TIMESTEP):\r\n Sequence.append(R[i:i+TIMESTEP, :, :, :])\r\n Sequence = np.array(Sequence, dtype='int32')\r\n INDEX = list(range(len(Sequence)))\r\n np.random.shuffle(INDEX)\r\n np.random.shuffle(INDEX)\r\n np.random.shuffle(INDEX)\r\n trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate))]\r\n testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]\r\n train = Sequence[trainINDEX]\r\n test = Sequence[testINDEX]\r\n np.save(path 
+ 'train_' + fileName, train)\r\n np.save(path + 'test_' + fileName, test)\r\n print(train.shape, test.shape)\r\n\r\n # trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate - validateRate))]\r\n # validateINDEX = INDEX[int(len(INDEX) * (1 - testRate - validateRate)):int(len(INDEX) * (1 - testRate))]\r\n # testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]\r\n # train = Sequence[trainINDEX]\r\n # validate = Sequence[validateINDEX]\r\n # test = Sequence[testINDEX]\r\n # np.save(path + 'train_' + fileName, train)\r\n # np.save(path + 'validate_' + fileName, validate)\r\n # np.save(path + 'test_' + fileName, test)\r\n\r\n # print(train.shape, validate.shape, test.shape)\r\n # or directly return not save to file because just too big.\r\n # return train, validate, test\r\n\r\ndef getShuffledTrainTest(path, fileName, TrainTest):\r\n return np.load(path + TrainTest + '_' + fileName + '.npy')\r\n\r\ndef testcode(mesh):\r\n GRIDNUMBER = mesh.lonNum * mesh.latNum\r\n window = 5\r\n R = np.zeros((window, window), dtype='int32')\r\n center = mesh.ReverseIndex[(2,2)]\r\n current_x, current_y = mesh.Index[center]\r\n start = 0 - window // 2\r\n end = window + start\r\n for i, dx in enumerate(list(range(start, end))):\r\n for j, dy in enumerate(list(range(start, end))):\r\n x = current_x + dx\r\n y = current_y + dy\r\n if mesh.inMesh(x, y):\r\n grid = mesh.ReverseIndex[(x, y)]\r\n R[j][i] = grid\r\n else:\r\n R[j][i] = -1\r\n R = R[::-1, :]\r\n print(R)\r\n\r\n for i in range(len(R)):\r\n print(R[i])\r\n for i in range(len(R)):\r\n print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])\r\n\r\n T = R.reshape(-1)\r\n print(T.tolist())\r\n\r\n P = T.reshape(window, window)\r\n print(P)\r\n\r\n print(R.shape)\r\n print(R[54][4178])\r\n print(np.max(R) == 3369)\r\n print(mesh.Index[3369])\r\n x, y = mesh.Index[3369]\r\n lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \\\r\n mesh.minLat + (y + 0.5) * mesh.dLat\r\n print(lon, lat)\r\n\r\n print(mesh.lonNum, mesh.latNum)\r\n T = np.array(range(GRIDNUMBER))\r\n T = T.reshape(mesh.lonNum, mesh.latNum)\r\n T = np.swapaxes(T, 1, 0)\r\n T = T[::-1, :]\r\n print(T)\r\n print(T.shape)\r\n\r\n\r\ndef run5min201802(mesh, dataPATH, dates):\r\n print('Now is getting trainig XS and YS...', dates)\r\n\r\n # timestamp = '2011-10-20 09:00:00'\r\n # filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \\\r\n # + timestamp[11:13] + timestamp[14:16] + timestamp[17:19]\r\n # print(filenameTime)\r\n\r\n for date in dates:\r\n # first step: from trajectory point to mesh\r\n getMesh(dataPATH + date + 'tokyo_interpo5min.csv',\r\n dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')\r\n\r\n # second step: calculate mesh population at each timestamp\r\n genMeshDynamic(dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',\r\n dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')\r\n\r\n # fourth step: mesh transit between two consecutive timestamps\r\n genGridTransit(dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',\r\n dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')\r\n\r\n\r\ndef getHHTransit(HH):\r\n assert HH <= 22, 'Hour should not be over 22.'\r\n dataPATH = '../interpo_data/'\r\n date = '20111020'\r\n R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')\r\n # (144, 72, 80, 225)\r\n R = R[HH*6:HH*6+6, :, :, :]\r\n # (6, 72, 80, 225)\r\n R = R.reshape(R.shape[0], -1, R.shape[-1])\r\n # (6, 5760, 225)\r\n R = R.transpose(1, 0, 2)\r\n # (5760, 6, 225)\r\n R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), 
int(R.shape[2]**0.5), 1)\r\n return R\r\n\r\ndef runCrowdDensity():\r\n dataPATH = '../interpo_data/'\r\n meshTokyo = Mesh('tokyo', '500m')\r\n #meshcode_level = 4\r\n alldates = [\"20110217\",\"20110218\",\"20110219\",\"20110220\", \"20110221\",\r\n\t\t\t\t\"20110222\",\"20110223\", \"20110224\", \"20110225\", \"20110226\", \"20110227\"]\r\n for date in alldates:\r\n print('this is date', date)\r\n\r\n getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv',\r\n dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv')\r\n\r\n genMeshDynamic(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',\r\n dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')\r\n\r\n\r\n# def runCrowdFlow_from5min():\r\n# from common.dataparam.Param import alldates\r\n# dataPATH = '../interpo_data/'\r\n# meshTokyo = Mesh('tokyo', '500m')\r\n# #meshcode_level = 4\r\n#\r\n# for date in alldates:\r\n# print('this is date', date)\r\n# genGridTransit(meshTokyo,\r\n# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',\r\n# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')\r\n\r\n# paper crowd flow is from 1min.!!!!!!!!!!!!\r\ndef runCrowdFlow():\r\n dataPATH = '../interpo_data/'\r\n meshTokyo = Mesh('tokyo', '500m')\r\n #meshcode_level = 4\r\n\r\n alldates = [\"20110217\", \"20110218\", \"20110219\", \"20110220\", \"20110221\",\r\n \"20110222\", \"20110223\", \"20110224\", \"20110225\", \"20110226\", \"20110227\"]\r\n \r\n for date in alldates:\r\n print('this is date', date)\r\n getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv',\r\n dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv')\r\n\r\n genGridTransit_5minutes_from_1minute(meshTokyo,\r\n dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv',\r\n dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')\r\n\r\ndef main():\r\n runCrowdDensity()\r\n\r\nif __name__ == '__main__':\r\n main()"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.log",
"numpy.load",
"numpy.save",
"numpy.random.shuffle",
"numpy.swapaxes",
"numpy.log10",
"pandas.read_csv"
]
] |
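A minimal NumPy sketch of the (time, grid) -> (time, lat, lon, channel) reshaping pipeline used by getGridPopTimeInterval / getGridTransitTimeInterval in the entry above. The concrete mesh size (80 lon x 75 lat) and the lon-major grid numbering are assumptions for illustration only, not values taken from the file.

import numpy as np

# Assumed layout for illustration: 80 lon columns x 75 lat rows, grid ids lon-major, 145 timestamps.
lonNum, latNum, T = 80, 75, 145
R = np.zeros((T, lonNum * latNum), dtype='int32')   # (time, flat grid id), as filled from the CSV

R = R.reshape(T, lonNum, latNum, 1)   # split the flat grid id into (lon, lat) and add a channel axis
R = np.swapaxes(R, 2, 1)              # -> (time, lat, lon, 1)
R = R[:, ::-1, :, :]                  # flip the latitude axis so row 0 is the top of the map
print(R.shape)                        # (145, 75, 80, 1)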
zhao-jin/Reinforcement-learning-with-tensorflow
|
[
"a4a816f1570be55016909f703fb1fd1ceae9c5a0"
] |
[
"contents/2_Q_Learning_maze/maze_env.py"
] |
[
"\"\"\"\nReinforcement learning maze example.\n\nRed rectangle: explorer.\nBlack rectangles: hells [reward = -1].\nYellow bin circle: paradise [reward = +1].\nAll other states: ground [reward = 0].\n\nThis script is the environment part of this example. The RL is in RL_brain.py.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\"\"\"\n\n\nimport numpy as np\nimport time\nimport sys\nif sys.version_info.major == 2:\n import Tkinter as tk\nelse:\n import tkinter as tk\n\n\nUNIT = 40 # pixels\nMAZE_H = 4 # grid height\nMAZE_W = 4 # grid width\n\n\nclass Maze(tk.Tk, object):\n def __init__(self):\n super(Maze, self).__init__()\n self.action_space = ['u', 'd', 'l', 'r']\n self.n_actions = len(self.action_space)\n self.title('maze')\n self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))\n self._build_maze()\n\n def _build_maze(self):\n self.canvas = tk.Canvas(self, bg='white',\n height=MAZE_H * UNIT,\n width=MAZE_W * UNIT)\n\n # create grids\n for c in range(0, MAZE_W * UNIT, UNIT):\n x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT\n self.canvas.create_line(x0, y0, x1, y1)\n for r in range(0, MAZE_H * UNIT, UNIT):\n x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r\n self.canvas.create_line(x0, y0, x1, y1)\n\n # create origin\n origin = np.array([20, 20])\n\n # hell\n hell1_center = origin + np.array([UNIT * 2, UNIT])\n self.hell1 = self.canvas.create_rectangle(\n hell1_center[0] - 15, hell1_center[1] - 15,\n hell1_center[0] + 15, hell1_center[1] + 15,\n fill='black')\n # hell\n hell2_center = origin + np.array([UNIT, UNIT * 2])\n self.hell2 = self.canvas.create_rectangle(\n hell2_center[0] - 15, hell2_center[1] - 15,\n hell2_center[0] + 15, hell2_center[1] + 15,\n fill='black')\n\n # create oval\n oval_center = origin + UNIT * 2\n self.oval = self.canvas.create_oval(\n oval_center[0] - 15, oval_center[1] - 15,\n oval_center[0] + 15, oval_center[1] + 15,\n fill='yellow')\n\n # create red rect\n self.rect = self.canvas.create_rectangle(\n origin[0] - 15, origin[1] - 15,\n origin[0] + 15, origin[1] + 15,\n fill='red')\n\n # pack all\n self.canvas.pack()\n\n def reset(self):\n self.update()\n time.sleep(0.1)\n self.canvas.delete(self.rect)\n origin = np.array([20, 20])\n self.rect = self.canvas.create_rectangle(\n origin[0] - 15, origin[1] - 15,\n origin[0] + 15, origin[1] + 15,\n fill='red')\n # return observation\n return self.canvas.coords(self.rect)\n\n def step(self, action):\n s = self.canvas.coords(self.rect)\n base_action = np.array([0, 0])\n if action == 0: # up\n if s[1] > UNIT:\n base_action[1] -= UNIT\n elif action == 1: # down\n if s[1] < (MAZE_H - 1) * UNIT:\n base_action[1] += UNIT\n elif action == 2: # right\n if s[0] < (MAZE_W - 1) * UNIT:\n base_action[0] += UNIT\n elif action == 3: # left\n if s[0] > UNIT:\n base_action[0] -= UNIT\n\n self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent\n\n s_ = self.canvas.coords(self.rect) # next state\n\n # reward function\n if s_ == self.canvas.coords(self.oval):\n reward = 1\n done = True\n #s_ = 'terminal'\n elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:\n reward = -1\n done = True\n #s_ = 'terminal'\n else:\n reward = 0\n done = False\n\n return s_, reward, done\n\n def render(self):\n time.sleep(0.01)\n self.update()\n\n\ndef update():\n for t in range(10):\n s = env.reset()\n while True:\n env.render()\n a = 1\n s, r, done = env.step(a)\n if done:\n break\n\nif __name__ == '__main__':\n env = Maze()\n env.after(100, update)\n env.mainloop()"
] |
[
[
"numpy.array"
]
] |
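A small usage sketch for the Maze interface in the entry above, driving it with a random policy. This is purely illustrative: the repository's actual agent lives in RL_brain.py, which is not part of this entry, and the action indices follow the mapping in step() (0=up, 1=down, 2=right, 3=left).

import random

def run_random(env, episodes=3):
    # Hypothetical demo loop; a real agent would pick actions from a learned Q-table instead.
    for _ in range(episodes):
        s = env.reset()                          # observation: canvas coords of the red rectangle
        done = False
        while not done:
            env.render()
            a = random.randrange(env.n_actions)  # pick one of the four moves uniformly at random
            s, r, done = env.step(a)

# env = Maze(); env.after(100, lambda: run_random(env)); env.mainloop()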
tilmanbeck/adapter-transformers
|
[
"ed42ced6983891060bb160c5c4f2c5d64d2c205c",
"ed42ced6983891060bb160c5c4f2c5d64d2c205c"
] |
[
"src/transformers/trainer_tf.py",
"src/transformers/modeling_tf_transfo_xl.py"
] |
[
"\"\"\"Tensorflow trainer class.\"\"\"\n\nimport logging\nimport math\nimport os\nfrom typing import Callable, Dict, Optional\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .modeling_tf_utils import TFPreTrainedModel, shape_list\nfrom .optimization_tf import GradientAccumulator, create_optimizer\nfrom .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput\nfrom .training_args_tf import TFTrainingArguments\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TFTrainer:\n model: TFPreTrainedModel\n args: TFTrainingArguments\n # something similar to a PT Dataset.\n # This is just temporary before to have\n # a framework-agnostic approach for datasets.\n train_dataset: Optional[tf.data.Dataset]\n eval_dataset: Optional[tf.data.Dataset]\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None\n prediction_loss_only: bool\n\n def __init__(\n self,\n model: TFPreTrainedModel,\n args: TFTrainingArguments,\n train_dataset: Optional[tf.data.Dataset] = None,\n eval_dataset: Optional[tf.data.Dataset] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n prediction_loss_only=False,\n ):\n self.model = model\n self.args = args\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.compute_metrics = compute_metrics\n self.prediction_loss_only = prediction_loss_only\n self.gradient_accumulator = GradientAccumulator()\n\n self._setup_training()\n\n def _setup_training(self) -> None:\n \"\"\"\n Setup the different steps to train a model:\n - check if all the data are given\n - create the proper strategy\n - create the features\n - prepare the model settings\n \"\"\"\n self._prepare_dataset()\n\n with self.args.strategy.scope():\n self._create_optimizer()\n _ = self.optimizer.iterations\n self._set_loss_and_metric()\n self._create_checkpoint_manager()\n self._create_summary_writer()\n\n def _set_loss_and_metric(self) -> None:\n \"\"\"\n Create the training loss and metric with their name. 
Allowed names are those listed\n in the Tensorflow documentation and those contained in the transformers library.\n \"\"\"\n try:\n self.loss = tf.keras.losses.get(\n {\n \"class_name\": self.args.loss_name,\n \"config\": {\"from_logits\": True, \"reduction\": tf.keras.losses.Reduction.NONE},\n }\n )\n except TypeError:\n self.loss = tf.keras.losses.get(\n {\"class_name\": self.args.loss_name, \"config\": {\"reduction\": tf.keras.losses.Reduction.NONE}}\n )\n\n def _create_summary_writer(self) -> None:\n \"\"\"\n Create a summary writer to be able to read the logs in Tensorboard.\n \"\"\"\n self.writer = tf.summary.create_file_writer(self.args.logging_dir)\n\n def _prepare_dataset(self) -> None:\n \"\"\"\n Prepare the training, validation and test data.\n \"\"\"\n if self.train_dataset is not None:\n self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()\n\n if self.args.max_steps > 0:\n self.train_steps = self.args.max_steps\n else:\n self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)\n\n self.train_dataset = (\n self.train_dataset.cache()\n .shuffle(self.num_train_examples)\n .batch(self.args.train_batch_size)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n if self.args.max_steps > 0:\n self.train_dataset = self.train_dataset.repeat(-1)\n\n self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)\n else:\n self.train_steps = 0\n\n if self.eval_dataset is not None:\n self.eval_dataset = (\n self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)\n )\n self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)\n\n def _create_optimizer(self) -> None:\n \"\"\"\n Create the training optimizer with its name. 
Allowed names are those listed\n in the Tensorflow documentation and those contained in the transformers library.\n \"\"\"\n if self.args.optimizer_name == \"adamw\":\n self.optimizer = create_optimizer(\n self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr\n )\n else:\n try:\n self.optimizer = tf.keras.optimizers.get(\n {\n \"class_name\": self.args.optimizer_name,\n \"config\": {\"learning_rate\": self.args.learning_rate, \"epsilon\": self.args.adam_epsilon},\n }\n )\n except TypeError:\n # This is for the case where the optimizer is not Adam-like such as SGD\n self.optimizer = tf.keras.optimizers.get(\n {\"class_name\": self.args.optimizer_name, \"config\": {\"learning_rate\": self.args.learning_rate}}\n )\n logger.info(\"Created an/a {} optimizer\".format(self.args.optimizer_name))\n\n def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:\n \"\"\"\n Create a checkpoint manager in order to be able to make the training\n fault-tolerant.\n Args:\n max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.\n load_model: if we want to start the training from the latest checkpoint.\n \"\"\"\n ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)\n\n self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)\n\n if load_model:\n ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()\n\n @tf.function\n def _evaluate_steps(self, per_replica_features, per_replica_labels):\n \"\"\"\n One step evaluation across replica.\n Args:\n per_replica_features: the batched features.\n per_replica_labels: the batched labels.\n Returns:\n The loss corresponding to the given batch.\n \"\"\"\n per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(\n self._run_model, args=(per_replica_features, per_replica_labels, False)\n )\n\n try:\n reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)\n except ValueError:\n reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)\n\n return reduced_loss, per_replica_logits\n\n def _prediction_loop(\n self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None\n ) -> PredictionOutput:\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Batch size = %d\", self.args.eval_batch_size)\n\n label_ids: np.ndarray = None\n preds: np.ndarray = None\n\n step: int = 1\n\n for features, labels in dataset:\n step = tf.convert_to_tensor(step, dtype=tf.int64)\n loss, logits = self._evaluate_steps(features, labels)\n loss = tf.reduce_mean(loss)\n\n if not prediction_loss_only:\n if self.args.n_gpu > 1:\n for val in logits.values:\n if preds is None:\n preds = val.numpy()\n else:\n preds = np.append(preds, val.numpy(), axis=0)\n\n for val in labels.values:\n if label_ids is None:\n label_ids = val.numpy()\n else:\n label_ids = np.append(label_ids, val.numpy(), axis=0)\n else:\n if preds is None:\n preds = logits.numpy()\n else:\n preds = np.append(preds, logits.numpy(), axis=0)\n\n if label_ids is None:\n label_ids = labels.numpy()\n else:\n label_ids = np.append(label_ids, labels.numpy(), axis=0)\n\n step += 1\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n metrics[\"eval_loss\"] = loss.numpy()\n\n for key in 
list(metrics.keys()):\n if not key.startswith(\"eval_\"):\n metrics[f\"eval_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def evaluate(\n self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None\n ) -> Dict[str, float]:\n \"\"\"\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n \"\"\"\n if eval_dataset is None:\n eval_dataset = self.eval_dataset\n\n output = self._prediction_loop(eval_dataset, description=\"Evaluation\")\n\n return output.metrics\n\n def train(self) -> None:\n \"\"\"\n Train method to train the model.\n \"\"\"\n if self.args.debug:\n tf.summary.trace_on(graph=True, profiler=True)\n\n self.gradient_accumulator.reset()\n\n iterations = self.optimizer.iterations\n\n if iterations.numpy() > 0:\n logger.info(\"Start the training from the last checkpoint\")\n start_epoch = (iterations.numpy() // self.train_steps) + 1\n else:\n start_epoch = 1\n\n tf.summary.experimental.set_step(iterations)\n\n epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", self.num_train_examples)\n logger.info(\" Num Epochs = %d\", epochs)\n logger.info(\" Total optimization steps = %d\", self.train_steps)\n\n for epoch in range(start_epoch, int(epochs + 1)):\n for training_loss in self._training_steps():\n step = iterations.numpy()\n\n if self.args.debug:\n with self.writer.as_default():\n tf.summary.scalar(\"loss\", training_loss, step=step)\n\n if step == 1 and self.args.debug:\n with self.writer.as_default():\n tf.summary.trace_export(name=\"training\", step=step, profiler_outdir=self.args.logging_dir)\n\n if self.args.evaluate_during_training and step % self.args.eval_steps == 0:\n logs = {}\n results = self.evaluate()\n\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n if callable(self.optimizer.learning_rate):\n logs[\"learning_rate\"] = self.optimizer.learning_rate(step).numpy()\n else:\n logs[\"learning_rate\"] = self.optimizer.learning_rate.numpy()\n\n logger.info(\"Epoch {} Step {} Validation Metrics {}\".format(epoch, step, logs))\n\n with self.writer.as_default():\n for k, v in logs.items():\n tf.summary.scalar(k, v, step=step)\n\n if step % self.args.logging_steps == 0:\n logger.info(\"Epoch {} Step {} Train Loss {:.4f}\".format(epoch, step, training_loss.numpy()))\n\n if step % self.args.save_steps == 0:\n ckpt_save_path = self.model.ckpt_manager.save()\n logger.info(\"Saving checkpoint for step {} at {}\".format(step, ckpt_save_path))\n\n if step % self.train_steps == 0:\n break\n\n def _training_steps(self):\n \"\"\"\n Returns a generator over training steps (i.e. 
parameters update).\n \"\"\"\n for i, loss in enumerate(self._accumulate_next_gradients()):\n if i % self.args.gradient_accumulation_steps == 0:\n self._apply_gradients()\n yield loss\n\n @tf.function\n def _apply_gradients(self):\n \"\"\"Applies the gradients (cross-replica).\"\"\"\n self.args.strategy.experimental_run_v2(self._step)\n\n def _step(self):\n \"\"\"Applies gradients and resets accumulation.\"\"\"\n gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync\n gradients = [\n gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients\n ]\n gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]\n\n self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))\n self.gradient_accumulator.reset()\n\n def _accumulate_next_gradients(self):\n \"\"\"Accumulates the gradients from the next element in dataset.\"\"\"\n iterator = iter(self.train_dataset)\n\n @tf.function\n def _accumulate_next():\n per_replica_features, per_replica_labels = next(iterator)\n\n return self._accumulate_gradients(per_replica_features, per_replica_labels)\n\n while True:\n try:\n yield _accumulate_next()\n except tf.errors.OutOfRangeError:\n break\n\n def _accumulate_gradients(self, per_replica_features, per_replica_labels):\n \"\"\"Accumulates the gradients across all the replica.\"\"\"\n per_replica_loss = self.args.strategy.experimental_run_v2(\n self._forward, args=(per_replica_features, per_replica_labels)\n )\n\n try:\n reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)\n except ValueError:\n reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)\n\n return reduced_loss\n\n def _forward(self, features, labels):\n \"\"\"Forwards a training example and accumulates the gradients.\"\"\"\n per_example_loss, _ = self._run_model(features, labels, True)\n gradients = tf.gradients(per_example_loss, self.model.trainable_variables)\n gradients = [\n g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)\n ]\n\n self.gradient_accumulator(gradients)\n\n return per_example_loss\n\n def _run_model(self, features, labels, training):\n \"\"\"\n Computes the loss of the given features and labels pair.\n Args:\n features: the batched features.\n labels: the batched labels.\n training: run the model in training mode or not\n \"\"\"\n if self.args.mode == \"text-classification\" or self.args.mode == \"token-classification\":\n logits = self.model(features, training=training)[0]\n else:\n logits = self.model(features, training=training)\n\n if self.args.mode == \"token-classification\":\n active_loss = tf.reshape(labels, (-1,)) != -1\n reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)\n labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)\n loss = self.loss(labels, reduced_logits)\n elif self.args.mode == \"question-answering\":\n start_loss = self.loss(labels[\"start_position\"], logits[0])\n end_loss = self.loss(labels[\"end_position\"], logits[1])\n loss = (start_loss + end_loss) / 2.0\n else:\n loss = self.loss(labels, logits)\n\n loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)\n\n return loss, logits\n\n def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:\n \"\"\"\n Run prediction and return predictions and potential metrics.\n Depending on the dataset and 
your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in evaluate().\n Args:\n test_dataset: something similar to a PT Dataset. This is just\n temporary before to have a framework-agnostic approach for datasets.\n \"\"\"\n test_dataset = test_dataset.batch(self.args.eval_batch_size)\n test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)\n\n return self._prediction_loop(test_dataset, description=\"Prediction\")\n\n def save_model(self) -> None:\n \"\"\"\n Save the pretrained model and create a Tensorflow saved model.\n \"\"\"\n logger.info(\"Saving model in {}\".format(self.args.output_dir))\n\n path = os.path.join(self.args.output_dir, \"saved_model\")\n\n logger.info(\"Saving model in {}\".format(path))\n os.makedirs(path, exist_ok=True)\n self.model.save_pretrained(self.args.output_dir)\n",
"# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 Transformer XL model.\n\"\"\"\n\n\nimport logging\n\nimport tensorflow as tf\n\nfrom .configuration_transfo_xl import TransfoXLConfig\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask\nfrom .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list\nfrom .tokenization_utils import BatchEncoding\n\n\nlogger = logging.getLogger(__name__)\n\nTF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"transfo-xl-wt103\",\n # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl\n]\n\n\nclass TFPositionalEmbedding(tf.keras.layers.Layer):\n def __init__(self, demb, **kwargs):\n super().__init__(**kwargs)\n\n self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))\n\n def call(self, pos_seq, bsz=None):\n sinusoid_inp = tf.einsum(\"i,j->ij\", pos_seq, self.inv_freq)\n pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)\n\n if bsz is not None:\n return tf.tile(pos_emb[:, None, :], [1, bsz, 1])\n else:\n return pos_emb[:, None, :]\n\n\nclass TFPositionwiseFF(tf.keras.layers.Layer):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):\n super().__init__(**kwargs)\n\n self.d_model = d_model\n self.d_inner = d_inner\n self.dropout = dropout\n\n self.layer_1 = tf.keras.layers.Dense(\n d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name=\"CoreNet_._0\"\n )\n self.drop_1 = tf.keras.layers.Dropout(dropout)\n self.layer_2 = tf.keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name=\"CoreNet_._3\")\n self.drop_2 = tf.keras.layers.Dropout(dropout)\n\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layer_norm\")\n\n self.pre_lnorm = pre_lnorm\n\n def call(self, inp, training=False):\n if self.pre_lnorm:\n # layer normalization + positionwise feed-forward\n core_out = self.layer_norm(inp)\n core_out = self.layer_1(core_out)\n core_out = self.drop_1(core_out, training=training)\n core_out = self.layer_2(core_out)\n core_out = self.drop_2(core_out, training=training)\n\n # residual connection\n output = core_out + inp\n else:\n # positionwise feed-forward\n core_out = self.layer_1(inp)\n core_out = self.drop_1(core_out, training=training)\n core_out = self.layer_2(core_out)\n core_out = self.drop_2(core_out, training=training)\n\n # residual connection + layer normalization\n output = self.layer_norm(inp + core_out)\n\n return output\n\n\nclass TFRelPartialLearnableMultiHeadAttn(tf.keras.layers.Layer):\n def __init__(\n self,\n n_head,\n d_model,\n d_head,\n dropout,\n dropatt=0,\n tgt_len=None,\n ext_len=None,\n mem_len=None,\n 
pre_lnorm=False,\n r_r_bias=None,\n r_w_bias=None,\n output_attentions=False,\n layer_norm_epsilon=1e-5,\n init_std=0.02,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n self.output_attentions = output_attentions\n self.n_head = n_head\n self.d_model = d_model\n self.d_head = d_head\n self.dropout = dropout\n\n self.qkv_net = tf.keras.layers.Dense(\n 3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name=\"qkv_net\"\n )\n\n self.drop = tf.keras.layers.Dropout(dropout)\n self.dropatt = tf.keras.layers.Dropout(dropatt)\n self.o_net = tf.keras.layers.Dense(\n d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name=\"o_net\"\n )\n\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layer_norm\")\n\n self.scale = 1 / (d_head ** 0.5)\n\n self.pre_lnorm = pre_lnorm\n\n if r_r_bias is not None and r_w_bias is not None: # Biases are shared\n self.r_r_bias = r_r_bias\n self.r_w_bias = r_w_bias\n else:\n self.r_r_bias = None\n self.r_w_bias = None\n\n self.r_net = tf.keras.layers.Dense(\n self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name=\"r_net\"\n )\n\n def build(self, input_shape):\n if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared\n self.r_r_bias = self.add_weight(\n shape=(self.n_head, self.d_head), initializer=\"zeros\", trainable=True, name=\"r_r_bias\"\n )\n self.r_w_bias = self.add_weight(\n shape=(self.n_head, self.d_head), initializer=\"zeros\", trainable=True, name=\"r_w_bias\"\n )\n super().build(input_shape)\n\n def _rel_shift(self, x):\n x_size = shape_list(x)\n\n x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])\n x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])\n x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])\n x = tf.reshape(x, x_size)\n\n return x\n\n def call(self, inputs, training=False):\n w, r, attn_mask, mems, head_mask = inputs\n qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]\n\n if mems is not None:\n cat = tf.concat([mems, w], 0)\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(cat))\n else:\n w_heads = self.qkv_net(cat)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)\n w_head_q = w_head_q[-qlen:]\n else:\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(w))\n else:\n w_heads = self.qkv_net(w)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)\n\n klen = shape_list(w_head_k)[0]\n\n w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head\n w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head\n w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head\n\n r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # qlen x n_head x d_head\n\n # compute attention score\n rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head\n AC = tf.einsum(\"ibnd,jbnd->ijbn\", rw_head_q, w_head_k) # qlen x klen x bsz x n_head\n\n rr_head_q = w_head_q + self.r_r_bias\n BD = tf.einsum(\"ibnd,jnd->ijbn\", rr_head_q, r_head_k) # qlen x klen x bsz x n_head\n BD = self._rel_shift(BD)\n\n # [qlen x klen x bsz x n_head]\n attn_score = AC + BD\n attn_score = attn_score * self.scale\n\n # compute attention probability\n if attn_mask is not None:\n attn_mask_t = attn_mask[:, :, None, None]\n attn_score = attn_score * (1 - attn_mask_t) - 
1e30 * attn_mask_t\n\n # [qlen x klen x bsz x n_head]\n attn_prob = tf.nn.softmax(attn_score, axis=1)\n attn_prob = self.dropatt(attn_prob, training=training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_prob = attn_prob * head_mask\n\n # compute attention vector\n attn_vec = tf.einsum(\"ijbn,jbnd->ibnd\", attn_prob, w_head_v)\n\n # [qlen x bsz x n_head x d_head]\n attn_vec_sizes = shape_list(attn_vec)\n attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))\n\n # linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out, training=training)\n\n if self.pre_lnorm:\n # residual connection\n outputs = [w + attn_out]\n else:\n # residual connection + layer normalization\n outputs = [self.layer_norm(w + attn_out)]\n\n if self.output_attentions:\n outputs.append(attn_prob)\n\n return outputs\n\n\nclass TFRelPartialLearnableDecoderLayer(tf.keras.layers.Layer):\n def __init__(\n self,\n n_head,\n d_model,\n d_head,\n d_inner,\n dropout,\n tgt_len=None,\n ext_len=None,\n mem_len=None,\n dropatt=0.0,\n pre_lnorm=False,\n r_w_bias=None,\n r_r_bias=None,\n output_attentions=False,\n layer_norm_epsilon=1e-5,\n init_std=0.02,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n self.dec_attn = TFRelPartialLearnableMultiHeadAttn(\n n_head,\n d_model,\n d_head,\n dropout,\n tgt_len=tgt_len,\n ext_len=ext_len,\n mem_len=mem_len,\n dropatt=dropatt,\n pre_lnorm=pre_lnorm,\n r_w_bias=r_w_bias,\n r_r_bias=r_r_bias,\n init_std=init_std,\n output_attentions=output_attentions,\n layer_norm_epsilon=layer_norm_epsilon,\n name=\"dec_attn\",\n )\n self.pos_ff = TFPositionwiseFF(\n d_model,\n d_inner,\n dropout,\n pre_lnorm=pre_lnorm,\n init_std=init_std,\n layer_norm_epsilon=layer_norm_epsilon,\n name=\"pos_ff\",\n )\n\n def call(self, inputs, training=False):\n dec_inp, r, dec_attn_mask, mems, head_mask = inputs\n attn_outputs = self.dec_attn([dec_inp, r, dec_attn_mask, mems, head_mask], training=training)\n ff_output = self.pos_ff(attn_outputs[0], training=training)\n\n outputs = [ff_output] + attn_outputs[1:]\n\n return outputs\n\n\nclass TFAdaptiveEmbedding(tf.keras.layers.Layer):\n def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):\n super().__init__(**kwargs)\n\n self.n_token = n_token\n self.d_embed = d_embed\n self.init_std = init_std\n\n self.cutoffs = cutoffs + [n_token]\n self.div_val = div_val\n self.d_proj = d_proj\n\n self.emb_scale = d_proj ** 0.5\n\n self.cutoff_ends = [0] + self.cutoffs\n\n self.emb_layers = []\n self.emb_projs = []\n if div_val == 1:\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n else:\n for i in range(len(self.cutoffs)):\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n d_emb_i = d_embed // (div_val ** i)\n self.emb_layers.append(\n tf.keras.layers.Embedding(\n r_idx - l_idx,\n d_emb_i,\n embeddings_initializer=get_initializer(init_std),\n name=\"emb_layers_._{}\".format(i),\n )\n )\n\n def build(self, input_shape):\n for i in range(len(self.cutoffs)):\n d_emb_i = self.d_embed // (self.div_val ** i)\n self.emb_projs.append(\n self.add_weight(\n shape=(d_emb_i, self.d_proj),\n initializer=get_initializer(self.init_std),\n trainable=True,\n name=\"emb_projs_._{}\".format(i),\n )\n )\n super().build(input_shape)\n\n def call(self, inp):\n if self.div_val == 1:\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are 
not used in our pretrained checkpoint\n else:\n inp_flat = tf.reshape(inp, (-1,))\n emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])\n for i in range(len(self.cutoffs)):\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n\n mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)\n\n inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx\n emb_i = self.emb_layers[i](inp_i)\n emb_i = tf.einsum(\"id,de->ie\", emb_i, self.emb_projs[i])\n\n mask_idx = tf.cast(tf.where(mask_i), dtype=tf.int64)\n emb_flat += tf.scatter_nd(mask_idx, emb_i, tf.cast(shape_list(emb_flat), dtype=tf.int64))\n\n embed_shape = shape_list(inp) + [self.d_proj]\n embed = tf.reshape(emb_flat, embed_shape)\n\n embed *= self.emb_scale\n\n return embed\n\n\n@keras_serializable\nclass TFTransfoXLMainLayer(tf.keras.layers.Layer):\n config_class = TransfoXLConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n self.n_token = config.vocab_size\n\n self.d_embed = config.d_embed\n self.d_model = config.d_model\n self.n_head = config.n_head\n self.d_head = config.d_head\n self.untie_r = config.untie_r\n\n self.word_emb = TFAdaptiveEmbedding(\n config.vocab_size,\n config.d_embed,\n config.d_model,\n config.cutoffs,\n div_val=config.div_val,\n init_std=config.init_std,\n name=\"word_emb\",\n )\n\n self.drop = tf.keras.layers.Dropout(config.dropout)\n\n self.n_layer = config.n_layer\n\n self.tgt_len = config.tgt_len\n self.mem_len = config.mem_len\n self.ext_len = config.ext_len\n self.max_klen = config.tgt_len + config.ext_len + config.mem_len\n\n self.attn_type = config.attn_type\n\n self.layers = []\n if config.attn_type == 0: # the default attention\n for i in range(config.n_layer):\n self.layers.append(\n TFRelPartialLearnableDecoderLayer(\n config.n_head,\n config.d_model,\n config.d_head,\n config.d_inner,\n config.dropout,\n tgt_len=config.tgt_len,\n ext_len=config.ext_len,\n mem_len=config.mem_len,\n dropatt=config.dropatt,\n pre_lnorm=config.pre_lnorm,\n r_w_bias=None if self.untie_r else self.r_w_bias,\n r_r_bias=None if self.untie_r else self.r_r_bias,\n output_attentions=self.output_attentions,\n layer_norm_epsilon=config.layer_norm_epsilon,\n init_std=config.init_std,\n name=\"layers_._{}\".format(i),\n )\n )\n else: # learnable embeddings and absolute embeddings\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n\n self.same_length = config.same_length\n self.clamp_len = config.clamp_len\n\n if self.attn_type == 0: # default attention\n self.pos_emb = TFPositionalEmbedding(self.d_model, name=\"pos_emb\")\n else: # learnable embeddings and absolute embeddings\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n\n def build(self, input_shape):\n if not self.untie_r:\n self.r_w_bias = self.add_weight(\n shape=(self.n_head, self.d_head), initializer=\"zeros\", trainable=True, name=\"r_w_bias\"\n )\n self.r_r_bias = self.add_weight(\n shape=(self.n_head, self.d_head), initializer=\"zeros\", trainable=True, name=\"r_r_bias\"\n )\n super().build(input_shape)\n\n def get_input_embeddings(self):\n return self.word_emb\n\n def _resize_token_embeddings(self, new_num_tokens):\n return self.word_emb\n\n def backward_compatible(self):\n self.sample_softmax = -1\n\n def reset_length(self, tgt_len, ext_len, mem_len):\n self.tgt_len = tgt_len\n 
self.mem_len = mem_len\n self.ext_len = ext_len\n\n def _prune_heads(self, heads):\n raise NotImplementedError\n\n def init_mems(self, bsz):\n if self.mem_len > 0:\n mems = []\n for i in range(self.n_layer):\n empty = tf.zeros([self.mem_len, bsz, self.d_model])\n mems.append(empty)\n\n return mems\n else:\n return None\n\n def _update_mems(self, hids, mems, mlen, qlen):\n # does not deal with None\n if mems is None:\n return None\n\n # mems is not None\n assert len(hids) == len(mems), \"len(hids) != len(mems)\"\n\n # There are `mlen + qlen` steps that can be cached into mems\n # For the next step, the last `ext_len` of the `qlen` tokens\n # will be used as the extended context. Hence, we only cache\n # the tokens from `mlen + qlen - self.ext_len - self.mem_len`\n # to `mlen + qlen - self.ext_len`.\n new_mems = []\n end_idx = mlen + max(0, qlen - 0 - self.ext_len)\n beg_idx = max(0, end_idx - self.mem_len)\n for i in range(len(hids)):\n\n cat = tf.concat([mems[i], hids[i]], axis=0)\n tf.stop_gradient(cat)\n new_mems.append(cat[beg_idx:end_idx])\n\n return new_mems\n\n def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, training=False):\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n mems = inputs[1] if len(inputs) > 1 else mems\n head_mask = inputs[2] if len(inputs) > 2 else head_mask\n inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds\n assert len(inputs) <= 4, \"Too many inputs.\"\n elif isinstance(inputs, (dict, BatchEncoding)):\n input_ids = inputs.get(\"input_ids\")\n mems = inputs.get(\"mems\", mems)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n assert len(inputs) <= 4, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library\n # so we transpose here from shape [bsz, len] to shape [len, bsz]\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_ids = tf.transpose(input_ids, perm=(1, 0))\n qlen, bsz = shape_list(input_ids)\n elif inputs_embeds is not None:\n inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))\n qlen, bsz = shape_list(inputs_embeds)[:2]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if mems is None:\n mems = self.init_mems(bsz)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)\n # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.n_layer\n\n if inputs_embeds is not None:\n word_emb = inputs_embeds\n else:\n word_emb = self.word_emb(input_ids)\n\n mlen = shape_list(mems[0])[0] if mems is not None else 0\n klen = mlen + qlen\n\n attn_mask = tf.ones([qlen, qlen])\n mask_u = tf.linalg.band_part(attn_mask, 0, -1)\n mask_dia = tf.linalg.band_part(attn_mask, 0, 0)\n attn_mask_pad = tf.zeros([qlen, mlen])\n dec_attn_mask = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)\n if self.same_length:\n mask_l = tf.linalg.band_part(attn_mask, -1, 0)\n dec_attn_mask = tf.concat([dec_attn_mask[:, :qlen] + mask_l - mask_dia, dec_attn_mask[:, qlen:]], 1)\n # ::: PyTorch 
masking code for reference :::\n # if self.same_length:\n # all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)\n # mask_len = klen - self.mem_len\n # if mask_len > 0:\n # mask_shift_len = qlen - mask_len\n # else:\n # mask_shift_len = qlen\n # dec_attn_mask = (torch.triu(all_ones, 1+mlen)\n # + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1\n # else:\n # dec_attn_mask = torch.triu(\n # word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1+mlen)[:,:,None]\n\n hids = []\n attentions = []\n if self.attn_type == 0: # default\n pos_seq = tf.range(klen - 1, -1, -1.0)\n if self.clamp_len > 0:\n pos_seq = tf.minimum(pos_seq, self.clamp_len)\n pos_emb = self.pos_emb(pos_seq)\n\n core_out = self.drop(word_emb, training=training)\n pos_emb = self.drop(pos_emb, training=training)\n\n for i, layer in enumerate(self.layers):\n hids.append(core_out)\n mems_i = None if mems is None else mems[i]\n layer_outputs = layer([core_out, pos_emb, dec_attn_mask, mems_i, head_mask[i]], training=training)\n core_out = layer_outputs[0]\n if self.output_attentions:\n attentions.append(layer_outputs[1])\n else: # learnable embeddings and absolute embeddings\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n\n core_out = self.drop(core_out, training=training)\n\n new_mems = self._update_mems(hids, mems, mlen, qlen)\n\n # We transpose back here to shape [bsz, len, hidden_dim]\n outputs = [tf.transpose(core_out, perm=(1, 0, 2)), new_mems]\n if self.output_hidden_states:\n # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]\n hids.append(core_out)\n hids = list(tf.transpose(t, perm=(1, 0, 2)) for t in hids)\n outputs.append(hids)\n if self.output_attentions:\n # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]\n attentions = list(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)\n outputs.append(attentions)\n return outputs # last hidden state, new_mems, (all hidden states), (all attentions)\n\n\nclass TFTransfoXLPreTrainedModel(TFPreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = TransfoXLConfig\n base_model_prefix = \"transformer\"\n\n\nTRANSFO_XL_START_DOCSTRING = r\"\"\"\n\n .. 
note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having\n all the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors\n in the first positional argument :\n\n - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`\n\n Parameters:\n config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nTRANSFO_XL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model\n (see `mems` output below). Can be used to speed up sequential decoding. 
The token ids which have their mems\n given to this model should not be passed as input ids as they have already been computed.\n head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputing raw hidden-states without any specific head on top.\",\n TRANSFO_XL_START_DOCSTRING,\n)\nclass TFTransfoXLModel(TFTransfoXLPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFTransfoXLMainLayer(config, name=\"transformer\")\n\n @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)\n def call(self, inputs, **kwargs):\n r\"\"\"\n Return:\n :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:\n last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the last layer of the model.\n mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `mems` input) to speed up sequential decoding. 
The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import TransfoXLTokenizer, TFTransfoXLModel\n\n tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')\n model = TFTransfoXLModel.from_pretrained('transfo-xl-wt103')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n last_hidden_states, mems = outputs[:2]\n\n \"\"\"\n outputs = self.transformer(inputs, **kwargs)\n return outputs\n\n\nclass TFTransfoXLLMHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n super().build(input_shape)\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"The Transformer-XL Model with a language modeling head on top\n (adaptive softmax with weights tied to the adaptive input embeddings)\"\"\",\n TRANSFO_XL_START_DOCSTRING,\n)\nclass TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.transformer = TFTransfoXLMainLayer(config, name=\"transformer\")\n self.sample_softmax = config.sample_softmax\n assert (\n self.sample_softmax <= 0\n ), \"Sampling from the softmax is not implemented yet. 
Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310\"\n\n self.crit = TFAdaptiveSoftmaxMask(\n config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name=\"crit\"\n )\n\n def get_output_embeddings(self):\n \"\"\" Double-check if you are using adaptive softmax.\n \"\"\"\n if len(self.crit.out_layers) > 0:\n return self.crit.out_layers[-1]\n return None\n\n def reset_length(self, tgt_len, ext_len, mem_len):\n self.transformer.reset_length(tgt_len, ext_len, mem_len)\n\n def init_mems(self, bsz):\n return self.transformer.init_mems(bsz)\n\n @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)\n def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, labels=None, training=False):\n r\"\"\"\n Return:\n :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:\n prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks).\n Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model\n should not be passed as input ids as they have already been computed.\n hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`tf.Tensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n import tensorflow as tf\n from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel\n\n tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')\n model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')\n input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True))[None, :] # Batch size 1\n outputs = model(input_ids)\n prediction_scores, mems = outputs[:2]\n\n \"\"\"\n if isinstance(inputs, (tuple, list)):\n input_ids = inputs[0]\n mems = inputs[1] if len(inputs) > 1 else mems\n head_mask = inputs[2] if len(inputs) > 2 else head_mask\n inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds\n labels = inputs[4] if len(inputs) > 4 else labels\n assert len(inputs) <= 5, \"Too many inputs.\"\n elif isinstance(inputs, dict):\n input_ids = inputs.get(\"input_ids\")\n mems = inputs.get(\"mems\", mems)\n head_mask = inputs.get(\"head_mask\", head_mask)\n inputs_embeds = inputs.get(\"inputs_embeds\", inputs_embeds)\n labels = inputs.get(\"labels\", labels)\n assert len(inputs) <= 5, \"Too many inputs.\"\n else:\n input_ids = inputs\n\n if input_ids is not None:\n bsz, tgt_len = shape_list(input_ids)[:2]\n else:\n bsz, tgt_len = shape_list(inputs_embeds)[:2]\n\n transformer_outputs = self.transformer([input_ids, mems, head_mask, inputs_embeds], 
training=training)\n\n last_hidden = transformer_outputs[0]\n pred_hid = last_hidden[:, -tgt_len:]\n outputs = transformer_outputs[1:]\n\n softmax_output = self.crit([pred_hid, labels], training=training)\n outputs = [softmax_output] + outputs\n\n return outputs # logits, new_mems, (all hidden states), (all attentions)\n\n def prepare_inputs_for_generation(self, inputs, past, **model_kwargs):\n inputs = {\"inputs\": inputs}\n\n # if past is defined in model kwargs then use it for faster decoding\n if past:\n inputs[\"mems\"] = past\n\n return inputs\n"
] |
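A toy, framework-free analogue of the gradient accumulation and scaling done in TFTrainer._step in trainer_tf.py above: gradients summed over micro-batches are divided by accumulation steps times the replica count, then value-clipped before being applied. The numbers and the single-replica assumption are made up for illustration.

# Illustrative only: plain-Python analogue of GradientAccumulator + _step (1 replica assumed).
micro_batch_grads = [[0.2, -0.4], [0.6, 0.0], [0.1, 0.4]]   # one list per micro-batch, one slot per variable
accumulated = [0.0, 0.0]
for grads in micro_batch_grads:                   # the accumulator sums gradients across micro-batches
    accumulated = [a + g for a, g in zip(accumulated, grads)]

gradient_scale = len(micro_batch_grads) * 1       # accumulator.step * strategy.num_replicas_in_sync
averaged = [a / gradient_scale for a in accumulated]
max_grad_norm = 1.0
clipped = [min(max(g, -max_grad_norm), max_grad_norm) for g in averaged]   # tf.clip_by_value analogue
print(clipped)                                    # approx. [0.3, 0.0]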
[
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.losses.get",
"tensorflow.summary.trace_on",
"tensorflow.train.CheckpointManager",
"tensorflow.summary.scalar",
"tensorflow.summary.experimental.set_step",
"tensorflow.gradients",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.constant",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.summary.trace_export",
"tensorflow.summary.create_file_writer",
"tensorflow.reduce_mean",
"tensorflow.keras.optimizers.get",
"tensorflow.train.Checkpoint"
],
[
"tensorflow.ones",
"tensorflow.reshape",
"tensorflow.tile",
"tensorflow.nn.softmax",
"tensorflow.einsum",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.pad",
"tensorflow.split",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.minimum",
"tensorflow.where",
"tensorflow.cos",
"tensorflow.keras.layers.Dropout",
"tensorflow.sin",
"tensorflow.boolean_mask",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.slice",
"tensorflow.stop_gradient",
"tensorflow.linalg.band_part"
]
] |
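A NumPy illustration (2-D for readability; shapes assumed) of the _rel_shift padding trick from TFRelPartialLearnableMultiHeadAttn in modeling_tf_transfo_xl.py above: pad one slot in front of the key axis, reinterpret the flat buffer with the leading axes swapped, drop the first row, and reshape back. The net effect is that row i is shifted left by (qlen - 1 - i) positions; the slots that wrap in hold junk values that are subsequently suppressed by the attention mask.

import numpy as np

def rel_shift_2d(x):
    # x: (qlen, klen); the model applies the same trick to (qlen, klen, bsz, n_head) attention scores
    q, k = x.shape
    x = np.pad(x, [[0, 0], [1, 0]])    # prepend a zero column (tf.pad with [[0,0],[1,0],[0,0],[0,0]])
    x = x.reshape(k + 1, q)            # reinterpret the buffer with the two leading axes swapped
    return x[1:].reshape(q, k)         # drop the first row and restore the original shape

x = np.arange(12).reshape(3, 4)
print(rel_shift_2d(x))                 # row 0 shifted left by 2, row 1 by 1, row 2 unchanged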
wdempsey/sense2stop-lvm
|
[
"ea44d5f9199382d30e4c5a5ff4bd524313ceb5b2"
] |
[
"methods/latent-pp-models-mem-rjmcmc.py"
] |
[
"#%%\nimport pymc3 as pm\nimport arviz as az\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom scipy import stats\nimport os\nimport pickle\nfrom scipy import special\nimport theano.tensor as tt\n\n## List down file paths\nexec(open('../env_vars.py').read())\ndir_data = os.environ['dir_data']\ndir_picklejar = os.environ['dir_picklejar']\n\n#%%\n\n###############################################################################\n# Read in preparation: data_dates data frame\n###############################################################################\nfilename = os.path.join(os.path.realpath(dir_picklejar), 'save_all_dict')\ninfile = open(filename,'rb')\nclean_data = pickle.load(infile)\ninfile.close()\n\n#%% \n\n'''\nDelete all times > 1hr before start time. \nExtend day to handle all other times and remove duplicates\nNeed to move this part of code to pre-processing at some point\n'''\n\nfor key in clean_data.keys():\n temp = clean_data[key]\n for days in temp.keys():\n day_temp = temp[days]\n if len(day_temp['hours_since_start_day']) > 0:\n ## Check if any times < or > 1hr \n day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'].iloc[np.where(day_temp['hours_since_start_day'] > -1)]\n day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'].iloc[np.where(day_temp['day_length'] - day_temp['hours_since_start_day'] > -1)]\n day_min = np.min(day_temp['hours_since_start_day'])\n day_min = np.min([day_min,0])\n day_max = np.max(day_temp['hours_since_start_day'])\n day_max = np.max([day_max, day_temp['day_length']])\n day_temp['hours_since_start_day'] = day_temp['hours_since_start_day'] - day_min\n day_temp['hours_since_start_day'] = np.unique(day_temp['hours_since_start_day'])\n day_temp['day_length'] = day_max - day_min\n\n\n\n#%%\n\n###############################################################################\n# Estimation using pymc3\n###############################################################################\n\ndef exponential_log_complementary_cdf(x, lam):\n ''' log complementary CDF of exponential distribution '''\n return -lam*x\n\ndef exponential_log_pdf(x, lam):\n ''' log complementary CDF of exponential distribution '''\n return np.log(lam)-lam*x\n\n\ndef selfreport_mem(observed, latent, dimon):\n '''\n observed: Observed self report times\n latent: Vector of latent smoking events (length is max) \n dimon: Integer saying how many of the latent entries are currently included\n '''\n total = 1.0\n temp_latent = latent[tt.arange(dimon)]\n if not tt.all(tt.eq(observed,temp_latent)):\n total = -1000000\n else: \n total = tt.prod(tt.eq(temp_latent,observed)*0.9 + (1-tt.eq(temp_latent,observed))*0.1)\n return total\n\nmax_events = 0.0 # Defining max number of events\nfor participants in clean_data.keys():\n for days in clean_data[participants].keys():\n max_events = np.max([max_events,len(clean_data[participants][days]['hours_since_start_day'])])\n\nmax_events = max_events + 10 # Just to be safe let's add a few more\nmax_events = max_events.astype('int')\n#%%\n\n###############################################################################\n'''\nEstimation using pymc3.\nModel is a static graph so we handle this by having a maximum number of \nevents within a day length max_events that tells us which events are \"on\" \n'''\n###############################################################################\n\nwith pm.Model() as model:\n # -------------------------------------------------------------------------\n # Priors\n # 
-------------------------------------------------------------------------\n beta = pm.Normal('beta', mu=0, sd=10)\n loglamb_observed = beta \n lamb_observed = np.exp(loglamb_observed)\n \n # -------------------------------------------------------------------------\n # Likelihood\n # -------------------------------------------------------------------------\n for participants in clean_data.keys():\n for days in clean_data[participants].keys():\n if len(clean_data[participants][days]['hours_since_start_day']) > 0:\n pp_rate = lamb_observed*clean_data[participants][days]['day_length']\n num_sr = len(clean_data[participants][days]['hours_since_start_day'])\n sr = clean_data[participants][days]['hours_since_start_day']\n day_length = clean_data[participants][days]['day_length']\n init = np.append(sr, np.repeat(0,max_events-num_sr))\n smoke_length = pm.Poisson('num_smokes_%d_%d'%(participants, days), mu=pp_rate, testval = num_sr) # Number of Events in Day\n smoke_times = pm.Uniform('smoke_times_%d_%d'%(participants, days), lower = 0.0, upper = day_length, shape = max_events, testval = init) # Location of Events in Day\n sr_times = pm.Potential('sr_times_%d_%d'%(participants, days), selfreport_mem(observed=sr, latent=smoke_times, dimon = smoke_length))\n\n\n#%%\n# Sample from posterior distribution\nwith model:\n# posterior_samples = pm.sample(draws=5000, tune=5000, cores=1, target_accept=0.80)\n posterior_samples = pm.sample(draws = 2000, tune=2000, init='adapt_diag', cores = 1) \n\n#%%\n# Calculate 95% credible interval\nmodel_summary_logscale = az.summary(posterior_samples, credible_interval=.95)\nmodel_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]\n\n# Produce trace plots\npm.traceplot(posterior_samples)\n\n# Collect results\ncollect_results = {'model':model, \n 'posterior_samples':posterior_samples,\n 'model_summary_logscale':model_summary_logscale}\n#%%\n# Remove variable from workspace\ndel model, posterior_samples, model_summary_logscale\n\n#%%\n\n###############################################################################\n# Print results from all models\n###############################################################################\nimport matplotlib.pyplot as plt\n\n# Model 0\npm.traceplot(collect_results['posterior_samples'])\nprint(collect_results['model_summary_logscale'])\n\nplt.figure(figsize=(4,8))\npm.forestplot(collect_results['posterior_samples'], var_names=['beta'], credible_interval=0.95)\npm.forestplot(collect_results['posterior_samples'], var_names=['beta_day'], credible_interval=0.95)\n#pm.forestplot(collect_results['0']['posterior_samples'], var_names=['alpha'], credible_interval=0.95)\n\n# %%\nfilename = os.path.join(os.path.realpath(dir_picklejar), 'rjmcmc_models')\noutfile = open(filename, 'wb')\npickle.dump(collect_results, outfile)\noutfile.close()\n\n# %% REsidual code for safekeeping\n\n# # Y_hat_latent = pm.Determinist(of Y_diff_latent)\n# # Y_observed = pm.Potential('Y_observed', selfreport_mem(Y_hat_latent))\n## Y_hat_observed is 'hours_since_start_day'\n## Given hours_since_start_day, use smartdumbRJ.py to generate a new latent event times (Y_hat_latent)\n## Given Y_hat_latent, take diff sequence and model as exponential holding times \n# loglamb_observed = beta\n# lamb_observed = np.exp(loglamb_observed)\n# # Define Y_hat_latent\n# # Take sequence of differences, Y_diff_latent\n# Y_diff_latent = pm.Exponential('Y_diff_latent', lam = lamb_observed)\n"
] |
[
[
"numpy.max",
"numpy.log",
"numpy.min",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.repeat",
"numpy.unique"
]
] |
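The pymc3 script in the entry above builds its self-report likelihood from two one-line helpers, exponential_log_complementary_cdf and exponential_log_pdf; as stored, both carry the same "log complementary CDF" docstring even though the second returns the log density. A minimal NumPy-only restatement with the docstrings disambiguated is sketched here; the scipy cross-check at the end is an illustration added for this note and is not part of the original file.

import numpy as np
from scipy import stats

def exponential_log_pdf(x, lam):
    """Log density of Exp(lam): log(lam) - lam*x for x >= 0."""
    return np.log(lam) - lam * x

def exponential_log_complementary_cdf(x, lam):
    """Log survival function of Exp(lam): log P(X > x) = -lam*x."""
    return -lam * x

# Quick sanity check against scipy (values chosen arbitrarily for the illustration)
x, lam = 2.0, 0.7
assert np.isclose(exponential_log_pdf(x, lam), stats.expon(scale=1 / lam).logpdf(x))
assert np.isclose(exponential_log_complementary_cdf(x, lam), stats.expon(scale=1 / lam).logsf(x))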
Drishttii/pyprobml
|
[
"30b120e7d4f81ade55c10250193d98398040574b"
] |
[
"scripts/knn_voronoi_plot.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport pyprobml_utils as pml\n\nfrom scipy.spatial import KDTree, Voronoi, voronoi_plot_2d\n\nnp.random.seed(42)\ndata = np.random.rand(25, 2)\nvor = Voronoi(data)\n\nprint('Using scipy.spatial.voronoi_plot_2d, wait...')\nvoronoi_plot_2d(vor)\nxlim = plt.xlim()\nylim = plt.ylim()\npml.savefig('knnVoronoiMesh.pdf')\nplt.show()\n\nprint('Using scipy.spatial.KDTree, wait a few seconds...')\nplt.figure()\ntree = KDTree(data)\nx = np.linspace(xlim[0], xlim[1], 200)\ny = np.linspace(ylim[0], ylim[1], 200)\nxx, yy = np.meshgrid(x, y)\nxy = np.c_[xx.ravel(), yy.ravel()]\nplt.plot(data[:, 0], data[:, 1], 'ko')\nplt.pcolormesh(x, y, tree.query(xy)[1].reshape(200, 200), cmap='jet')\npml.savefig('knnVoronoiColor.pdf')\nplt.show()\n"
] |
[
[
"scipy.spatial.Voronoi",
"numpy.random.rand",
"matplotlib.pyplot.xlim",
"scipy.spatial.voronoi_plot_2d",
"numpy.random.seed",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"scipy.spatial.KDTree",
"matplotlib.pyplot.show",
"numpy.linspace",
"numpy.meshgrid"
]
] |
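knn_voronoi_plot.py above makes a single point: colouring each grid location by the index of its nearest data point (via scipy.spatial.KDTree.query) reproduces the Voronoi partition that voronoi_plot_2d draws, since the Voronoi cell of a generator is exactly the set of locations whose nearest generator it is. The sketch below states that equivalence without any plotting; the point count, grid size and RNG seed are arbitrary choices for the illustration.

import numpy as np
from scipy.spatial import KDTree

rng = np.random.default_rng(0)
points = rng.random((10, 2))          # a few generator points in the unit square
tree = KDTree(points)

grid = rng.random((1000, 2))          # query locations standing in for the pixel grid
_, nearest_idx = tree.query(grid)     # index of the closest generator per location

# Brute-force nearest neighbour gives the same assignment:
# each set of locations sharing an index is one Voronoi cell.
brute_idx = np.argmin(((grid[:, None, :] - points[None, :, :]) ** 2).sum(-1), axis=1)
assert np.array_equal(nearest_idx, brute_idx)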
sofiapasquini/Code-Astro-Group-23-Project
|
[
"97dcbaf1b04822d56582e51332666dc5045e1154"
] |
[
"exampledoc/docs/Extractor.py"
] |
[
"#define functions that will extract the data from SDSS based on an input RA/DEC\n\nfrom astroquery.sdss import SDSS\nfrom astropy import coordinates as coords\nimport pandas as pd \nfrom astroquery.ned import Ned \nimport matplotlib.pyplot as plt\nfrom astropy.convolution import convolve, Box1DKernel\nimport numpy as np\nfrom astropy import units as u\n\n\ndef ra_dec_format(val):\n \"\"\" Ra/Dec string formatting\n\n Converts the input string format of a right ascension/ declination coordinate\n to one recognizable by astroquery\n\n Args:\n val (str): string; an ra/dec expression formatted as \"005313.81 +130955.0\".\n\n Returns:\n string: the ra/dec coordinates re-formatted as \"00h53m13.81s +13d09m55.0s\"\n \"\"\"\n #ra\n hour = val[0:2]\n min_ = val[2:4]\n sec = val[4:9]\n ra = hour+'h'+min_+'m'+sec+'s'\n #dec\n deg = val[9:13]\n min_d = val[13:15]\n sec_d = val[15:]\n dec = deg+'d'+min_d+'m'+sec_d+'s'\n return ra+\" \"+dec\n\ndef extractor(position):\n \"\"\"\n This function extracts the information from the SDSS database and returns\n a pandas dataframe with the query region. Please ensure that the 'position'\n input is formatted as '005313.81 +130955.0\n\n extractor(str) --> pd.DataFrame\n \"\"\"\n\n # convert the input position argument to the format recognized by astroquery.SDSS\n# position=ra_dec_format(position)\n\n # query the region and get the data\n position = ra_dec_format(position)\n pos = coords.SkyCoord(position, frame='icrs')\n data = SDSS.query_region(pos, spectro=True)\n return data.to_pandas()\n\n\ndef downloader(data):\n \"\"\"\n This function uses extracted information in order to dwonaload spectra, \n separating the data from th SDSS and BOSS.\n\n downloader(pd.Dataframe) --> [list(fits)]\n \"\"\"\n #create a empty list\n spec_list=[]\n\n # iteration over the pandas\n for i in range(len(data)):\n results = SDSS.query_specobj(plate = data['plate'][i],\n mjd = data['mjd'][i],\n fiberID = data['fiberID'][i])\n \n # try if it can download the data (SDSS)\n try:\n spec = SDSS.get_spectra(matches=results)[0]\n spec_list.append(spec)\n\n # if it cant download, is because is from (BOSS)\n except:\n results.remove_column(\"instrument\")\n results.add_column(name=\"instrument\", col=\"eboss\") # replace the instrument column\n spec = SDSS.get_spectra(matches=results)[0]\n spec_list.append(spec)\n\n return spec_list \n\n\n\n# test=downloader(result)\n# print(test)\n\n# define a function which grabs the object's redshift from the Ned database (better calibration)- needed for plotting in the object's rest-frame\ndef redshift(position):\n\n # make sure to format the input position argument such that it is recognizable by astroquery.Ned\n # position=ra_dec_format(position)\n position = ra_dec_format(position)\n pos=coords.SkyCoord(position, frame='icrs') # create a position object\n ned_results=Ned.query_region(pos,equinox=\"J2000\", radius=2*u.arcsecond) # query the database\n z=ned_results[0][6] # grab the redshift value from the query results\n return z\n\n# define a function that transforms an objects wavelength array into the object's rest-frame\ndef redshift_correct(z, wavelengths): # takes as input the redshift and the array of wavelengths\n wavelengths_corrected = wavelengths/(z+1)\n return wavelengths_corrected\n\n# define a function that transforms the results of downloader() into an array of data which will be plotted\ndef transform_data(spec_list, z): # takes as input a list of (I think?) 
fits files results and the redshift of the object\n \n # iterate over each file and grab the important data\n #fluxes={} # containers for each of the data arrays to be plotted ( will be lists of lists/arrays)\n #wavelengths={}\n #inverse_variances={} # <- dictionaries!\n\n dict={}\n\n for spec in spec_list:\n \n flux_array=[]\n wavelength_array=[]\n sigma_array=[]\n\n data=spec[1].data # this is the data part of the file\n #print(data.shape[0])\n #print(data)\n\n # store the appropriate columns in the designated containers- each row is a single spectrum?\n # SOFIA- try a nested dictionary?!?! \n for j in range(data.shape[0]):\n #print(data[j][0])\n\n #smoothedFlux=convolve(data[0],Box1DKernel(9)) # smooth the fluxes using a boxcar\n #print(smoothedFlux)\n flux_data = data[j][0]\n flux_array.append(flux_data)\n \n wavelengths_uncorrected=10**data[j][1] # the wavelengths (transformed from the log scale)\n #print(wavelengths_uncorrected)\n wavelengths_corrected=redshift_correct(z, wavelengths_uncorrected) # save the wavelengths after they have been scaled to the rest-frame\n #print(wavelengths_corrected)\n wavelength_array.append(wavelengths_corrected)\n\n inverse_variance=data[j][2] # the inverse variance of the flux\n one_over_sigma=inverse_variance**0.5\n sigma=1/one_over_sigma # the one-sigma uncertainty associated with the flux array\n sigma_array.append(sigma)\n \n smoothedFlux = convolve(flux_array,Box1DKernel(9))\n if 'flux' in dict:\n dict['flux'].append([smoothedFlux])\n else:\n dict['flux'] = [smoothedFlux]\n \n if 'wavelength' in dict:\n dict['wavelength'].append([wavelength_array])\n else:\n dict['wavelength'] = [wavelength_array]\n \n if '1sigma' in dict:\n dict['1sigma'].append([sigma_array])\n else:\n dict['1sigma'] = [sigma_array]\n\n # now return the nested dictionary with three keys:(flux, wavelength and sigma)\n # each key should have data.shape[0] number of arrays with all fluxes, wavelength and sigmas for every spec in spec_list\n return dict\n\n\ndef plot_spec(dict, radec, z): # takes as input the dictionary holding the data, the radec, and the redshift\n\n for i in range(len(dict['wavelength'])):\n #extract data\n wavelength = dict['wavelength'][i]\n sigma = dict['1sigma'][i]\n flux = dict['flux'][i]\n\n # instantiate a figure object\n fig=plt.figure()\n plt.title(str(radec)+str('; ')+'z={}'.format(z))\n plt.xlabel(\"Rest-frame Wavelength [$\\AA$]\")\n plt.ylabel(\"Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\\AA^{-1}$]\")\n plt.plot(wavelength, flux) # plot the actual data\n # now create upper and lower bounds on the uncertainty regions\n sigmaUpper=np.add(flux,sigma)\n sigmaLower=np.subtract(flux,sigma)\n plt.fill_between(wavelength, sigmaLower, sigmaUpper, color='grey', alpha=0.5)\n\n plt.show()\n\n\n\n#TEST\nradec='223812.39 +213203.4'\nz=redshift(radec)\ndata=extractor(radec)\nspec_list=downloader(data)\ndic = transform_data(spec_list,z)\nplot_spec(dic, radec, z)"
] |
[
[
"numpy.add",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.subtract",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
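Two small conversions inside Extractor.py are easy to check in isolation: SDSS spectra store wavelengths as log10 values (hence the 10**data[j][1] step in transform_data), and redshift_correct divides by (1 + z) to move to the object's rest frame. A toy numeric example follows; the loglam and redshift values are made up for the illustration.

import numpy as np

loglam = np.array([3.6, 3.7, 3.8])   # hypothetical log10(wavelength / Angstrom) values
z = 0.1                              # hypothetical redshift

observed = 10.0 ** loglam            # ~3981, 5012, 6310 Angstrom
rest_frame = observed / (1.0 + z)    # same division as redshift_correct() performs

print(np.round(observed, 0), np.round(rest_frame, 0))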
nasyxx/nnUNet
|
[
"92d5f2352349eed278e22f7a38cb86b0fccd7c75",
"92d5f2352349eed278e22f7a38cb86b0fccd7c75"
] |
[
"nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py",
"nnunet/evaluation/model_selection/summarize_results_in_one_json.py"
] |
[
"import torch\nfrom nnunet.network_architecture.generic_UNet import Generic_UNet\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \\\n nnUNetTrainerV2_insaneDA\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom torch import nn\n\n\nclass nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):\n def setup_DA_params(self):\n super().setup_DA_params()\n self.data_aug_params[\"p_rot\"] = 0.7\n self.data_aug_params[\"p_eldef\"] = 0.1\n self.data_aug_params[\"p_scale\"] = 0.3\n\n self.data_aug_params[\"independent_scale_factor_for_each_axis\"] = True\n self.data_aug_params[\"p_independent_scale_per_axis\"] = 0.3\n\n self.data_aug_params[\"do_additive_brightness\"] = True\n self.data_aug_params[\"additive_brightness_mu\"] = 0\n self.data_aug_params[\"additive_brightness_sigma\"] = 0.2\n self.data_aug_params[\"additive_brightness_p_per_sample\"] = 0.3\n self.data_aug_params[\"additive_brightness_p_per_channel\"] = 1\n\n self.data_aug_params[\"elastic_deform_alpha\"] = (0., 300.)\n self.data_aug_params[\"elastic_deform_sigma\"] = (9., 15.)\n\n self.data_aug_params['gamma_range'] = (0.5, 1.6)\n\n def initialize_network(self):\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.BatchNorm3d\n\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.BatchNorm2d\n\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,\n len(self.net_num_pool_op_kernel_sizes),\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)\n if torch.cuda.is_available():\n self.network.cuda()\n self.network.inference_apply_nonlin = softmax_helper\n\n \"\"\"def run_training(self):\n from batchviewer import view_batch\n a = next(self.tr_gen)\n view_batch(a['data'])\n import IPython;IPython.embed()\"\"\"\n",
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom nnunet.evaluation.add_mean_dice_to_json import foreground_mean\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.paths import network_training_output_dir\nimport numpy as np\n\n\ndef summarize(tasks, models=('2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'),\n output_dir=join(network_training_output_dir, \"summary_jsons\"), folds=(0, 1, 2, 3, 4)):\n maybe_mkdir_p(output_dir)\n\n if len(tasks) == 1 and tasks[0] == \"all\":\n tasks = list(range(999))\n else:\n tasks = [int(i) for i in tasks]\n\n for model in models:\n for t in tasks:\n t = int(t)\n if not isdir(join(network_training_output_dir, model)):\n continue\n task_name = subfolders(join(network_training_output_dir, model), prefix=\"Task%03.0d\" % t, join=False)\n if len(task_name) != 1:\n print(\"did not find unique output folder for network %s and task %s\" % (model, t))\n continue\n task_name = task_name[0]\n out_dir_task = join(network_training_output_dir, model, task_name)\n\n model_trainers = subdirs(out_dir_task, join=False)\n for trainer in model_trainers:\n if trainer.startswith(\"fold\"):\n continue\n out_dir = join(out_dir_task, trainer)\n\n validation_folders = []\n for fld in folds:\n d = join(out_dir, \"fold%d\"%fld)\n if not isdir(d):\n d = join(out_dir, \"fold_%d\"%fld)\n if not isdir(d):\n break\n validation_folders += subfolders(d, prefix=\"validation\", join=False)\n\n for v in validation_folders:\n ok = True\n metrics = OrderedDict()\n for fld in folds:\n d = join(out_dir, \"fold%d\"%fld)\n if not isdir(d):\n d = join(out_dir, \"fold_%d\"%fld)\n if not isdir(d):\n ok = False\n break\n validation_folder = join(d, v)\n\n if not isfile(join(validation_folder, \"summary.json\")):\n print(\"summary.json missing for net %s task %s fold %d\" % (model, task_name, fld))\n ok = False\n break\n\n metrics_tmp = load_json(join(validation_folder, \"summary.json\"))[\"results\"][\"mean\"]\n for l in metrics_tmp.keys():\n if metrics.get(l) is None:\n metrics[l] = OrderedDict()\n for m in metrics_tmp[l].keys():\n if metrics[l].get(m) is None:\n metrics[l][m] = []\n metrics[l][m].append(metrics_tmp[l][m])\n if ok:\n for l in metrics.keys():\n for m in metrics[l].keys():\n assert len(metrics[l][m]) == len(folds)\n metrics[l][m] = np.mean(metrics[l][m])\n json_out = OrderedDict()\n json_out[\"results\"] = OrderedDict()\n json_out[\"results\"][\"mean\"] = metrics\n json_out[\"task\"] = task_name\n json_out[\"description\"] = model + \" \" + task_name + \" all folds summary\"\n json_out[\"name\"] = model + \" \" + task_name + \" all folds summary\"\n json_out[\"experiment_name\"] = model\n save_json(json_out, join(out_dir, \"summary_allFolds__%s.json\" % v))\n save_json(json_out, join(output_dir, \"%s__%s__%s__%s.json\" % (task_name, model, trainer, v)))\n foreground_mean(join(out_dir, 
\"summary_allFolds__%s.json\" % v))\n foreground_mean(join(output_dir, \"%s__%s__%s__%s.json\" % (task_name, model, trainer, v)))\n\n\ndef summarize2(task_ids, models=('2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'),\n output_dir=join(network_training_output_dir, \"summary_jsons\"), folds=(0, 1, 2, 3, 4)):\n maybe_mkdir_p(output_dir)\n\n if len(task_ids) == 1 and task_ids[0] == \"all\":\n task_ids = list(range(999))\n else:\n task_ids = [int(i) for i in task_ids]\n\n for model in models:\n for t in task_ids:\n if not isdir(join(network_training_output_dir, model)):\n continue\n task_name = subfolders(join(network_training_output_dir, model), prefix=\"Task%03.0d\" % t, join=False)\n if len(task_name) != 1:\n print(\"did not find unique output folder for network %s and task %s\" % (model, t))\n continue\n task_name = task_name[0]\n out_dir_task = join(network_training_output_dir, model, task_name)\n\n model_trainers = subdirs(out_dir_task, join=False)\n for trainer in model_trainers:\n if trainer.startswith(\"fold\"):\n continue\n out_dir = join(out_dir_task, trainer)\n\n validation_folders = []\n for fld in folds:\n fold_output_dir = join(out_dir, \"fold_%d\"%fld)\n if not isdir(fold_output_dir):\n continue\n validation_folders += subfolders(fold_output_dir, prefix=\"validation\", join=False)\n\n validation_folders = np.unique(validation_folders)\n\n for v in validation_folders:\n ok = True\n metrics = OrderedDict()\n metrics['mean'] = OrderedDict()\n metrics['median'] = OrderedDict()\n metrics['all'] = OrderedDict()\n for fld in folds:\n fold_output_dir = join(out_dir, \"fold_%d\"%fld)\n\n if not isdir(fold_output_dir):\n print(\"fold missing\", model, task_name, trainer, fld)\n ok = False\n break\n validation_folder = join(fold_output_dir, v)\n\n if not isdir(validation_folder):\n print(\"validation folder missing\", model, task_name, trainer, fld, v)\n ok = False\n break\n\n if not isfile(join(validation_folder, \"summary.json\")):\n print(\"summary.json missing\", model, task_name, trainer, fld, v)\n ok = False\n break\n\n all_metrics = load_json(join(validation_folder, \"summary.json\"))[\"results\"]\n # we now need to get the mean and median metrics. 
We use the mean metrics just to get the\n # names of computed metics, we ignore the precomputed mean and do it ourselfes again\n mean_metrics = all_metrics[\"mean\"]\n all_labels = [i for i in list(mean_metrics.keys()) if i != \"mean\"]\n\n if len(all_labels) == 0: print(v, fld); break\n\n all_metrics_names = list(mean_metrics[all_labels[0]].keys())\n for l in all_labels:\n # initialize the data structure, no values are copied yet\n for k in ['mean', 'median', 'all']:\n if metrics[k].get(l) is None:\n metrics[k][l] = OrderedDict()\n for m in all_metrics_names:\n if metrics['all'][l].get(m) is None:\n metrics['all'][l][m] = []\n for entry in all_metrics['all']:\n for l in all_labels:\n for m in all_metrics_names:\n metrics['all'][l][m].append(entry[l][m])\n # now compute mean and median\n for l in metrics['all'].keys():\n for m in metrics['all'][l].keys():\n metrics['mean'][l][m] = np.nanmean(metrics['all'][l][m])\n metrics['median'][l][m] = np.nanmedian(metrics['all'][l][m])\n if ok:\n fold_string = \"\"\n for f in folds:\n fold_string += str(f)\n json_out = OrderedDict()\n json_out[\"results\"] = OrderedDict()\n json_out[\"results\"][\"mean\"] = metrics['mean']\n json_out[\"results\"][\"median\"] = metrics['median']\n json_out[\"task\"] = task_name\n json_out[\"description\"] = model + \" \" + task_name + \"summary folds\" + str(folds)\n json_out[\"name\"] = model + \" \" + task_name + \"summary folds\" + str(folds)\n json_out[\"experiment_name\"] = model\n save_json(json_out, join(output_dir, \"%s__%s__%s__%s__%s.json\" % (task_name, model, trainer, v, fold_string)))\n foreground_mean2(join(output_dir, \"%s__%s__%s__%s__%s.json\" % (task_name, model, trainer, v, fold_string)))\n\n\ndef foreground_mean2(filename):\n with open(filename, 'r') as f:\n res = json.load(f)\n class_ids = np.array([int(i) for i in res['results']['mean'].keys() if (i != 'mean') and i != '0'])\n\n metric_names = res['results']['mean']['1'].keys()\n res['results']['mean'][\"mean\"] = OrderedDict()\n res['results']['median'][\"mean\"] = OrderedDict()\n for m in metric_names:\n foreground_values = [res['results']['mean'][str(i)][m] for i in class_ids]\n res['results']['mean'][\"mean\"][m] = np.nanmean(foreground_values)\n foreground_values = [res['results']['median'][str(i)][m] for i in class_ids]\n res['results']['median'][\"mean\"][m] = np.nanmean(foreground_values)\n with open(filename, 'w') as f:\n json.dump(res, f, indent=4, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(usage=\"This is intended to identify the best model based on the five fold \"\n \"cross-validation. Running this script requires alle models to have been run \"\n \"already. This script will summarize the results of the five folds of all \"\n \"models in one json each for easy interpretability\")\n parser.add_argument(\"-t\", '--task_ids', nargs=\"+\", required=True, help=\"task id. can be 'all'\")\n parser.add_argument(\"-f\", '--folds', nargs=\"+\", required=False, type=int, default=[0, 1, 2, 3, 4])\n parser.add_argument(\"-m\", '--models', nargs=\"+\", required=False, default=['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'])\n\n args = parser.parse_args()\n tasks = args.task_ids\n models = args.models\n\n folds = args.folds\n summarize2(tasks, models, folds=folds, output_dir=join(network_training_output_dir, \"summary_jsons_new\"))\n\n"
] |
[
[
"torch.cuda.is_available"
],
[
"numpy.nanmean",
"numpy.mean",
"numpy.nanmedian",
"numpy.unique"
]
] |
bjtuyxc/detectron2
|
[
"ebb9f8c9166765c508f8ac53d9ed2004739b28d1"
] |
[
"tests/test_visualizer.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# File:\n\nimport numpy as np\nimport unittest\nimport torch\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.structures import Instances, RotatedBoxes, BoxMode\nfrom detectron2.utils.visualizer import Visualizer\n\n\nclass TestVisualizer(unittest.TestCase):\n def _random_data(self):\n H, W = 100, 100\n N = 10\n img = np.random.rand(H, W, 3) * 255\n boxxy = np.random.rand(N, 2) * (H // 2)\n boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)\n\n def _rand_poly():\n return np.random.rand(3, 2).flatten() * H\n\n polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]\n\n mask = np.zeros_like(img[:, :, 0], dtype=np.bool)\n mask[:10, 10:20] = 1\n\n labels = [str(i) for i in range(N)]\n return img, boxes, labels, polygons, [mask] * N\n\n @property\n def metadata(self):\n return MetadataCatalog.get(\"coco_2017_train\")\n\n def test_draw_dataset_dict(self):\n img = np.random.rand(512, 512, 3) * 255\n dic = {'annotations': [{'bbox': [368.9946492271106,\n 330.891438763377,\n 13.148537455410235,\n 13.644708680142685],\n 'bbox_mode': BoxMode.XYWH_ABS,\n 'category_id': 0,\n 'iscrowd': 1,\n 'segmentation': {'counts': '_jh52m?2N2N2N2O100O10O001N1O2MceP2',\n 'size': [512, 512]}}],\n 'height': 512,\n 'image_id': 1,\n 'width': 512}\n v = Visualizer(img, self.metadata)\n v.draw_dataset_dict(dic)\n\n def test_overlay_instances(self):\n img, boxes, labels, polygons, masks = self._random_data()\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n # Test 2x scaling\n v = Visualizer(img, self.metadata, scale=2.0)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape[0], img.shape[0] * 2)\n\n # Test overlay masks\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_overlay_instances_no_boxes(self):\n img, boxes, labels, polygons, _ = self._random_data()\n v = Visualizer(img, self.metadata)\n v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()\n\n def test_draw_instance_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_draw_empty_mask_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))\n\n v = Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_correct_output_shape(self):\n img = np.random.rand(928, 928, 3) * 255\n v = Visualizer(img, self.metadata)\n out = v.output.get_image()\n self.assertEqual(out.shape, img.shape)\n\n def test_overlay_rotated_instances(self):\n H, W = 100, 150\n img = np.random.rand(H, W, 3) * 255\n num_boxes = 50\n boxes_5d = 
torch.zeros(num_boxes, 5)\n boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)\n boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)\n boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)\n rotated_boxes = RotatedBoxes(boxes_5d)\n labels = [str(i) for i in range(num_boxes)]\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_draw_no_metadata(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img, MetadataCatalog.get(\"asdfasdf\"))\n v.draw_instance_predictions(inst)\n"
] |
[
[
"numpy.concatenate",
"torch.rand",
"numpy.zeros_like",
"torch.zeros",
"numpy.random.rand",
"numpy.asarray",
"torch.FloatTensor",
"torch.from_numpy",
"torch.randint",
"numpy.random.randint"
]
] |
mangye16/ReID-Label-Noise
|
[
"89aa11f68c275a0bcff232d9a5c3ae152c9276af"
] |
[
"PNet/train_pnet.py"
] |
[
"# -*- coding: UTF-8 -*-\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\n\nfrom tensorboardX import SummaryWriter\n\nimport sys\nimport json\nimport scipy\nimport os, time\nimport argparse\nimport numpy as np\nimport torchvision\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom shutil import copyfile\nfrom model import ft_net\nfrom test_eval_cython import get_test_acc, extr_fea_train\nfrom utils import *\nimport loader, loss\nimport pdb\n\nversion = torch.__version__\n# #####################################################################\n# argsions\n# --------\nparser = argparse.ArgumentParser(description='Training')\nparser.add_argument('--gpu',default='0', type=str,help='gpu ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--seed', default=1, type=int, help='rng seed')\nparser.add_argument('--model_dir',default='.checkpoint/', type=str, help='output model name')\nparser.add_argument('--data_dir',default='/home/comp/mangye/dataset/', type=str, help='data dir')\nparser.add_argument('--dataset',default='duke',type=str, help='training data:Market1501, DukeMTMCreID')\nparser.add_argument('--pretrained',default='',type=str, help='path of pretrained \"model:./model/baseline/net_8.pth\"')\nparser.add_argument('--batchsize', default=32, type=int, help='batchsize')\nparser.add_argument('--noise_ratio', default=0.2, type=float, help='percentage of noise data in the training')\nparser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')\nparser.add_argument('--alpha', default=2, type=float, help='beta distribution: alpha')\nparser.add_argument('--beta', default=6, type=float, help='beta distribution: beta')\nparser.add_argument('--LabelWt', default=60, type=int, help='label refinment weight')\nparser.add_argument('--weighttype', default=0, type=int, help='weight type: instance weight, class weight')\nparser.add_argument('--stage2', action='store_true', help='training stage 2')\n\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\n\nstart_epoch = 0\nif args.stage2:\n start_epoch = start_epoch + 20\n \nbest_acc = 0\ntest_epoch = 2\nlr = args.lr\ndata_dir = args.data_dir + args.dataset\nsuffix = args.dataset + '_noise_{}_'.format(args.noise_ratio)\nif args.LabelWt > 0 or args.stage2: \n suffix = suffix + 'batch_{}_wt_{}'.format(args.batchsize,args.LabelWt) \nelse:\n suffix = suffix + 'batch_{}_baseline'.format(args.batchsize) \n \n\nif args.stage2:\n suffix = suffix + '_beta_{}_{}_lr_{:1.1e}'.format(args.alpha, args.beta, args.lr)\n suffix = suffix + '_w_st2_new'\nelse:\n suffix = suffix + '_lr_{:1.1e}'.format(args.lr)\n suffix = suffix + '_w_st1'\n \nprint ('model: ' + suffix)\n\n# define the log path \nlog_dir = './new_res/' + args.dataset + '_log/'\ncheckpoint_path = './res/checkpoint/' \nvis_log_dir = log_dir + suffix + '/'\nif not os.path.isdir(log_dir):\n os.makedirs(log_dir)\nif not os.path.isdir(vis_log_dir):\n os.makedirs(vis_log_dir)\nwriter = SummaryWriter(vis_log_dir) \ntest_log_file = open(log_dir + suffix + '.txt', \"w\") \nsys.stdout = Logger(log_dir + suffix + '_os.txt')\n\n# define the gpu id\nstr_ids = args.gpu.split(',')\ngpu_ids = []\nfor str_id in str_ids:\n gid = int(str_id)\n if gid >=0:\n gpu_ids.append(gid)\n# set gpu ids\nif len(gpu_ids)>0:\n 
torch.cuda.set_device(gpu_ids[0])\n\nprint ('using gpu: {}'.format(gpu_ids))\n\n# #####################################################################\n# Load Data\ntrain_transform = transforms.Compose([\n #transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)\n transforms.Resize((288,144), interpolation=3),\n transforms.RandomCrop((256,128)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\ntest_transform = transforms.Compose([\n transforms.Resize((256,128), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n# load training dataDatasetFolder\nprint('Starting loading training data: ', args.dataset )\ntrain_dataset = loader.DatasetFolder(os.path.join(data_dir, 'train'), transform=train_transform)\nclass_names = train_dataset.classes\ndataset_sizes_train = len(train_dataset)\n\nuse_gpu = torch.cuda.is_available()\n\n# Define a model\nmodel = ft_net(len(class_names))\n\nif use_gpu:\n model = model.cuda()\n \n# Load a pretrainied model\nif args.pretrained or args.stage2:\n # model_name = 'market_noise_0.2_batch_32_lambda_0.4_lr_1.0e-02_st1_epoch_best.t'\n model_name = '{}_noise_{}_batch_32_wt_60_lr_1.0e-02_w_st1_epoch_best.t'.format(args.dataset, args.noise_ratio)\n print('Initilizaing weights with {}'.format(model_name))\n model_path = checkpoint_path + model_name\n model.load_state_dict(torch.load(model_path))\nelse:\n print('Initilizaing weights with ImageNet')\n \n# generate noisy label\nif args.noise_ratio >= 0:\n trainLabels = torch.LongTensor([y for (p, y, w) in train_dataset.imgs])\n trainLabels_nsy, if_truelbl = gen_nosiy_lbl(trainLabels, args.noise_ratio, len(class_names))\n print('Finish adding noisy label')\n\n# generate instance weight \nif args.stage2:\n print('Generating sef-generated weights......')\n weight_file = './new_res/' + 'new_{}_{}_weights.npy'.format(args.dataset, args.noise_ratio)\n label_file = './new_res/' + 'new_{}_{}_label.npy'.format(args.dataset, args.noise_ratio)\n # if os.path.exists(weight_file):\n # all_weights = np.load(weight_file)\n # pre_pids = np.load(label_file)\n # else:\n tansform_bak = train_transform\n train_dataset.transform = test_transform\n temploader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=False, num_workers=8)\n \n model.eval() # Set model to evaluate mode\n print('Start extract features...')\n start = time.time()\n train_feas, pre_pids = extr_fea_train(model, train_dataset, temploader, use_gpu)\n \n print('Evaluation time: {}'.format(time.time()-start))\n indexs, ori_weight = gen_weights_dist(train_feas, trainLabels_nsy, class_names, args.alpha, args.beta)\n order = np.argsort(indexs)\n all_weights = ori_weight[order]\n np.save(weight_file, all_weights)\n np.save(label_file, pre_pids)\n train_dataset.transform = tansform_bak\n all_weights = all_weights.astype(np.float32)\n for i in range(len(trainLabels_nsy)):\n train_dataset.imgs[i] = (train_dataset.imgs[i][0], int(pre_pids[i]), all_weights[i]) \nelse:\n print('Setting same weights for all the instances...')\n for i in range(len(trainLabels_nsy)):\n train_dataset.imgs[i] = (train_dataset.imgs[i][0], trainLabels_nsy[i],1) \n\n \ndataloaders_train = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=8) # 8 workers may work faster\n\n# load testing dataDatasetFolder\ntest_dataset = {x: 
datasets.ImageFolder( os.path.join(data_dir,x) ,test_transform) for x in ['gallery','query']}\ndataloaders_test = {x: torch.utils.data.DataLoader(test_dataset[x], batch_size=args.batchsize, shuffle=False, num_workers=8) for x in ['gallery','query']}\n\n# Define loss functions\n# if args.LabelWt>0:\n # criterion = loss.LabelRefineLoss(lambda1=args.LabelWt)\nif args.stage2:\n criterion = loss.InstanceWeightLoss(weighted = 1)\nelse:\n criterion = nn.CrossEntropyLoss()\n\n# optimizer\nignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))\nbase_params = filter(lambda p: id(p) not in ignored_params, model.parameters())\noptimizer_ft = optim.SGD([\n {'params': base_params, 'lr': lr},\n {'params': model.model.fc.parameters(), 'lr': lr*10},\n {'params': model.classifier.parameters(), 'lr': lr*10}\n ], weight_decay=5e-4, momentum=0.9, nesterov=True)\n\n# Decay LR by a factor of 0.1 every 40 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)\n\ndef save_network(network, epoch_label, is_best = False):\n if is_best:\n save_path = checkpoint_path + suffix + '_epoch_best.t'\n else:\n save_path = checkpoint_path + suffix + '_epoch_{}.t'.format(epoch_label)\n torch.save(network.state_dict(), save_path)\ndef sigmoid_rampup(current, rampup_length):\n \"\"\"Exponential rampup from https://arxiv.org/abs/1610.02242\"\"\"\n if rampup_length == 0:\n return 1.0\n else:\n current = np.clip(current, 0.0, rampup_length)\n phase = 1.0 - current / rampup_length\n w = float(np.exp(-2.0 * phase * phase))\n return min(w,0.5)\n \ndef train_model(model, criterion, optimizer_ft, scheduler, epoch):\n \n scheduler.step()\n lambda1 = sigmoid_rampup(epoch, args.LabelWt)\n train_loss = AverageMeter()\n data_time = AverageMeter()\n batch_time = AverageMeter()\n \n model.train()\n correct = 0\n total = 0\n end = time.time()\n for batch_idx, (inputs, targets, weights) in enumerate(dataloaders_train):\n if use_gpu:\n inputs = Variable(inputs.cuda()) \n targets = Variable(targets.cuda()) \n weights = Variable(weights.cuda())\n data_time.update(time.time() - end) \n \n optimizer_ft.zero_grad()\n \n outputs = model(inputs)\n \n if args.stage2:\n loss = criterion(outputs, targets, weights) \n else:\n loss = criterion(outputs, targets, lambda1) \n \n loss.backward()\n optimizer_ft.step()\n \n train_loss.update(loss.item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n \n _, predicted = outputs.max(1)\n correct += predicted.eq(targets).sum().item()\n total += inputs.size(0)\n \n if batch_idx%10==0:\n print('Epoch: [{}][{}/{}] '\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '\n 'Accu: {:.2f}'.format(\n epoch, batch_idx, len(dataloaders_train),100.*correct/total, batch_time=batch_time, data_time=data_time, train_loss=train_loss))\n\n writer.add_scalar('training acc (train)', 100.*correct/total, epoch)\n writer.add_scalar('loss', train_loss.avg, epoch)\n\n\nfor epoch in range(start_epoch, start_epoch+41):\n\n # training\n print('Start Training..........')\n train_model(model, criterion, optimizer_ft, exp_lr_scheduler, epoch)\n \n # evaluation\n if epoch%test_epoch ==0:\n model.eval() # Set model to evaluate mode\n start = time.time()\n cmc, mAP = get_test_acc(model, test_dataset, dataloaders_test, use_gpu, max_rank=10)\n if cmc[0] > best_acc:\n best_epoch = epoch\n best_acc = cmc[0]\n 
save_network(model, epoch, is_best = True)\n print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(\n epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch)) \n print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(\n epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch), file = test_log_file) \n test_log_file.flush()\n print('Evaluation time: {}'.format(time.time()-start))\n \n # if epoch%20==0:\n # save_network(model, epoch, is_best = False)"
] |
[
[
"matplotlib.use",
"numpy.clip",
"torch.optim.lr_scheduler.StepLR",
"numpy.exp",
"numpy.save",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"numpy.argsort",
"torch.load",
"torch.nn.CrossEntropyLoss"
]
] |
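train_pnet.py above gates its label-refinement weight with sigmoid_rampup, a variant of the exponential ramp-up cited in its own docstring (arXiv:1610.02242) that uses exp(-2 * phase^2) and caps the weight at 0.5. Restated on its own below, with a few tabulated values assuming the script's default LabelWt of 60 as the ramp length (the epoch numbers are arbitrary sample points).

import numpy as np

def sigmoid_rampup(current, rampup_length):
    # Same logic as in train_pnet.py: ramp from exp(-2) toward 1, clipped at 0.5.
    if rampup_length == 0:
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - current / rampup_length
    return min(float(np.exp(-2.0 * phase * phase)), 0.5)

for epoch in (0, 15, 30, 45, 60):
    print(epoch, round(sigmoid_rampup(epoch, 60), 3))
# 0 -> 0.135, 15 -> 0.325, 30 -> 0.5 (capped), 45 -> 0.5, 60 -> 0.5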
mukulbalodi/rasa
|
[
"3126ef1148c165f2402f3c7203138d429e46c68c"
] |
[
"rasa/nlu/classifiers/diet_classifier.py"
] |
[
"from __future__ import annotations\nimport copy\nimport logging\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom rasa.nlu.featurizers.featurizer import Featurizer\n\nimport numpy as np\nimport scipy.sparse\nimport tensorflow as tf\n\nfrom typing import Any, Dict, List, Optional, Text, Tuple, Union, Type\n\nfrom rasa.engine.graph import ExecutionContext, GraphComponent\nfrom rasa.engine.recipes.default_recipe import DefaultV1Recipe\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.nlu.extractors.extractor import EntityExtractorMixin\nfrom rasa.nlu.classifiers.classifier import IntentClassifier\nimport rasa.shared.utils.io\nimport rasa.utils.io as io_utils\nimport rasa.nlu.utils.bilou_utils as bilou_utils\nfrom rasa.shared.constants import DIAGNOSTIC_DATA\nfrom rasa.nlu.extractors.extractor import EntityTagSpec\nfrom rasa.nlu.classifiers import LABEL_RANKING_LENGTH\nfrom rasa.utils import train_utils\nfrom rasa.utils.tensorflow import rasa_layers\nfrom rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel\nfrom rasa.utils.tensorflow.model_data import (\n RasaModelData,\n FeatureSignature,\n FeatureArray,\n)\nfrom rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE\nfrom rasa.shared.nlu.constants import (\n SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n TEXT,\n INTENT,\n INTENT_RESPONSE_KEY,\n ENTITIES,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_GROUP,\n ENTITY_ATTRIBUTE_ROLE,\n NO_ENTITY_TAG,\n SPLIT_ENTITIES_BY_COMMA,\n)\nfrom rasa.shared.exceptions import InvalidConfigException\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.utils.tensorflow.constants import (\n LABEL,\n IDS,\n HIDDEN_LAYERS_SIZES,\n RENORMALIZE_CONFIDENCES,\n SHARE_HIDDEN_LAYERS,\n TRANSFORMER_SIZE,\n NUM_TRANSFORMER_LAYERS,\n NUM_HEADS,\n BATCH_SIZES,\n BATCH_STRATEGY,\n EPOCHS,\n RANDOM_SEED,\n LEARNING_RATE,\n RANKING_LENGTH,\n LOSS_TYPE,\n SIMILARITY_TYPE,\n NUM_NEG,\n SPARSE_INPUT_DROPOUT,\n DENSE_INPUT_DROPOUT,\n MASKED_LM,\n ENTITY_RECOGNITION,\n TENSORBOARD_LOG_DIR,\n INTENT_CLASSIFICATION,\n EVAL_NUM_EXAMPLES,\n EVAL_NUM_EPOCHS,\n UNIDIRECTIONAL_ENCODER,\n DROP_RATE,\n DROP_RATE_ATTENTION,\n CONNECTION_DENSITY,\n NEGATIVE_MARGIN_SCALE,\n REGULARIZATION_CONSTANT,\n SCALE_LOSS,\n USE_MAX_NEG_SIM,\n MAX_NEG_SIM,\n MAX_POS_SIM,\n EMBEDDING_DIMENSION,\n BILOU_FLAG,\n KEY_RELATIVE_ATTENTION,\n VALUE_RELATIVE_ATTENTION,\n MAX_RELATIVE_POSITION,\n AUTO,\n BALANCED,\n CROSS_ENTROPY,\n TENSORBOARD_LOG_LEVEL,\n CONCAT_DIMENSION,\n FEATURIZERS,\n CHECKPOINT_MODEL,\n SEQUENCE,\n SENTENCE,\n SEQUENCE_LENGTH,\n DENSE_DIMENSION,\n MASK,\n CONSTRAIN_SIMILARITIES,\n MODEL_CONFIDENCE,\n SOFTMAX,\n)\n\nlogger = logging.getLogger(__name__)\n\nSPARSE = \"sparse\"\nDENSE = \"dense\"\nLABEL_KEY = LABEL\nLABEL_SUB_KEY = IDS\n\nPOSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]\n\n\n@DefaultV1Recipe.register(\n [\n DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,\n DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,\n ],\n is_trainable=True,\n)\nclass DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):\n \"\"\"A multi-task model for intent classification and entity extraction.\n\n DIET is Dual Intent and Entity Transformer.\n The architecture is based on a transformer which is shared for both tasks.\n A sequence of entity labels is predicted through a Conditional Random Field (CRF)\n 
tagging layer on top of the transformer output sequence corresponding to the\n input sequence of tokens. The transformer output for the ``__CLS__`` token and\n intent labels are embedded into a single semantic vector space. We use the\n dot-product loss to maximize the similarity with the target label and minimize\n similarities with negative samples.\n \"\"\"\n\n @classmethod\n def required_components(cls) -> List[Type]:\n \"\"\"Components that should be included in the pipeline before this component.\"\"\"\n return [Featurizer]\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"The component's default config (see parent class for full docstring).\"\"\"\n # please make sure to update the docs when changing a default parameter\n return {\n # ## Architecture of the used neural network\n # Hidden layer sizes for layers before the embedding layers for user message\n # and labels.\n # The number of hidden layers is equal to the length of the corresponding\n # list.\n HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},\n # Whether to share the hidden layer weights between user message and labels.\n SHARE_HIDDEN_LAYERS: False,\n # Number of units in transformer\n TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,\n # Number of transformer layers\n NUM_TRANSFORMER_LAYERS: 2,\n # Number of attention heads in transformer\n NUM_HEADS: 4,\n # If 'True' use key relative embeddings in attention\n KEY_RELATIVE_ATTENTION: False,\n # If 'True' use value relative embeddings in attention\n VALUE_RELATIVE_ATTENTION: False,\n # Max position for relative embeddings. Only in effect if key- or value\n # relative attention are turned on\n MAX_RELATIVE_POSITION: 5,\n # Use a unidirectional or bidirectional encoder.\n UNIDIRECTIONAL_ENCODER: False,\n # ## Training parameters\n # Initial and final batch sizes:\n # Batch size will be linearly increased for each epoch.\n BATCH_SIZES: [64, 256],\n # Strategy used when creating batches.\n # Can be either 'sequence' or 'balanced'.\n BATCH_STRATEGY: BALANCED,\n # Number of epochs to train\n EPOCHS: 300,\n # Set random seed to any 'int' to get reproducible results\n RANDOM_SEED: None,\n # Initial learning rate for the optimizer\n LEARNING_RATE: 0.001,\n # ## Parameters for embeddings\n # Dimension size of embedding vectors\n EMBEDDING_DIMENSION: 20,\n # Dense dimension to use for sparse features.\n DENSE_DIMENSION: {TEXT: 128, LABEL: 20},\n # Default dimension to use for concatenating sequence and sentence features.\n CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},\n # The number of incorrect labels. The algorithm will minimize\n # their similarity to the user input during training.\n NUM_NEG: 20,\n # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.\n SIMILARITY_TYPE: AUTO,\n # The type of the loss function, either 'cross_entropy' or 'margin'.\n LOSS_TYPE: CROSS_ENTROPY,\n # Number of top intents for which confidences should be reported.\n # Set to 0 if confidences for all intents should be reported.\n RANKING_LENGTH: LABEL_RANKING_LENGTH,\n # Indicates how similar the algorithm should try to make embedding vectors\n # for correct labels.\n # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.\n MAX_POS_SIM: 0.8,\n # Maximum negative similarity for incorrect labels.\n # Should be -1.0 < ... 
< 1.0 for 'cosine' similarity type.\n MAX_NEG_SIM: -0.4,\n # If 'True' the algorithm only minimizes maximum similarity over\n # incorrect intent labels, used only if 'loss_type' is set to 'margin'.\n USE_MAX_NEG_SIM: True,\n # If 'True' scale loss inverse proportionally to the confidence\n # of the correct prediction\n SCALE_LOSS: False,\n # ## Regularization parameters\n # The scale of regularization\n REGULARIZATION_CONSTANT: 0.002,\n # The scale of how important is to minimize the maximum similarity\n # between embeddings of different labels,\n # used only if 'loss_type' is set to 'margin'.\n NEGATIVE_MARGIN_SCALE: 0.8,\n # Dropout rate for encoder\n DROP_RATE: 0.2,\n # Dropout rate for attention\n DROP_RATE_ATTENTION: 0,\n # Fraction of trainable weights in internal layers.\n CONNECTION_DENSITY: 0.2,\n # If 'True' apply dropout to sparse input tensors\n SPARSE_INPUT_DROPOUT: True,\n # If 'True' apply dropout to dense input tensors\n DENSE_INPUT_DROPOUT: True,\n # ## Evaluation parameters\n # How often calculate validation accuracy.\n # Small values may hurt performance.\n EVAL_NUM_EPOCHS: 20,\n # How many examples to use for hold out validation set\n # Large values may hurt performance, e.g. model accuracy.\n # Set to 0 for no validation.\n EVAL_NUM_EXAMPLES: 0,\n # ## Model config\n # If 'True' intent classification is trained and intent predicted.\n INTENT_CLASSIFICATION: True,\n # If 'True' named entity recognition is trained and entities predicted.\n ENTITY_RECOGNITION: True,\n # If 'True' random tokens of the input message will be masked and the model\n # should predict those tokens.\n MASKED_LM: False,\n # 'BILOU_flag' determines whether to use BILOU tagging or not.\n # If set to 'True' labelling is more rigorous, however more\n # examples per entity are required.\n # Rule of thumb: you should have more than 100 examples per entity.\n BILOU_FLAG: True,\n # If you want to use tensorboard to visualize training and validation\n # metrics, set this option to a valid output directory.\n TENSORBOARD_LOG_DIR: None,\n # Define when training metrics for tensorboard should be logged.\n # Either after every epoch or for every training step.\n # Valid values: 'epoch' and 'batch'\n TENSORBOARD_LOG_LEVEL: \"epoch\",\n # Perform model checkpointing\n CHECKPOINT_MODEL: False,\n # Specify what features to use as sequence and sentence features\n # By default all features in the pipeline are used.\n FEATURIZERS: [],\n # Split entities by comma, this makes sense e.g. for a list of ingredients\n # in a recipie, but it doesn't make sense for the parts of an address\n SPLIT_ENTITIES_BY_COMMA: True,\n # If 'True' applies sigmoid on all similarity terms and adds\n # it to the loss function to ensure that similarity values are\n # approximately bounded. Used inside cross-entropy loss only.\n CONSTRAIN_SIMILARITIES: False,\n # Model confidence to be returned during inference. Currently, the only\n # possible value is `softmax`.\n MODEL_CONFIDENCE: SOFTMAX,\n # Determines whether the confidences of the chosen top intents should be\n # renormalized so that they sum up to 1. 
By default, we do not renormalize\n # and return the confidences for the top intents as is.\n # Note that renormalization only makes sense if confidences are generated\n # via `softmax`.\n RENORMALIZE_CONFIDENCES: False,\n }\n\n def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n index_label_id_mapping: Optional[Dict[int, Text]] = None,\n entity_tag_specs: Optional[List[EntityTagSpec]] = None,\n model: Optional[RasaModel] = None,\n sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,\n ) -> None:\n \"\"\"Declare instance variables with default values.\"\"\"\n if EPOCHS not in config:\n rasa.shared.utils.io.raise_warning(\n f\"Please configure the number of '{EPOCHS}' in your configuration file.\"\n f\" We will change the default value of '{EPOCHS}' in the future to 1. \"\n )\n\n self.component_config = config\n self._model_storage = model_storage\n self._resource = resource\n self._execution_context = execution_context\n\n self._check_config_parameters()\n\n # transform numbers to labels\n self.index_label_id_mapping = index_label_id_mapping or {}\n\n self._entity_tag_specs = entity_tag_specs\n\n self.model = model\n\n self.tmp_checkpoint_dir = None\n if self.component_config[CHECKPOINT_MODEL]:\n self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())\n\n self._label_data: Optional[RasaModelData] = None\n self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None\n\n self.split_entities_config = rasa.utils.train_utils.init_split_entities(\n self.component_config[SPLIT_ENTITIES_BY_COMMA],\n SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n )\n\n self.finetune_mode = self._execution_context.is_finetuning\n self._sparse_feature_sizes = sparse_feature_sizes\n\n # init helpers\n def _check_masked_lm(self) -> None:\n if (\n self.component_config[MASKED_LM]\n and self.component_config[NUM_TRANSFORMER_LAYERS] == 0\n ):\n raise ValueError(\n f\"If number of transformer layers is 0, \"\n f\"'{MASKED_LM}' option should be 'False'.\"\n )\n\n def _check_share_hidden_layers_sizes(self) -> None:\n if self.component_config.get(SHARE_HIDDEN_LAYERS):\n first_hidden_layer_sizes = next(\n iter(self.component_config[HIDDEN_LAYERS_SIZES].values())\n )\n # check that all hidden layer sizes are the same\n identical_hidden_layer_sizes = all(\n current_hidden_layer_sizes == first_hidden_layer_sizes\n for current_hidden_layer_sizes in self.component_config[\n HIDDEN_LAYERS_SIZES\n ].values()\n )\n if not identical_hidden_layer_sizes:\n raise ValueError(\n f\"If hidden layer weights are shared, \"\n f\"{HIDDEN_LAYERS_SIZES} must coincide.\"\n )\n\n def _check_config_parameters(self) -> None:\n self.component_config = train_utils.check_deprecated_options(\n self.component_config\n )\n\n self._check_masked_lm()\n self._check_share_hidden_layers_sizes()\n\n self.component_config = train_utils.update_confidence_type(\n self.component_config\n )\n\n train_utils.validate_configuration_settings(self.component_config)\n\n self.component_config = train_utils.update_similarity_type(\n self.component_config\n )\n self.component_config = train_utils.update_evaluation_parameters(\n self.component_config\n )\n\n @classmethod\n def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> DIETClassifier:\n \"\"\"Creates a new untrained component (see parent class for full docstring).\"\"\"\n return cls(config, 
model_storage, resource, execution_context)\n\n @property\n def label_key(self) -> Optional[Text]:\n \"\"\"Return key if intent classification is activated.\"\"\"\n return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None\n\n @property\n def label_sub_key(self) -> Optional[Text]:\n \"\"\"Return sub key if intent classification is activated.\"\"\"\n return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None\n\n @staticmethod\n def model_class() -> Type[RasaModel]:\n return DIET\n\n # training data helpers:\n @staticmethod\n def _label_id_index_mapping(\n training_data: TrainingData, attribute: Text\n ) -> Dict[Text, int]:\n \"\"\"Create label_id dictionary.\"\"\"\n\n distinct_label_ids = {\n example.get(attribute) for example in training_data.intent_examples\n } - {None}\n return {\n label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))\n }\n\n @staticmethod\n def _invert_mapping(mapping: Dict) -> Dict:\n return {value: key for key, value in mapping.items()}\n\n def _create_entity_tag_specs(\n self, training_data: TrainingData\n ) -> List[EntityTagSpec]:\n \"\"\"Create entity tag specifications with their respective tag id mappings.\"\"\"\n\n _tag_specs = []\n\n for tag_name in POSSIBLE_TAGS:\n if self.component_config[BILOU_FLAG]:\n tag_id_index_mapping = bilou_utils.build_tag_id_dict(\n training_data, tag_name\n )\n else:\n tag_id_index_mapping = self._tag_id_index_mapping_for(\n tag_name, training_data\n )\n\n if tag_id_index_mapping:\n _tag_specs.append(\n EntityTagSpec(\n tag_name=tag_name,\n tags_to_ids=tag_id_index_mapping,\n ids_to_tags=self._invert_mapping(tag_id_index_mapping),\n num_tags=len(tag_id_index_mapping),\n )\n )\n\n return _tag_specs\n\n @staticmethod\n def _tag_id_index_mapping_for(\n tag_name: Text, training_data: TrainingData\n ) -> Optional[Dict[Text, int]]:\n \"\"\"Create mapping from tag name to id.\"\"\"\n if tag_name == ENTITY_ATTRIBUTE_ROLE:\n distinct_tags = training_data.entity_roles\n elif tag_name == ENTITY_ATTRIBUTE_GROUP:\n distinct_tags = training_data.entity_groups\n else:\n distinct_tags = training_data.entities\n\n distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}\n\n if not distinct_tags:\n return None\n\n tag_id_dict = {\n tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)\n }\n # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index\n # needed for correct prediction for padding\n tag_id_dict[NO_ENTITY_TAG] = 0\n\n return tag_id_dict\n\n @staticmethod\n def _find_example_for_label(\n label: Text, examples: List[Message], attribute: Text\n ) -> Optional[Message]:\n for ex in examples:\n if ex.get(attribute) == label:\n return ex\n return None\n\n def _check_labels_features_exist(\n self, labels_example: List[Message], attribute: Text\n ) -> bool:\n \"\"\"Checks if all labels have features set.\"\"\"\n\n return all(\n label_example.features_present(\n attribute, self.component_config[FEATURIZERS]\n )\n for label_example in labels_example\n )\n\n def _extract_features(\n self, message: Message, attribute: Text\n ) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:\n\n (\n sparse_sequence_features,\n sparse_sentence_features,\n ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])\n dense_sequence_features, dense_sentence_features = message.get_dense_features(\n attribute, self.component_config[FEATURIZERS]\n )\n\n if dense_sequence_features is not None and sparse_sequence_features is not None:\n if (\n 
dense_sequence_features.features.shape[0]\n != sparse_sequence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sequence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n if dense_sentence_features is not None and sparse_sentence_features is not None:\n if (\n dense_sentence_features.features.shape[0]\n != sparse_sentence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sentence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n\n # If we don't use the transformer and we don't want to do entity recognition,\n # to speed up training take only the sentence features as feature vector.\n # We would not make use of the sequence anyway in this setup. Carrying over\n # those features to the actual training process takes quite some time.\n if (\n self.component_config[NUM_TRANSFORMER_LAYERS] == 0\n and not self.component_config[ENTITY_RECOGNITION]\n and attribute not in [INTENT, INTENT_RESPONSE_KEY]\n ):\n sparse_sequence_features = None\n dense_sequence_features = None\n\n out = {}\n\n if sparse_sentence_features is not None:\n out[f\"{SPARSE}_{SENTENCE}\"] = sparse_sentence_features.features\n if sparse_sequence_features is not None:\n out[f\"{SPARSE}_{SEQUENCE}\"] = sparse_sequence_features.features\n if dense_sentence_features is not None:\n out[f\"{DENSE}_{SENTENCE}\"] = dense_sentence_features.features\n if dense_sequence_features is not None:\n out[f\"{DENSE}_{SEQUENCE}\"] = dense_sequence_features.features\n\n return out\n\n def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:\n \"\"\"Checks if features have same dimensionality if hidden layers are shared.\"\"\"\n if self.component_config.get(SHARE_HIDDEN_LAYERS):\n num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)\n num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)\n num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)\n num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)\n\n if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (\n 0 < num_text_sequence_features != num_label_sequence_features > 0\n ):\n raise ValueError(\n \"If embeddings are shared text features and label features \"\n \"must coincide. Check the output dimensions of previous components.\"\n )\n\n def _extract_labels_precomputed_features(\n self, label_examples: List[Message], attribute: Text = INTENT\n ) -> Tuple[List[FeatureArray], List[FeatureArray]]:\n \"\"\"Collects precomputed encodings.\"\"\"\n features = defaultdict(list)\n\n for e in label_examples:\n label_features = self._extract_features(e, attribute)\n for feature_key, feature_value in label_features.items():\n features[feature_key].append(feature_value)\n sequence_features = []\n sentence_features = []\n for feature_name, feature_value in features.items():\n if SEQUENCE in feature_name:\n sequence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n else:\n sentence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n return sequence_features, sentence_features\n\n @staticmethod\n def _compute_default_label_features(\n labels_example: List[Message],\n ) -> List[FeatureArray]:\n \"\"\"Computes one-hot representation for the labels.\"\"\"\n logger.debug(\"No label features found. 
Computing default label features.\")\n\n eye_matrix = np.eye(len(labels_example), dtype=np.float32)\n # add sequence dimension to one-hot labels\n return [\n FeatureArray(\n np.array([np.expand_dims(a, 0) for a in eye_matrix]),\n number_of_dimensions=3,\n )\n ]\n\n def _create_label_data(\n self,\n training_data: TrainingData,\n label_id_dict: Dict[Text, int],\n attribute: Text,\n ) -> RasaModelData:\n \"\"\"Create matrix with label_ids encoded in rows as bag of words.\n\n Find a training example for each label and get the encoded features\n from the corresponding Message object.\n If the features are already computed, fetch them from the message object\n else compute a one hot encoding for the label as the feature vector.\n \"\"\"\n # Collect one example for each label\n labels_idx_examples = []\n for label_name, idx in label_id_dict.items():\n label_example = self._find_example_for_label(\n label_name, training_data.intent_examples, attribute\n )\n labels_idx_examples.append((idx, label_example))\n\n # Sort the list of tuples based on label_idx\n labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])\n labels_example = [example for (_, example) in labels_idx_examples]\n # Collect features, precomputed if they exist, else compute on the fly\n if self._check_labels_features_exist(labels_example, attribute):\n (\n sequence_features,\n sentence_features,\n ) = self._extract_labels_precomputed_features(labels_example, attribute)\n else:\n sequence_features = None\n sentence_features = self._compute_default_label_features(labels_example)\n\n label_data = RasaModelData()\n label_data.add_features(LABEL, SEQUENCE, sequence_features)\n label_data.add_features(LABEL, SENTENCE, sentence_features)\n if label_data.does_feature_not_exist(\n LABEL, SENTENCE\n ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):\n raise ValueError(\n \"No label features are present. 
Please check your configuration file.\"\n )\n\n label_ids = np.array([idx for (idx, _) in labels_idx_examples])\n # explicitly add last dimension to label_ids\n # to track correctly dynamic sequences\n label_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n\n label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)\n\n return label_data\n\n def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:\n feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)\n all_label_features = feature_arrays[0]\n return [\n FeatureArray(\n np.array([all_label_features[label_id] for label_id in label_ids]),\n number_of_dimensions=all_label_features.number_of_dimensions,\n )\n ]\n\n def _create_model_data(\n self,\n training_data: List[Message],\n label_id_dict: Optional[Dict[Text, int]] = None,\n label_attribute: Optional[Text] = None,\n training: bool = True,\n ) -> RasaModelData:\n \"\"\"Prepare data for training and create a RasaModelData object.\"\"\"\n from rasa.utils.tensorflow import model_data_utils\n\n attributes_to_consider = [TEXT]\n if training and self.component_config[INTENT_CLASSIFICATION]:\n # we don't have any intent labels during prediction, just add them during\n # training\n attributes_to_consider.append(label_attribute)\n if (\n training\n and self.component_config[ENTITY_RECOGNITION]\n and self._entity_tag_specs\n ):\n # Add entities as labels only during training and only if there was\n # training data added for entities with DIET configured to predict entities.\n attributes_to_consider.append(ENTITIES)\n\n if training and label_attribute is not None:\n # only use those training examples that have the label_attribute set\n # during training\n training_data = [\n example for example in training_data if label_attribute in example.data\n ]\n\n training_data = [\n message\n for message in training_data\n if message.features_present(\n attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)\n )\n ]\n\n if not training_data:\n # no training data are present to train\n return RasaModelData()\n\n (\n features_for_examples,\n sparse_feature_sizes,\n ) = model_data_utils.featurize_training_examples(\n training_data,\n attributes_to_consider,\n entity_tag_specs=self._entity_tag_specs,\n featurizers=self.component_config[FEATURIZERS],\n bilou_tagging=self.component_config[BILOU_FLAG],\n )\n attribute_data, _ = model_data_utils.convert_to_data_format(\n features_for_examples, consider_dialogue_dimension=False\n )\n\n model_data = RasaModelData(\n label_key=self.label_key, label_sub_key=self.label_sub_key\n )\n model_data.add_data(attribute_data)\n model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)\n # Current implementation doesn't yet account for updating sparse\n # feature sizes of label attributes. 
That's why we remove them.\n sparse_feature_sizes = self._remove_label_sparse_feature_sizes(\n sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute\n )\n model_data.add_sparse_feature_sizes(sparse_feature_sizes)\n\n self._add_label_features(\n model_data, training_data, label_attribute, label_id_dict, training\n )\n\n # make sure all keys are in the same order during training and prediction\n # as we rely on the order of key and sub-key when constructing the actual\n # tensors from the model data\n model_data.sort()\n\n return model_data\n\n @staticmethod\n def _remove_label_sparse_feature_sizes(\n sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],\n label_attribute: Optional[Text] = None,\n ) -> Dict[Text, Dict[Text, List[int]]]:\n\n if label_attribute in sparse_feature_sizes:\n del sparse_feature_sizes[label_attribute]\n return sparse_feature_sizes\n\n def _add_label_features(\n self,\n model_data: RasaModelData,\n training_data: List[Message],\n label_attribute: Text,\n label_id_dict: Dict[Text, int],\n training: bool = True,\n ) -> None:\n label_ids = []\n if training and self.component_config[INTENT_CLASSIFICATION]:\n for example in training_data:\n if example.get(label_attribute):\n label_ids.append(label_id_dict[example.get(label_attribute)])\n # explicitly add last dimension to label_ids\n # to track correctly dynamic sequences\n model_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n\n if (\n label_attribute\n and model_data.does_feature_not_exist(label_attribute, SENTENCE)\n and model_data.does_feature_not_exist(label_attribute, SEQUENCE)\n ):\n # no label features are present, get default features from _label_data\n model_data.add_features(\n LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))\n )\n\n # as label_attribute can have different values, e.g. 
INTENT or RESPONSE,\n # copy over the features to the LABEL key to make\n # it easier to access the label features inside the model itself\n model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)\n model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)\n model_data.update_key(label_attribute, MASK, LABEL, MASK)\n\n model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)\n\n # train helpers\n def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:\n \"\"\"Prepares data for training.\n\n Performs sanity checks on training data, extracts encodings for labels.\n \"\"\"\n if self.component_config[BILOU_FLAG]:\n bilou_utils.apply_bilou_schema(training_data)\n\n label_id_index_mapping = self._label_id_index_mapping(\n training_data, attribute=INTENT\n )\n\n if not label_id_index_mapping:\n # no labels are present to train\n return RasaModelData()\n\n self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)\n\n self._label_data = self._create_label_data(\n training_data, label_id_index_mapping, attribute=INTENT\n )\n\n self._entity_tag_specs = self._create_entity_tag_specs(training_data)\n\n label_attribute = (\n INTENT if self.component_config[INTENT_CLASSIFICATION] else None\n )\n model_data = self._create_model_data(\n training_data.nlu_examples,\n label_id_index_mapping,\n label_attribute=label_attribute,\n )\n\n self._check_input_dimension_consistency(model_data)\n\n return model_data\n\n @staticmethod\n def _check_enough_labels(model_data: RasaModelData) -> bool:\n return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2\n\n def train(self, training_data: TrainingData) -> Resource:\n \"\"\"Train the embedding intent classifier on a data set.\"\"\"\n model_data = self.preprocess_train_data(training_data)\n if model_data.is_empty():\n logger.debug(\n f\"Cannot train '{self.__class__.__name__}'. No data was provided. \"\n f\"Skipping training of the classifier.\"\n )\n return self._resource\n\n if not self.model and self.finetune_mode:\n raise rasa.shared.exceptions.InvalidParameterException(\n f\"{self.__class__.__name__} was instantiated \"\n f\"with `model=None` and `finetune_mode=True`. \"\n f\"This is not a valid combination as the component \"\n f\"needs an already instantiated and trained model \"\n f\"to continue training in finetune mode.\"\n )\n\n if self.component_config.get(INTENT_CLASSIFICATION):\n if not self._check_enough_labels(model_data):\n logger.error(\n f\"Cannot train '{self.__class__.__name__}'. \"\n f\"Need at least 2 different intent classes. \"\n f\"Skipping training of classifier.\"\n )\n return self._resource\n if self.component_config.get(ENTITY_RECOGNITION):\n self.check_correct_entity_annotations(training_data)\n\n # keep one example for persisting and loading\n self._data_example = model_data.first_data_example()\n\n if not self.finetune_mode:\n # No pre-trained model to load from. 
Create a new instance of the model.\n self.model = self._instantiate_model_class(model_data)\n self.model.compile(\n optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])\n )\n else:\n self.model.adjust_for_incremental_training(\n data_example=self._data_example,\n new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),\n old_sparse_feature_sizes=self._sparse_feature_sizes,\n )\n self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()\n\n data_generator, validation_data_generator = train_utils.create_data_generators(\n model_data,\n self.component_config[BATCH_SIZES],\n self.component_config[EPOCHS],\n self.component_config[BATCH_STRATEGY],\n self.component_config[EVAL_NUM_EXAMPLES],\n self.component_config[RANDOM_SEED],\n )\n callbacks = train_utils.create_common_callbacks(\n self.component_config[EPOCHS],\n self.component_config[TENSORBOARD_LOG_DIR],\n self.component_config[TENSORBOARD_LOG_LEVEL],\n self.tmp_checkpoint_dir,\n )\n\n self.model.fit(\n data_generator,\n epochs=self.component_config[EPOCHS],\n validation_data=validation_data_generator,\n validation_freq=self.component_config[EVAL_NUM_EPOCHS],\n callbacks=callbacks,\n verbose=False,\n shuffle=False, # we use custom shuffle inside data generator\n )\n\n self.persist()\n\n return self._resource\n\n # process helpers\n def _predict(\n self, message: Message\n ) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:\n if self.model is None:\n logger.debug(\n f\"There is no trained model for '{self.__class__.__name__}': The \"\n f\"component is either not trained or didn't receive enough training \"\n f\"data.\"\n )\n return None\n\n # create session data from message and convert it into a batch of 1\n model_data = self._create_model_data([message], training=False)\n if model_data.is_empty():\n return None\n return self.model.run_inference(model_data)\n\n def _predict_label(\n self, predict_out: Optional[Dict[Text, tf.Tensor]]\n ) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:\n \"\"\"Predicts the intent of the provided message.\"\"\"\n label: Dict[Text, Any] = {\"name\": None, \"confidence\": 0.0}\n label_ranking = []\n\n if predict_out is None:\n return label, label_ranking\n\n message_sim = predict_out[\"i_scores\"]\n message_sim = message_sim.flatten() # sim is a matrix\n\n # if X contains all zeros do not predict some label\n if message_sim.size == 0:\n return label, label_ranking\n\n # rank the confidences\n ranking_length = self.component_config[RANKING_LENGTH]\n renormalize = (\n self.component_config[RENORMALIZE_CONFIDENCES]\n and self.component_config[MODEL_CONFIDENCE] == SOFTMAX\n )\n ranked_label_indices, message_sim = train_utils.rank_and_mask(\n message_sim, ranking_length=ranking_length, renormalize=renormalize\n )\n\n # construct the label and ranking\n casted_message_sim: List[float] = message_sim.tolist() # np.float to float\n top_label_idx = ranked_label_indices[0]\n label = {\n \"name\": self.index_label_id_mapping[top_label_idx],\n \"confidence\": casted_message_sim[top_label_idx],\n }\n\n ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]\n label_ranking = [\n {\"name\": self.index_label_id_mapping[label_idx], \"confidence\": score}\n for label_idx, score in ranking\n ]\n\n return label, label_ranking\n\n def _predict_entities(\n self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message\n ) -> List[Dict]:\n if predict_out is None:\n return []\n\n predicted_tags, confidence_values = train_utils.entity_label_to_tags(\n 
predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]\n )\n\n entities = self.convert_predictions_into_entities(\n message.get(TEXT),\n message.get(TOKENS_NAMES[TEXT], []),\n predicted_tags,\n self.split_entities_config,\n confidence_values,\n )\n\n entities = self.add_extractor_name(entities)\n entities = message.get(ENTITIES, []) + entities\n\n return entities\n\n def process(self, messages: List[Message]) -> List[Message]:\n \"\"\"Augments the message with intents, entities, and diagnostic data.\"\"\"\n for message in messages:\n out = self._predict(message)\n\n if self.component_config[INTENT_CLASSIFICATION]:\n label, label_ranking = self._predict_label(out)\n\n message.set(INTENT, label, add_to_output=True)\n message.set(\"intent_ranking\", label_ranking, add_to_output=True)\n\n if self.component_config[ENTITY_RECOGNITION]:\n entities = self._predict_entities(out, message)\n\n message.set(ENTITIES, entities, add_to_output=True)\n\n if out and self._execution_context.should_add_diagnostic_data:\n message.add_diagnostic_data(\n self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)\n )\n\n return messages\n\n def persist(self) -> None:\n \"\"\"Persist this model into the passed directory.\"\"\"\n if self.model is None:\n return None\n\n with self._model_storage.write_to(self._resource) as model_path:\n file_name = self.__class__.__name__\n tf_model_file = model_path / f\"{file_name}.tf_model\"\n\n rasa.shared.utils.io.create_directory_for_file(tf_model_file)\n\n if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:\n self.model.load_weights(self.tmp_checkpoint_dir / \"checkpoint.tf_model\")\n # Save an empty file to flag that this model has been\n # produced using checkpointing\n checkpoint_marker = model_path / f\"{file_name}.from_checkpoint.pkl\"\n checkpoint_marker.touch()\n\n self.model.save(str(tf_model_file))\n\n io_utils.pickle_dump(\n model_path / f\"{file_name}.data_example.pkl\", self._data_example\n )\n io_utils.pickle_dump(\n model_path / f\"{file_name}.sparse_feature_sizes.pkl\",\n self._sparse_feature_sizes,\n )\n io_utils.pickle_dump(\n model_path / f\"{file_name}.label_data.pkl\", dict(self._label_data.data)\n )\n io_utils.json_pickle(\n model_path / f\"{file_name}.index_label_id_mapping.json\",\n self.index_label_id_mapping,\n )\n\n entity_tag_specs = (\n [tag_spec._asdict() for tag_spec in self._entity_tag_specs]\n if self._entity_tag_specs\n else []\n )\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n model_path / f\"{file_name}.entity_tag_specs.json\", entity_tag_specs\n )\n\n @classmethod\n def load(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n **kwargs: Any,\n ) -> DIETClassifier:\n \"\"\"Loads a policy from the storage (see parent class for full docstring).\"\"\"\n try:\n with model_storage.read_from(resource) as model_path:\n return cls._load(\n model_path, config, model_storage, resource, execution_context\n )\n except ValueError:\n logger.debug(\n f\"Failed to load {cls.__class__.__name__} from model storage. 
Resource \"\n f\"'{resource.name}' doesn't exist.\"\n )\n return cls(config, model_storage, resource, execution_context)\n\n @classmethod\n def _load(\n cls,\n model_path: Path,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> \"DIETClassifier\":\n \"\"\"Loads the trained model from the provided directory.\"\"\"\n (\n index_label_id_mapping,\n entity_tag_specs,\n label_data,\n data_example,\n sparse_feature_sizes,\n ) = cls._load_from_files(model_path)\n\n config = train_utils.update_confidence_type(config)\n config = train_utils.update_similarity_type(config)\n\n model = cls._load_model(\n entity_tag_specs,\n label_data,\n config,\n data_example,\n model_path,\n finetune_mode=execution_context.is_finetuning,\n )\n\n return cls(\n config=config,\n model_storage=model_storage,\n resource=resource,\n execution_context=execution_context,\n index_label_id_mapping=index_label_id_mapping,\n entity_tag_specs=entity_tag_specs,\n model=model,\n sparse_feature_sizes=sparse_feature_sizes,\n )\n\n @classmethod\n def _load_from_files(\n cls, model_path: Path\n ) -> Tuple[\n Dict[int, Text],\n List[EntityTagSpec],\n RasaModelData,\n Dict[Text, Dict[Text, List[FeatureArray]]],\n Dict[Text, Dict[Text, List[int]]],\n ]:\n file_name = cls.__name__\n\n data_example = io_utils.pickle_load(\n model_path / f\"{file_name}.data_example.pkl\"\n )\n label_data = io_utils.pickle_load(model_path / f\"{file_name}.label_data.pkl\")\n label_data = RasaModelData(data=label_data)\n sparse_feature_sizes = io_utils.pickle_load(\n model_path / f\"{file_name}.sparse_feature_sizes.pkl\"\n )\n index_label_id_mapping = io_utils.json_unpickle(\n model_path / f\"{file_name}.index_label_id_mapping.json\"\n )\n entity_tag_specs = rasa.shared.utils.io.read_json_file(\n model_path / f\"{file_name}.entity_tag_specs.json\"\n )\n entity_tag_specs = [\n EntityTagSpec(\n tag_name=tag_spec[\"tag_name\"],\n ids_to_tags={\n int(key): value for key, value in tag_spec[\"ids_to_tags\"].items()\n },\n tags_to_ids={\n key: int(value) for key, value in tag_spec[\"tags_to_ids\"].items()\n },\n num_tags=tag_spec[\"num_tags\"],\n )\n for tag_spec in entity_tag_specs\n ]\n\n # jsonpickle converts dictionary keys to strings\n index_label_id_mapping = {\n int(key): value for key, value in index_label_id_mapping.items()\n }\n\n return (\n index_label_id_mapping,\n entity_tag_specs,\n label_data,\n data_example,\n sparse_feature_sizes,\n )\n\n @classmethod\n def _load_model(\n cls,\n entity_tag_specs: List[EntityTagSpec],\n label_data: RasaModelData,\n config: Dict[Text, Any],\n data_example: Dict[Text, Dict[Text, List[FeatureArray]]],\n model_path: Path,\n finetune_mode: bool = False,\n ) -> \"RasaModel\":\n file_name = cls.__name__\n tf_model_file = model_path / f\"{file_name}.tf_model\"\n\n label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None\n label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None\n\n model_data_example = RasaModelData(\n label_key=label_key, label_sub_key=label_sub_key, data=data_example\n )\n\n model = cls._load_model_class(\n tf_model_file,\n model_data_example,\n label_data,\n entity_tag_specs,\n config,\n finetune_mode=finetune_mode,\n )\n\n return model\n\n @classmethod\n def _load_model_class(\n cls,\n tf_model_file: Text,\n model_data_example: RasaModelData,\n label_data: RasaModelData,\n entity_tag_specs: List[EntityTagSpec],\n config: Dict[Text, Any],\n finetune_mode: bool,\n ) -> \"RasaModel\":\n\n predict_data_example 
= RasaModelData(\n label_key=model_data_example.label_key,\n data={\n feature_name: features\n for feature_name, features in model_data_example.items()\n if TEXT in feature_name\n },\n )\n\n return cls.model_class().load(\n tf_model_file,\n model_data_example,\n predict_data_example,\n data_signature=model_data_example.get_signature(),\n label_data=label_data,\n entity_tag_specs=entity_tag_specs,\n config=copy.deepcopy(config),\n finetune_mode=finetune_mode,\n )\n\n def _instantiate_model_class(self, model_data: RasaModelData) -> \"RasaModel\":\n return self.model_class()(\n data_signature=model_data.get_signature(),\n label_data=self._label_data,\n entity_tag_specs=self._entity_tag_specs,\n config=self.component_config,\n )\n\n\nclass DIET(TransformerRasaModel):\n def __init__(\n self,\n data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],\n label_data: RasaModelData,\n entity_tag_specs: Optional[List[EntityTagSpec]],\n config: Dict[Text, Any],\n ) -> None:\n # create entity tag spec before calling super otherwise building the model\n # will fail\n super().__init__(\"DIET\", config, data_signature, label_data)\n self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)\n\n self.predict_data_signature = {\n feature_name: features\n for feature_name, features in data_signature.items()\n if TEXT in feature_name\n }\n\n # tf training\n self._create_metrics()\n self._update_metrics_to_log()\n\n # needed for efficient prediction\n self.all_labels_embed: Optional[tf.Tensor] = None\n\n self._prepare_layers()\n\n @staticmethod\n def _ordered_tag_specs(\n entity_tag_specs: Optional[List[EntityTagSpec]],\n ) -> List[EntityTagSpec]:\n \"\"\"Ensure that order of entity tag specs matches CRF layer order.\"\"\"\n if entity_tag_specs is None:\n return []\n\n crf_order = [\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_ROLE,\n ENTITY_ATTRIBUTE_GROUP,\n ]\n\n ordered_tag_spec = []\n\n for tag_name in crf_order:\n for tag_spec in entity_tag_specs:\n if tag_name == tag_spec.tag_name:\n ordered_tag_spec.append(tag_spec)\n\n return ordered_tag_spec\n\n def _check_data(self) -> None:\n if TEXT not in self.data_signature:\n raise InvalidConfigException(\n f\"No text features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n if self.config[INTENT_CLASSIFICATION]:\n if LABEL not in self.data_signature:\n raise InvalidConfigException(\n f\"No label features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n\n if self.config[SHARE_HIDDEN_LAYERS]:\n different_sentence_signatures = False\n different_sequence_signatures = False\n if (\n SENTENCE in self.data_signature[TEXT]\n and SENTENCE in self.data_signature[LABEL]\n ):\n different_sentence_signatures = (\n self.data_signature[TEXT][SENTENCE]\n != self.data_signature[LABEL][SENTENCE]\n )\n if (\n SEQUENCE in self.data_signature[TEXT]\n and SEQUENCE in self.data_signature[LABEL]\n ):\n different_sequence_signatures = (\n self.data_signature[TEXT][SEQUENCE]\n != self.data_signature[LABEL][SEQUENCE]\n )\n\n if different_sentence_signatures or different_sequence_signatures:\n raise ValueError(\n \"If hidden layer weights are shared, data signatures \"\n \"for text_features and label_features must coincide.\"\n )\n\n if self.config[ENTITY_RECOGNITION] and (\n ENTITIES not in self.data_signature\n or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]\n ):\n logger.debug(\n f\"You specified '{self.__class__.__name__}' to train entities, but \"\n f\"no entities are present in the training data. 
Skipping training of \"\n f\"entities.\"\n )\n self.config[ENTITY_RECOGNITION] = False\n\n def _create_metrics(self) -> None:\n # self.metrics will have the same order as they are created\n # so create loss metrics first to output losses first\n self.mask_loss = tf.keras.metrics.Mean(name=\"m_loss\")\n self.intent_loss = tf.keras.metrics.Mean(name=\"i_loss\")\n self.entity_loss = tf.keras.metrics.Mean(name=\"e_loss\")\n self.entity_group_loss = tf.keras.metrics.Mean(name=\"g_loss\")\n self.entity_role_loss = tf.keras.metrics.Mean(name=\"r_loss\")\n # create accuracy metrics second to output accuracies second\n self.mask_acc = tf.keras.metrics.Mean(name=\"m_acc\")\n self.intent_acc = tf.keras.metrics.Mean(name=\"i_acc\")\n self.entity_f1 = tf.keras.metrics.Mean(name=\"e_f1\")\n self.entity_group_f1 = tf.keras.metrics.Mean(name=\"g_f1\")\n self.entity_role_f1 = tf.keras.metrics.Mean(name=\"r_f1\")\n\n def _update_metrics_to_log(self) -> None:\n debug_log_level = logging.getLogger(\"rasa\").level == logging.DEBUG\n\n if self.config[MASKED_LM]:\n self.metrics_to_log.append(\"m_acc\")\n if debug_log_level:\n self.metrics_to_log.append(\"m_loss\")\n if self.config[INTENT_CLASSIFICATION]:\n self.metrics_to_log.append(\"i_acc\")\n if debug_log_level:\n self.metrics_to_log.append(\"i_loss\")\n if self.config[ENTITY_RECOGNITION]:\n for tag_spec in self._entity_tag_specs:\n if tag_spec.num_tags != 0:\n name = tag_spec.tag_name\n self.metrics_to_log.append(f\"{name[0]}_f1\")\n if debug_log_level:\n self.metrics_to_log.append(f\"{name[0]}_loss\")\n\n self._log_metric_info()\n\n def _log_metric_info(self) -> None:\n metric_name = {\n \"t\": \"total\",\n \"i\": \"intent\",\n \"e\": \"entity\",\n \"m\": \"mask\",\n \"r\": \"role\",\n \"g\": \"group\",\n }\n logger.debug(\"Following metrics will be logged during training: \")\n for metric in self.metrics_to_log:\n parts = metric.split(\"_\")\n name = f\"{metric_name[parts[0]]} {parts[1]}\"\n logger.debug(f\" {metric} ({name})\")\n\n def _prepare_layers(self) -> None:\n # For user text, prepare layers that combine different feature types, embed\n # everything using a transformer and optionally also do masked language\n # modeling.\n self.text_name = TEXT\n self._tf_layers[\n f\"sequence_layer.{self.text_name}\"\n ] = rasa_layers.RasaSequenceLayer(\n self.text_name, self.data_signature[self.text_name], self.config\n )\n if self.config[MASKED_LM]:\n self._prepare_mask_lm_loss(self.text_name)\n\n # Intent labels are treated similarly to user text but without the transformer,\n # without masked language modelling, and with no dropout applied to the\n # individual features, only to the overall label embedding after all label\n # features have been combined.\n if self.config[INTENT_CLASSIFICATION]:\n self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL\n\n # disable input dropout applied to sparse and dense label features\n label_config = self.config.copy()\n label_config.update(\n {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}\n )\n\n self._tf_layers[\n f\"feature_combining_layer.{self.label_name}\"\n ] = rasa_layers.RasaFeatureCombiningLayer(\n self.label_name, self.label_signature[self.label_name], label_config\n )\n\n self._prepare_ffnn_layer(\n self.label_name,\n self.config[HIDDEN_LAYERS_SIZES][self.label_name],\n self.config[DROP_RATE],\n )\n\n self._prepare_label_classification_layers(predictor_attribute=TEXT)\n\n if self.config[ENTITY_RECOGNITION]:\n self._prepare_entity_recognition_layers()\n\n def _prepare_mask_lm_loss(self, 
name: Text) -> None:\n # for embedding predicted tokens at masked positions\n self._prepare_embed_layers(f\"{name}_lm_mask\")\n\n # for embedding the true tokens that got masked\n self._prepare_embed_layers(f\"{name}_golden_token\")\n\n # mask loss is additional loss\n # set scaling to False, so that it doesn't overpower other losses\n self._prepare_dot_product_loss(f\"{name}_mask\", scale_loss=False)\n\n def _create_bow(\n self,\n sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],\n sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],\n sequence_feature_lengths: tf.Tensor,\n name: Text,\n ) -> tf.Tensor:\n\n x, _ = self._tf_layers[f\"feature_combining_layer.{name}\"](\n (sequence_features, sentence_features, sequence_feature_lengths),\n training=self._training,\n )\n\n # convert to bag-of-words by summing along the sequence dimension\n x = tf.reduce_sum(x, axis=1)\n\n return self._tf_layers[f\"ffnn.{name}\"](x, self._training)\n\n def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:\n all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n self.tf_label_data, LABEL\n )\n\n x = self._create_bow(\n self.tf_label_data[LABEL][SEQUENCE],\n self.tf_label_data[LABEL][SENTENCE],\n sequence_feature_lengths,\n self.label_name,\n )\n all_labels_embed = self._tf_layers[f\"embed.{LABEL}\"](x)\n\n return all_label_ids, all_labels_embed\n\n def _mask_loss(\n self,\n outputs: tf.Tensor,\n inputs: tf.Tensor,\n seq_ids: tf.Tensor,\n mlm_mask_boolean: tf.Tensor,\n name: Text,\n ) -> tf.Tensor:\n # make sure there is at least one element in the mask\n mlm_mask_boolean = tf.cond(\n tf.reduce_any(mlm_mask_boolean),\n lambda: mlm_mask_boolean,\n lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),\n )\n\n mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)\n\n # Pick elements that were masked, throwing away the batch & sequence dimension\n # and effectively switching from shape (batch_size, sequence_length, units) to\n # (num_masked_elements, units).\n outputs = tf.boolean_mask(outputs, mlm_mask_boolean)\n inputs = tf.boolean_mask(inputs, mlm_mask_boolean)\n ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)\n\n tokens_predicted_embed = self._tf_layers[f\"embed.{name}_lm_mask\"](outputs)\n tokens_true_embed = self._tf_layers[f\"embed.{name}_golden_token\"](inputs)\n\n # To limit the otherwise computationally expensive loss calculation, we\n # constrain the label space in MLM (i.e. token space) to only those tokens that\n # were masked in this batch. Hence the reduced list of token embeddings\n # (tokens_true_embed) and the reduced list of labels (ids) are passed as\n # all_labels_embed and all_labels, respectively. 
In the future, we could be less\n # restrictive and construct a slightly bigger label space which could include\n # tokens not masked in the current batch too.\n return self._tf_layers[f\"loss.{name}_mask\"](\n inputs_embed=tokens_predicted_embed,\n labels_embed=tokens_true_embed,\n labels=ids,\n all_labels_embed=tokens_true_embed,\n all_labels=ids,\n )\n\n def _calculate_label_loss(\n self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor\n ) -> tf.Tensor:\n all_label_ids, all_labels_embed = self._create_all_labels()\n\n text_embed = self._tf_layers[f\"embed.{TEXT}\"](text_features)\n label_embed = self._tf_layers[f\"embed.{LABEL}\"](label_features)\n\n return self._tf_layers[f\"loss.{LABEL}\"](\n text_embed, label_embed, label_ids, all_labels_embed, all_label_ids\n )\n\n def batch_loss(\n self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> tf.Tensor:\n \"\"\"Calculates the loss for the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The loss of the given batch.\n \"\"\"\n tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n tf_batch_data, TEXT\n )\n\n (\n text_transformed,\n text_in,\n mask_combined_sequence_sentence,\n text_seq_ids,\n mlm_mask_boolean_text,\n _,\n ) = self._tf_layers[f\"sequence_layer.{self.text_name}\"](\n (\n tf_batch_data[TEXT][SEQUENCE],\n tf_batch_data[TEXT][SENTENCE],\n sequence_feature_lengths,\n ),\n training=self._training,\n )\n\n losses = []\n\n # Lengths of sequences in case of sentence-level features are always 1, but they\n # can effectively be 0 if sentence-level features aren't present.\n sentence_feature_lengths = self._get_sentence_feature_lengths(\n tf_batch_data, TEXT\n )\n\n combined_sequence_sentence_feature_lengths = (\n sequence_feature_lengths + sentence_feature_lengths\n )\n\n if self.config[MASKED_LM]:\n loss, acc = self._mask_loss(\n text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT\n )\n self.mask_loss.update_state(loss)\n self.mask_acc.update_state(acc)\n losses.append(loss)\n\n if self.config[INTENT_CLASSIFICATION]:\n loss = self._batch_loss_intent(\n combined_sequence_sentence_feature_lengths,\n text_transformed,\n tf_batch_data,\n )\n losses.append(loss)\n\n if self.config[ENTITY_RECOGNITION]:\n losses += self._batch_loss_entities(\n mask_combined_sequence_sentence,\n sequence_feature_lengths,\n text_transformed,\n tf_batch_data,\n )\n\n return tf.math.add_n(losses)\n\n def _batch_loss_intent(\n self,\n combined_sequence_sentence_feature_lengths_text: tf.Tensor,\n text_transformed: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n ) -> tf.Tensor:\n # get sentence features vector for intent classification\n sentence_vector = self._last_token(\n text_transformed, combined_sequence_sentence_feature_lengths_text\n )\n\n sequence_feature_lengths_label = self._get_sequence_feature_lengths(\n tf_batch_data, LABEL\n )\n\n label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]\n label = self._create_bow(\n tf_batch_data[LABEL][SEQUENCE],\n tf_batch_data[LABEL][SENTENCE],\n sequence_feature_lengths_label,\n self.label_name,\n )\n loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)\n\n self._update_label_metrics(loss, acc)\n\n return loss\n\n def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:\n\n self.intent_loss.update_state(loss)\n self.intent_acc.update_state(acc)\n\n def _batch_loss_entities(\n self,\n 
mask_combined_sequence_sentence: tf.Tensor,\n sequence_feature_lengths: tf.Tensor,\n text_transformed: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n ) -> List[tf.Tensor]:\n losses = []\n\n entity_tags = None\n\n for tag_spec in self._entity_tag_specs:\n if tag_spec.num_tags == 0:\n continue\n\n tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]\n # add a zero (no entity) for the sentence features to match the shape of\n # inputs\n tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])\n\n loss, f1, _logits = self._calculate_entity_loss(\n text_transformed,\n tag_ids,\n mask_combined_sequence_sentence,\n sequence_feature_lengths,\n tag_spec.tag_name,\n entity_tags,\n )\n\n if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:\n # use the entity tags as additional input for the role\n # and group CRF\n entity_tags = tf.one_hot(\n tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags\n )\n\n self._update_entity_metrics(loss, f1, tag_spec.tag_name)\n\n losses.append(loss)\n\n return losses\n\n def _update_entity_metrics(\n self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text\n ) -> None:\n if tag_name == ENTITY_ATTRIBUTE_TYPE:\n self.entity_loss.update_state(loss)\n self.entity_f1.update_state(f1)\n elif tag_name == ENTITY_ATTRIBUTE_GROUP:\n self.entity_group_loss.update_state(loss)\n self.entity_group_f1.update_state(f1)\n elif tag_name == ENTITY_ATTRIBUTE_ROLE:\n self.entity_role_loss.update_state(loss)\n self.entity_role_f1.update_state(f1)\n\n def prepare_for_predict(self) -> None:\n \"\"\"Prepares the model for prediction.\"\"\"\n if self.config[INTENT_CLASSIFICATION]:\n _, self.all_labels_embed = self._create_all_labels()\n\n def batch_predict(\n self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> Dict[Text, tf.Tensor]:\n \"\"\"Predicts the output of the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The output to predict.\n \"\"\"\n tf_batch_data = self.batch_to_model_data_format(\n batch_in, self.predict_data_signature\n )\n\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n tf_batch_data, TEXT\n )\n sentence_feature_lengths = self._get_sentence_feature_lengths(\n tf_batch_data, TEXT\n )\n\n text_transformed, _, _, _, _, attention_weights = self._tf_layers[\n f\"sequence_layer.{self.text_name}\"\n ](\n (\n tf_batch_data[TEXT][SEQUENCE],\n tf_batch_data[TEXT][SENTENCE],\n sequence_feature_lengths,\n ),\n training=self._training,\n )\n predictions = {\n DIAGNOSTIC_DATA: {\n \"attention_weights\": attention_weights,\n \"text_transformed\": text_transformed,\n }\n }\n\n if self.config[INTENT_CLASSIFICATION]:\n predictions.update(\n self._batch_predict_intents(\n sequence_feature_lengths + sentence_feature_lengths,\n text_transformed,\n )\n )\n\n if self.config[ENTITY_RECOGNITION]:\n predictions.update(\n self._batch_predict_entities(sequence_feature_lengths, text_transformed)\n )\n\n return predictions\n\n def _batch_predict_entities(\n self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor\n ) -> Dict[Text, tf.Tensor]:\n predictions: Dict[Text, tf.Tensor] = {}\n\n entity_tags = None\n\n for tag_spec in self._entity_tag_specs:\n # skip crf layer if it was not trained\n if tag_spec.num_tags == 0:\n continue\n\n name = tag_spec.tag_name\n _input = text_transformed\n\n if entity_tags is not None:\n _tags = self._tf_layers[f\"embed.{name}.tags\"](entity_tags)\n _input = tf.concat([_input, _tags], axis=-1)\n\n _logits = self._tf_layers[f\"embed.{name}.logits\"](_input)\n pred_ids, confidences = 
self._tf_layers[f\"crf.{name}\"](\n _logits, sequence_feature_lengths\n )\n\n predictions[f\"e_{name}_ids\"] = pred_ids\n predictions[f\"e_{name}_scores\"] = confidences\n\n if name == ENTITY_ATTRIBUTE_TYPE:\n # use the entity tags as additional input for the role\n # and group CRF\n entity_tags = tf.one_hot(\n tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags\n )\n\n return predictions\n\n def _batch_predict_intents(\n self,\n combined_sequence_sentence_feature_lengths: tf.Tensor,\n text_transformed: tf.Tensor,\n ) -> Dict[Text, tf.Tensor]:\n\n if self.all_labels_embed is None:\n raise ValueError(\n \"The model was not prepared for prediction. \"\n \"Call `prepare_for_predict` first.\"\n )\n\n # get sentence feature vector for intent classification\n sentence_vector = self._last_token(\n text_transformed, combined_sequence_sentence_feature_lengths\n )\n sentence_vector_embed = self._tf_layers[f\"embed.{TEXT}\"](sentence_vector)\n\n _, scores = self._tf_layers[\n f\"loss.{LABEL}\"\n ].get_similarities_and_confidences_from_embeddings(\n sentence_vector_embed[:, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, :, :],\n )\n\n return {\"i_scores\": scores}\n"
] |
[
[
"tensorflow.keras.metrics.Mean",
"numpy.array",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.reduce_any",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.reduce_sum",
"tensorflow.keras.optimizers.Adam",
"tensorflow.pad",
"tensorflow.boolean_mask",
"numpy.expand_dims",
"tensorflow.math.add_n"
]
] |
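Note (illustrative, not part of the dataset row above): the `_tag_id_index_mapping_for` helper in the classifier code of this row reserves index 0 for the no-entity tag and enumerates real tags from 1, so that padded sequence positions always decode to "no entity". A minimal standalone sketch of that convention, using a hypothetical `NO_ENTITY_TAG` placeholder and plain Python, independent of the Rasa classes above:

```python
# Sketch of the tag-id convention used above: real tags are enumerated
# from 1, and the "no entity" tag is pinned to index 0 so that padded
# positions always map to a valid label.
NO_ENTITY_TAG = "O"  # assumption: placeholder constant for illustration

def tag_id_index_mapping(distinct_tags):
    distinct_tags = set(distinct_tags) - {NO_ENTITY_TAG, None}
    if not distinct_tags:
        return None
    mapping = {tag: idx for idx, tag in enumerate(sorted(distinct_tags), 1)}
    mapping[NO_ENTITY_TAG] = 0
    return mapping

print(tag_id_index_mapping(["city", "time", "O", "city"]))
# {'city': 1, 'time': 2, 'O': 0}
```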
tucaiyong/tensorflow
|
[
"3cc3c87f375f1bc292bd58db4928b810ac888bc6",
"3cc3c87f375f1bc292bd58db4928b810ac888bc6",
"3cc3c87f375f1bc292bd58db4928b810ac888bc6"
] |
[
"tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py",
"tensorflow/contrib/eager/python/tfe.py",
"tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Construct the Kronecker product of one or more `LinearOperators`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as linalg\nfrom tensorflow.python.ops.linalg import linear_operator\n\n\ndef _vec(x):\n \"\"\"Stacks column of matrix to form a single column.\"\"\"\n return array_ops.reshape(\n array_ops.matrix_transpose(x),\n array_ops.concat(\n [array_ops.shape(x)[:-2], [-1]], axis=0))\n\n\ndef _unvec_by(y, num_col):\n \"\"\"Unstack vector to form a matrix, with a specified amount of columns.\"\"\"\n return array_ops.matrix_transpose(\n array_ops.reshape(\n y,\n array_ops.concat(\n [array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))\n\n\ndef _rotate_last_dim(x, rotate_right=False):\n \"\"\"Rotate the last dimension either left or right.\"\"\"\n ndims = array_ops.rank(x)\n if rotate_right:\n transpose_perm = array_ops.concat(\n [[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)\n else:\n transpose_perm = array_ops.concat(\n [math_ops.range(1, ndims), [0]], axis=0)\n return array_ops.transpose(x, transpose_perm)\n\n\nclass LinearOperatorKronecker(linear_operator.LinearOperator):\n \"\"\"Kronecker product between two `LinearOperators`.\n\n This operator composes one or more linear operators `[op1,...,opJ]`,\n building a new `LinearOperator` representing the Kronecker product:\n `op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is\n associative).\n\n If `opj` has shape `batch_shape_j` + [M_j, N_j`, then the composed operator\n will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,\n where the product is over all operators.\n\n ```python\n # Create a 4 x 4 linear operator composed of two 2 x 2 operators.\n operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])\n operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])\n operator = LinearOperatorKronecker([operator_1, operator_2])\n\n operator.to_dense()\n ==> [[1., 2., 0., 0.],\n [3., 4., 0., 0.],\n [2., 4., 1., 2.],\n [6., 8., 3., 4.]]\n\n operator.shape\n ==> [4, 4]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... 
Shape [4, 2] Tensor\n operator.matmul(x)\n ==> Shape [4, 2] Tensor\n\n # Create a [2, 3] batch of 4 x 5 linear operators.\n matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])\n operator_45 = LinearOperatorFullMatrix(matrix)\n\n # Create a [2, 3] batch of 5 x 6 linear operators.\n matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])\n operator_56 = LinearOperatorFullMatrix(matrix_56)\n\n # Compose to create a [2, 3] batch of 20 x 30 operators.\n operator_large = LinearOperatorKronecker([operator_45, operator_56])\n\n # Create a shape [2, 3, 20, 2] vector.\n x = tf.random_normal(shape=[2, 3, 6, 2])\n operator_large.matmul(x)\n ==> Shape [2, 3, 30, 2] Tensor\n ```\n\n #### Performance\n\n The performance of `LinearOperatorKronecker` on any operation is equal to\n the sum of the individual operators' operations.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n operators,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None):\n r\"\"\"Initialize a `LinearOperatorKronecker`.\n\n `LinearOperatorKronecker` is initialized with a list of operators\n `[op_1,...,op_J]`.\n\n Args:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape, representing the Kronecker\n factors.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix\\\n #Extension_for_non_symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. Default is the individual\n operators names joined with `_x_`.\n\n Raises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.\n \"\"\"\n # Validate operators.\n check_ops.assert_proper_iterable(operators)\n operators = list(operators)\n if not operators:\n raise ValueError(\n \"Expected a list of >=1 operators. Found: %s\" % operators)\n self._operators = operators\n\n # Validate dtype.\n dtype = operators[0].dtype\n for operator in operators:\n if operator.dtype != dtype:\n name_type = (str((o.name, o.dtype)) for o in operators)\n raise TypeError(\n \"Expected all operators to have the same dtype. 
Found %s\"\n % \" \".join(name_type))\n\n # Auto-set and check hints.\n # A Kronecker product is invertible, if and only if all factors are\n # invertible.\n if all(operator.is_non_singular for operator in operators):\n if is_non_singular is False:\n raise ValueError(\n \"The Kronecker product of non-singular operators is always \"\n \"non-singular.\")\n is_non_singular = True\n\n if all(operator.is_self_adjoint for operator in operators):\n if is_self_adjoint is False:\n raise ValueError(\n \"The Kronecker product of self-adjoint operators is always \"\n \"self-adjoint.\")\n is_self_adjoint = True\n\n # The eigenvalues of a Kronecker product are equal to the products of eigen\n # values of the corresponding factors.\n if all(operator.is_positive_definite for operator in operators):\n if is_positive_definite is False:\n raise ValueError(\"The Kronecker product of positive-definite operators \"\n \"is always positive-definite.\")\n is_positive_definite = True\n\n # Initialization.\n graph_parents = []\n for operator in operators:\n graph_parents.extend(operator.graph_parents)\n\n if name is None:\n name = operators[0].name\n for operator in operators[1:]:\n name += \"_x_\" + operator.name\n with ops.name_scope(name, values=graph_parents):\n super(LinearOperatorKronecker, self).__init__(\n dtype=dtype,\n graph_parents=graph_parents,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n @property\n def operators(self):\n return self._operators\n\n def _shape(self):\n # Get final matrix shape.\n domain_dimension = self.operators[0].domain_dimension\n for operator in self.operators[1:]:\n domain_dimension *= operator.domain_dimension\n\n range_dimension = self.operators[0].range_dimension\n for operator in self.operators[1:]:\n range_dimension *= operator.range_dimension\n\n matrix_shape = tensor_shape.TensorShape([\n range_dimension, domain_dimension])\n\n # Get broadcast batch shape.\n # broadcast_shape checks for compatibility.\n batch_shape = self.operators[0].batch_shape\n for operator in self.operators[1:]:\n batch_shape = common_shapes.broadcast_shape(\n batch_shape, operator.batch_shape)\n\n return batch_shape.concatenate(matrix_shape)\n\n def _shape_tensor(self):\n domain_dimension = self.operators[0].domain_dimension_tensor()\n for operator in self.operators[1:]:\n domain_dimension *= operator.domain_dimension_tensor()\n\n range_dimension = self.operators[0].range_dimension_tensor()\n for operator in self.operators[1:]:\n range_dimension *= operator.range_dimension_tensor()\n\n matrix_shape = [range_dimension, domain_dimension]\n\n # Get broadcast batch shape.\n # broadcast_shape checks for compatibility.\n batch_shape = self.operators[0].batch_shape_tensor()\n for operator in self.operators[1:]:\n batch_shape = array_ops.broadcast_dynamic_shape(\n batch_shape, operator.batch_shape_tensor())\n\n return array_ops.concat((batch_shape, matrix_shape), 0)\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n # Here we heavily rely on Roth's column Lemma [1]:\n # (A x B) * vec X = vec BXA^T,\n # where vec stacks all the columns of the matrix under each other. In our\n # case, x represents a batch of vec X (i.e. we think of x as a batch of\n # column vectors, rather than a matrix). 
Each member of the batch can be\n # reshaped to a matrix (hence we get a batch of matrices).\n # We can iteratively apply this lemma by noting that if B is a Kronecker\n # product, then we can apply the lemma again.\n\n # [1] W. E. Roth, \"On direct product matrices,\"\n # Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,\n # 1934\n\n # Efficiency\n\n # Naively doing the Kronecker product, by calculating the dense matrix and\n # applying it will can take cubic time in the size of domain_dimension\n # (assuming a square matrix). The other issue is that calculating the dense\n # matrix can be prohibitively expensive, in that it can take a large amount\n # of memory.\n #\n # This implementation avoids this memory blow up by only computing matmuls\n # with the factors. In this way, we don't have to realize the dense matrix.\n # In terms of complexity, if we have Kronecker Factors of size:\n # (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \\prod n_i, and we\n # have as input a [N, M] matrix, the naive approach would take O(N^2 M).\n # With this approach (ignoring reshaping of tensors and transposes for now),\n # the time complexity can be O(M * (\\sum n_i) * N). There is also the\n # benefit of batched multiplication (In this example, the batch size is\n # roughly M * N) so this can be much faster. However, not factored in are\n # the costs of the several transposing of tensors, which can affect cache\n # behavior.\n\n # Below we document the shape manipulation for adjoint=False,\n # adjoint_arg=False, but the general case of different adjoints is still\n # handled.\n\n if adjoint_arg:\n x = linalg.adjoint(x)\n\n # Always add a batch dimension to enable broadcasting to work.\n batch_shape = array_ops.concat(\n [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)\n x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)\n\n # x has shape [B, R, C], where B represent some number of batch dimensions,\n # R represents the number of rows, and C represents the number of columns.\n # In order to apply Roth's column lemma, we need to operate on a batch of\n # column vectors, so we reshape into a batch of column vectors. We put it\n # at the front to ensure that broadcasting between operators to the batch\n # dimensions B still works.\n output = _rotate_last_dim(x, rotate_right=True)\n\n # Also expand the shape to be [A, C, B, R]. The first dimension will be\n # used to accumulate dimensions from each operator matmul.\n output = output[array_ops.newaxis, ...]\n\n # In this loop, A is going to refer to the value of the accumulated\n # dimension. A = 1 at the start, and will end up being self.range_dimension.\n # V will refer to the last dimension. 
V = R at the start, and will end up\n # being 1 in the end.\n for operator in self.operators[:-1]:\n # Reshape output from [A, C, B, V] to be\n # [A, C, B, V / op.domain_dimension, op.domain_dimension]\n if adjoint:\n operator_dimension = operator.range_dimension_tensor()\n else:\n operator_dimension = operator.domain_dimension_tensor()\n\n output = _unvec_by(output, operator_dimension)\n\n # We are computing (XA^T) = (AX^T)^T.\n # output has [A, C, B, V / op.domain_dimension, op.domain_dimension],\n # which is being converted to:\n # [A, C, B, V / op.domain_dimension, op.range_dimension]\n output = array_ops.matrix_transpose(output)\n output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)\n output = array_ops.matrix_transpose(output)\n # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=True)\n\n # After the loop, we will have\n # A = self.range_dimension / op[-1].range_dimension\n # V = op[-1].domain_dimension\n\n # We convert that using matvec to get:\n # [A, C, B, op[-1].range_dimension]\n output = self.operators[-1].matvec(output, adjoint=adjoint)\n # Rearrange shape to be [B1, ... Bn, self.range_dimension, C]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=False)\n\n if x.shape.is_fully_defined():\n column_dim = x.shape[-1]\n broadcast_batch_shape = common_shapes.broadcast_shape(\n x.shape[:-2], self.batch_shape)\n if adjoint:\n matrix_dimensions = [self.domain_dimension, column_dim]\n else:\n matrix_dimensions = [self.range_dimension, column_dim]\n\n print(\"x: \", x)\n print(\"bathc_shape:\", self.batch_shape)\n print(\"self.shape:\", self.shape)\n print(\"output: \", output)\n output.set_shape(broadcast_batch_shape.concatenate(\n matrix_dimensions))\n\n return output\n\n def _determinant(self):\n # Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m\n # matrix, and X2 is an n x n matrix. We can iteratively apply this property\n # to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the\n # domain dimension of all operators, then we have:\n # |X1 x X2 x X3 ...| =\n # |X1| ** (T / m) * |X2 x X3 ... | ** m =\n # |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =\n # |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... 
| ** (m * n)\n # And by doing induction we have product(|X_i| ** (T / dim(X_i))).\n total = self.domain_dimension_tensor()\n determinant = 1.\n for operator in self.operators:\n determinant *= operator.determinant() ** math_ops.cast(\n total / operator.domain_dimension_tensor(),\n dtype=operator.dtype)\n return determinant\n\n def _log_abs_determinant(self):\n # This will be sum((total / dim(x_i)) * log |X_i|)\n total = self.domain_dimension_tensor()\n log_abs_det = 0.\n for operator in self.operators:\n log_abs_det += operator.log_abs_determinant() * math_ops.cast(\n total / operator.domain_dimension_tensor(),\n dtype=operator.dtype)\n return log_abs_det\n\n def _trace(self):\n # tr(A x B) = tr(A) * tr(B)\n trace = 1.\n for operator in self.operators:\n trace *= operator.trace()\n return trace\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n # Here we follow the same use of Roth's column lemma as in `matmul`, with\n # the key difference that we replace all `matmul` instances with `solve`.\n # This follows from the property that inv(A x B) = inv(A) x inv(B).\n\n # Below we document the shape manipulation for adjoint=False,\n # adjoint_arg=False, but the general case of different adjoints is still\n # handled.\n\n if adjoint_arg:\n rhs = linalg.adjoint(rhs)\n\n # Always add a batch dimension to enable broadcasting to work.\n batch_shape = array_ops.concat(\n [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)\n rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)\n\n # rhs has shape [B, R, C], where B represent some number of batch\n # dimensions,\n # R represents the number of rows, and C represents the number of columns.\n # In order to apply Roth's column lemma, we need to operate on a batch of\n # column vectors, so we reshape into a batch of column vectors. We put it\n # at the front to ensure that broadcasting between operators to the batch\n # dimensions B still works.\n output = _rotate_last_dim(rhs, rotate_right=True)\n\n # Also expand the shape to be [A, C, B, R]. The first dimension will be\n # used to accumulate dimensions from each operator matmul.\n output = output[array_ops.newaxis, ...]\n\n # In this loop, A is going to refer to the value of the accumulated\n # dimension. A = 1 at the start, and will end up being self.range_dimension.\n # V will refer to the last dimension. 
V = R at the start, and will end up\n # being 1 in the end.\n for operator in self.operators[:-1]:\n # Reshape output from [A, C, B, V] to be\n # [A, C, B, V / op.domain_dimension, op.domain_dimension]\n if adjoint:\n operator_dimension = operator.range_dimension_tensor()\n else:\n operator_dimension = operator.domain_dimension_tensor()\n\n output = _unvec_by(output, operator_dimension)\n\n # We are computing (XA^-1^T) = (A^-1 X^T)^T.\n # output has [A, C, B, V / op.domain_dimension, op.domain_dimension],\n # which is being converted to:\n # [A, C, B, V / op.domain_dimension, op.range_dimension]\n output = array_ops.matrix_transpose(output)\n output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)\n output = array_ops.matrix_transpose(output)\n # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=True)\n\n # After the loop, we will have\n # A = self.range_dimension / op[-1].range_dimension\n # V = op[-1].domain_dimension\n\n # We convert that using matvec to get:\n # [A, C, B, op[-1].range_dimension]\n output = self.operators[-1].solvevec(output, adjoint=adjoint)\n # Rearrange shape to be [B1, ... Bn, self.range_dimension, C]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=False)\n\n if rhs.shape.is_fully_defined():\n column_dim = rhs.shape[-1]\n broadcast_batch_shape = common_shapes.broadcast_shape(\n rhs.shape[:-2], self.batch_shape)\n if adjoint:\n matrix_dimensions = [self.domain_dimension, column_dim]\n else:\n matrix_dimensions = [self.range_dimension, column_dim]\n\n output.set_shape(broadcast_batch_shape.concatenate(\n matrix_dimensions))\n\n return output\n\n def _diag_part(self):\n diag_part = self.operators[0].diag_part()\n for operator in self.operators[1:]:\n diag_part = diag_part[..., :, array_ops.newaxis]\n op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]\n diag_part *= op_diag_part\n diag_part = array_ops.reshape(\n diag_part,\n shape=array_ops.concat(\n [array_ops.shape(diag_part)[:-2], [-1]], axis=0))\n if self.range_dimension > self.domain_dimension:\n diag_dimension = self.domain_dimension\n else:\n diag_dimension = self.range_dimension\n diag_part.set_shape(\n self.batch_shape.concatenate(diag_dimension))\n return diag_part\n\n def _to_dense(self):\n product = self.operators[0].to_dense()\n for operator in self.operators[1:]:\n # Product has shape [B, R1, 1, C1].\n product = product[\n ..., :, array_ops.newaxis, :, array_ops.newaxis]\n # Operator has shape [B, 1, R2, 1, C2].\n op_to_mul = operator.to_dense()[\n ..., array_ops.newaxis, :, array_ops.newaxis, :]\n # This is now [B, R1, R2, C1, C2].\n product *= op_to_mul\n # Now merge together dimensions to get [B, R1 * R2, C1 * C2].\n product = array_ops.reshape(\n product,\n shape=array_ops.concat(\n [array_ops.shape(product)[:-4],\n [array_ops.shape(product)[-4] * array_ops.shape(product)[-3],\n array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]\n ], axis=0))\n product.set_shape(self.shape)\n return product\n\n def _assert_non_singular(self):\n if all(operator.is_square for operator in self.operators):\n asserts = [operator.assert_non_singular() for operator in self.operators]\n return control_flow_ops.group(asserts)\n else:\n raise errors.InvalidArgumentError(\n node_def=None, op=None, message=\"All Kronecker factors must be \"\n \"square for the product 
to be invertible.\")\n\n def _assert_self_adjoint(self):\n if all(operator.is_square for operator in self.operators):\n asserts = [operator.assert_self_adjoint() for operator in self.operators]\n return control_flow_ops.group(asserts)\n else:\n raise errors.InvalidArgumentError(\n node_def=None, op=None, message=\"All Kronecker factors must be \"\n \"square for the product to be self adjoint.\")\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Eager execution prototype.\n\nEXPERIMENTAL: APIs here are unstable and likely to change without notice.\n\nTo use, at program startup, call `tfe.enable_eager_execution()`.\n\n@@metrics\n\n@@list_devices\n@@num_gpus\n\n@@py_func\n@@defun\n@@make_template\n@@implicit_gradients\n@@implicit_value_and_gradients\n@@gradients_function\n@@value_and_gradients_function\n@@GradientTape\n\n@@run\n@@enable_eager_execution\n\n@@custom_gradient\n\n@@add_execution_callback\n@@clear_execution_callbacks\n@@inf_callback\n@@inf_nan_callback\n@@nan_callback\n@@seterr\n\n@@Iterator\n@@Saver\n@@restore_variables_on_create\n@@Variable\n@@get_optimizer_variables\n@@EagerVariableStore\n\n@@Network\n@@Sequential\n@@save_network_checkpoint\n@@restore_network_checkpoint\n\n@@Checkpoint\n@@Checkpointable\n@@CheckpointableSaver\n\n@@executing_eagerly\n@@in_eager_mode\n@@set_execution_mode\n@@execution_mode\n@@async_wait\n@@async_clear_error\n\n@@run_test_in_graph_and_eager_modes\n\n@@DEVICE_PLACEMENT_EXPLICIT\n@@DEVICE_PLACEMENT_WARN\n@@DEVICE_PLACEMENT_SILENT\n@@SYNC\n@@ASYNC\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n# pylint:disable=g-bad-import-order,g-import-not-at-top,unused-import\n#\nfrom tensorflow.contrib.eager.python import metrics\nfrom tensorflow.contrib.eager.python.datasets import Iterator\nfrom tensorflow.contrib.eager.python.network import Network\nfrom tensorflow.contrib.eager.python.network import Sequential\nfrom tensorflow.contrib.eager.python.network import save_network_checkpoint\nfrom tensorflow.contrib.eager.python.network import restore_network_checkpoint\nfrom tensorflow.contrib.eager.python.saver import get_optimizer_variables\nfrom tensorflow.contrib.eager.python.saver import restore_variables_on_create\nfrom tensorflow.contrib.eager.python.saver import Saver\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_EXPLICIT\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_WARN\nfrom tensorflow.python.eager.context import DEVICE_PLACEMENT_SILENT\nfrom tensorflow.python.eager.context import executing_eagerly\nfrom tensorflow.python.eager.context import list_devices\nfrom tensorflow.python.eager.context import set_execution_mode\nfrom tensorflow.python.eager.context import execution_mode\nfrom tensorflow.python.eager.context import async_wait\nfrom tensorflow.python.eager.context import async_clear_error\nfrom tensorflow.python.eager.context import SYNC\nfrom tensorflow.python.eager.context import ASYNC\nfrom tensorflow.python.eager.context import num_gpus\nfrom tensorflow.python.eager.execution_callbacks import add_execution_callback\nfrom tensorflow.python.eager.execution_callbacks import 
clear_execution_callbacks\nfrom tensorflow.python.eager.execution_callbacks import inf_callback\nfrom tensorflow.python.eager.execution_callbacks import inf_nan_callback\nfrom tensorflow.python.eager.execution_callbacks import nan_callback\nfrom tensorflow.python.eager.execution_callbacks import seterr\nfrom tensorflow.python.framework.ops import enable_eager_execution\nfrom tensorflow.python.framework.ops import eager_run as run\nfrom tensorflow.python.framework.test_util import run_in_graph_and_eager_modes as run_test_in_graph_and_eager_modes\nfrom tensorflow.python.ops.custom_gradient import custom_gradient\nfrom tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable\nfrom tensorflow.python.ops.variable_scope import EagerVariableStore\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import template\nfrom tensorflow.python.training.checkpointable import Checkpointable\nfrom tensorflow.python.training.checkpointable_utils import CheckpointableSaver\nfrom tensorflow.python.training.checkpointable_utils import Checkpoint\nfrom tensorflow.python.util.all_util import remove_undocumented\n\npy_func = script_ops.eager_py_func\ndefun = function.defun\nmake_template = template.make_template_internal\nimplicit_gradients = backprop.implicit_grad\nimplicit_value_and_gradients = backprop.implicit_val_and_grad\ngradients_function = backprop.gradients_function\nvalue_and_gradients_function = backprop.val_and_grad_function\nGradientTape = backprop.GradientTape # pylint: disable=invalid-name\nin_eager_mode = executing_eagerly\n\nremove_undocumented(__name__)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SinhArcsinh transformation of a distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distributions.python.ops import bijectors\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops.distributions import normal\nfrom tensorflow.python.ops.distributions import transformed_distribution\n\n__all__ = [\n \"SinhArcsinh\",\n]\n\n\nclass SinhArcsinh(transformed_distribution.TransformedDistribution):\n \"\"\"The SinhArcsinh transformation of a distribution on `(-inf, inf)`.\n\n This distribution models a random variable, making use of\n a `SinhArcsinh` transformation (which has adjustable tailweight and skew),\n a rescaling, and a shift.\n\n The `SinhArcsinh` transformation of the Normal is described in great depth in\n [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).\n Here we use a slightly different parameterization, in terms of `tailweight`\n and `skewness`. 
Additionally we allow for distributions other than Normal,\n and control over `scale` as well as a \"shift\" parameter `loc`.\n\n #### Mathematical Details\n\n Given random variable `Z`, we define the SinhArcsinh\n transformation of `Z`, `Y`, parameterized by\n `(loc, scale, skewness, tailweight)`, via the relation:\n\n ```\n Y := loc + scale * F(Z) * (2 / F_0(2))\n F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )\n F_0(Z) := Sinh( Arcsinh(Z) * tailweight )\n ```\n\n This distribution is similar to the location-scale transformation\n `L(Z) := loc + scale * Z` in the following ways:\n\n * If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then\n `Y = L(Z)` exactly.\n * `loc` is used in both to shift the result by a constant factor.\n * The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`\n `P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.\n Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond\n `loc + 2 * scale` are the same.\n\n This distribution is different than `loc + scale * Z` due to the\n reshaping done by `F`:\n\n * Positive (negative) `skewness` leads to positive (negative) skew.\n * positive skew means, the mode of `F(Z)` is \"tilted\" to the right.\n * positive skew means positive values of `F(Z)` become more likely, and\n negative values become less likely.\n * Larger (smaller) `tailweight` leads to fatter (thinner) tails.\n * Fatter tails mean larger values of `|F(Z)|` become more likely.\n * `tailweight < 1` leads to a distribution that is \"flat\" around `Y = loc`,\n and a very steep drop-off in the tails.\n * `tailweight > 1` leads to a distribution more peaked at the mode with\n heavier tails.\n\n To see the argument about the tails, note that for `|Z| >> 1` and\n `|Z| >> (|skewness| * tailweight)**tailweight`, we have\n `Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.\n\n To see the argument regarding multiplying `scale` by `2 / F_0(2)`,\n\n ```\n P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]\n = P[F(Z) <= F_0(2)]\n = P[Z <= 2] (if F = F_0).\n ```\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n skewness=None,\n tailweight=None,\n distribution=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"SinhArcsinh\"):\n \"\"\"Construct SinhArcsinh distribution on `(-inf, inf)`.\n\n Arguments `(loc, scale, skewness, tailweight)` must have broadcastable shape\n (indexing batch dimensions). They must all have the same `dtype`.\n\n Args:\n loc: Floating-point `Tensor`.\n scale: `Tensor` of same `dtype` as `loc`.\n skewness: Skewness parameter. Default is `0.0` (no skew).\n tailweight: Tailweight parameter. Default is `1.0` (unchanged tailweight)\n distribution: `tf.Distribution`-like instance. Distribution that is\n transformed to produce this distribution.\n Default is `tf.distributions.Normal(0., 1.)`.\n Must be a scalar-batch, scalar-event distribution. Typically\n `distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is\n a function of non-trainable parameters. WARNING: If you backprop through\n a `SinhArcsinh` sample and `distribution` is not\n `FULLY_REPARAMETERIZED` yet is a function of trainable variables, then\n the gradient will be incorrect!\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. 
When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = locals()\n\n with ops.name_scope(name,\n values=[loc, scale, skewness, tailweight]) as name:\n loc = ops.convert_to_tensor(loc, name=\"loc\")\n dtype = loc.dtype\n scale = ops.convert_to_tensor(scale, name=\"scale\", dtype=dtype)\n tailweight = 1. if tailweight is None else tailweight\n has_default_skewness = skewness is None\n skewness = 0. if skewness is None else skewness\n tailweight = ops.convert_to_tensor(\n tailweight, name=\"tailweight\", dtype=dtype)\n skewness = ops.convert_to_tensor(skewness, name=\"skewness\", dtype=dtype)\n\n batch_shape = distribution_util.get_broadcast_shape(\n loc, scale, tailweight, skewness)\n\n # Recall, with Z a random variable,\n # Y := loc + C * F(Z),\n # F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )\n # F_0(Z) := Sinh( Arcsinh(Z) * tailweight )\n # C := 2 * scale / F_0(2)\n if distribution is None:\n distribution = normal.Normal(\n loc=array_ops.zeros([], dtype=dtype),\n scale=array_ops.ones([], dtype=dtype),\n allow_nan_stats=allow_nan_stats)\n else:\n asserts = distribution_util.maybe_check_scalar_distribution(\n distribution, dtype, validate_args)\n if asserts:\n loc = control_flow_ops.with_dependencies(asserts, loc)\n\n # Make the SAS bijector, 'F'.\n f = bijectors.SinhArcsinh(\n skewness=skewness, tailweight=tailweight)\n if has_default_skewness:\n f_noskew = f\n else:\n f_noskew = bijectors.SinhArcsinh(\n skewness=skewness.dtype.as_numpy_dtype(0.),\n tailweight=tailweight)\n\n # Make the AffineScalar bijector, Z --> loc + scale * Z (2 / F_0(2))\n c = 2 * scale / f_noskew.forward(ops.convert_to_tensor(2, dtype=dtype))\n affine = bijectors.AffineScalar(\n shift=loc,\n scale=c,\n validate_args=validate_args)\n\n bijector = bijectors.Chain([affine, f])\n\n super(SinhArcsinh, self).__init__(\n distribution=distribution,\n bijector=bijector,\n batch_shape=batch_shape,\n validate_args=validate_args,\n name=name)\n self._parameters = parameters\n self._loc = loc\n self._scale = scale\n self._tailweight = tailweight\n self._skewness = skewness\n\n @property\n def loc(self):\n \"\"\"The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2)).\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2)).\"\"\"\n return self._scale\n\n @property\n def tailweight(self):\n \"\"\"Controls the tail decay. `tailweight > 1` means faster than Normal.\"\"\"\n return self._tailweight\n\n @property\n def skewness(self):\n \"\"\"Controls the skewness. `Skewness > 0` means right skew.\"\"\"\n return self._skewness\n"
] |
[
[
"tensorflow.python.ops.array_ops.matrix_transpose",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.errors.InvalidArgumentError",
"tensorflow.python.ops.check_ops.assert_proper_iterable",
"tensorflow.python.framework.common_shapes.broadcast_shape",
"tensorflow.python.ops.linalg.linalg_impl.adjoint",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.rank"
],
[
"tensorflow.python.util.all_util.remove_undocumented"
],
[
"tensorflow.contrib.distributions.python.ops.bijectors.AffineScalar",
"tensorflow.contrib.distributions.python.ops.bijectors.Chain",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.contrib.distributions.python.ops.distribution_util.maybe_check_scalar_distribution",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.contrib.distributions.python.ops.distribution_util.get_broadcast_shape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.contrib.distributions.python.ops.bijectors.SinhArcsinh"
]
] |
hwangjt/blue
|
[
"609defbe476c86a4a2eddd12977b47e649ea7f50"
] |
[
"openmdao/test_suite/components/cycle_comps.py"
] |
[
"\"\"\"Components for use in `CycleGroup`. For details, see `CycleGroup`.\"\"\"\nfrom __future__ import division, print_function\n\nfrom six.moves import range\n\nimport numpy as np\nimport scipy.sparse as sparse\n\nimport unittest\n\nfrom openmdao.core.explicitcomponent import ExplicitComponent\n\n\nPSI = 1.\n\n_vec_terms = {}\n\n\ndef _compute_vector_terms(system_size):\n # Try/Except pattern is much faster than if key in ... if the key is present (which it will be\n # outside of the first invocation).\n try:\n return _vec_terms[system_size]\n except KeyError:\n u = np.zeros(system_size)\n u[[0, -1]] = np.sqrt(2)/2\n\n v = np.zeros(system_size)\n v[1:-1] = 1 / np.sqrt(system_size - 2)\n\n cross_terms = np.outer(v, u) - np.outer(u, v)\n same_terms = np.outer(u, u) + np.outer(v, v)\n\n _vec_terms[system_size] = u, v, cross_terms, same_terms\n\n return u, v, cross_terms, same_terms\n\n\ndef _compute_A(system_size, theta):\n u, v, cross_terms, same_terms = _compute_vector_terms(system_size)\n return (np.eye(system_size)\n + np.sin(theta) * cross_terms\n + (np.cos(theta) - 1) * same_terms)\n\n\ndef _compute_dA(system_size, theta):\n u, v, cross_terms, same_terms = _compute_vector_terms(system_size)\n return np.cos(theta) * cross_terms - np.sin(theta) * same_terms\n\n\ndef array_idx(i, var_size):\n return slice(i * var_size, (i + 1) * var_size)\n\n\nclass ExplicitCycleComp(ExplicitComponent):\n\n def _inputs_to_vector(self, inputs):\n var_shape = self.metadata['var_shape']\n num_var = self.metadata['num_var']\n size = np.prod(var_shape)\n x = np.zeros(num_var * size)\n for i in range(num_var):\n x_i = inputs[self._cycle_names['x'].format(i)].flat\n x[size * i:size * (i + 1)] = x_i\n\n return x\n\n def _vector_to_outputs(self, vec, outputs):\n var_shape = self.metadata['var_shape']\n num_var = self.metadata['num_var']\n size = np.prod(var_shape)\n for i in range(num_var):\n y_i = vec[size * i:size * (i + 1)].reshape(var_shape)\n outputs[self._cycle_names['y'].format(i)] = y_i\n\n def __str__(self):\n return 'Explicit Cycle Component'\n\n def initialize(self):\n self.metadata.declare('jacobian_type', default='matvec',\n values=['matvec', 'dense', 'sparse-coo', 'sparse-csr',\n 'sparse-csc'],\n desc='method of assembling derivatives')\n self.metadata.declare('partial_type', default='array',\n values=['array', 'sparse', 'aij'],\n desc='type of partial derivatives')\n self.metadata.declare('num_var', type_=int, default=1,\n desc='Number of variables per component')\n self.metadata.declare('var_shape', type_=tuple, default=(3,),\n desc='Shape of each variable')\n self.metadata.declare('index', type_=int,\n desc='Index of the component. 
Used for testing implicit connections')\n self.metadata.declare('connection_type', type_=str, default='explicit',\n values=['explicit', 'implicit'],\n desc='How to connect variables.')\n self.metadata.declare('finite_difference', default=False,\n type_=bool,\n desc='If the derivatives should be finite differenced.')\n self.metadata.declare('num_comp', type_=int, default=2,\n desc='Total number of components')\n\n self.angle_param = 'theta'\n\n self._cycle_names = {}\n\n def _init_parameterized(self):\n self.num_var = self.metadata['num_var']\n self.var_shape = self.metadata['var_shape']\n self.size = self.num_var * np.prod(self.var_shape)\n\n if self.metadata['jacobian_type'] == 'matvec':\n self.compute_jacvec_product = self.jacvec_product\n\n if self.metadata['connection_type'] == 'implicit':\n idx = self.metadata['index']\n self._cycle_names['x'] = 'x_{}_{{}}'.format(idx)\n self._cycle_names['y'] = 'x_{}_{{}}'.format(idx + 1)\n self._cycle_names['theta'] = 'theta_{}'.format(idx)\n self._cycle_names['theta_out'] = 'theta_{}'.format(idx + 1)\n num_var = self.metadata['num_var']\n self._cycle_promotes_in = [self._cycle_names['x'].format(i) for i in range(num_var)]\n self._cycle_promotes_out = [self._cycle_names['y'].format(i) for i in range(num_var)]\n self._cycle_promotes_in.append(self._cycle_names['theta'])\n self._cycle_promotes_out.append(self._cycle_names['theta_out'])\n else:\n self._cycle_names['x'] = 'x_{}'\n self._cycle_names['y'] = 'y_{}'\n self._cycle_names['theta'] = 'theta'\n self._cycle_names['theta_out'] = 'theta_out'\n self._cycle_promotes_in = self._cycle_promotes_out = []\n\n def setup(self):\n for i in range(self.num_var):\n self.add_input(self._cycle_names['x'].format(i), shape=self.var_shape)\n self.add_output(self._cycle_names['y'].format(i), shape=self.var_shape)\n\n self.add_input(self._cycle_names['theta'], val=1.)\n self.add_output(self._cycle_names['theta_out'], shape=(1,))\n\n # Setup partials\n\n pd_type = self.metadata['partial_type']\n\n if self.metadata['finite_difference']:\n if self.metadata['jacobian_type'] == 'matvec':\n raise unittest.SkipTest('not testing FD and matvec')\n if pd_type != 'array':\n raise unittest.SkipTest('only dense FD supported')\n self.declare_partials('*', '*', method='fd')\n\n elif self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':\n num_var = self.num_var\n var_shape = self.var_shape\n var_size = np.prod(var_shape)\n A = np.ones((self.size, self.size))\n dA_x = np.ones((self.size, 1))\n dtheta = np.array([[1.]])\n angle_param = self._cycle_names[self.angle_param]\n\n # if our subjacs are not dense, we must assign values here that\n # match their type (data values don't matter, only structure).\n # Otherwise, we assume they are dense and we'll get an error later\n # when we assign a subjac with a type that doesn't match.\n for out_idx in range(num_var):\n out_var = self._cycle_names['y'].format(out_idx)\n for in_idx in range(num_var):\n in_var = self._cycle_names['x'].format(in_idx)\n Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]\n\n self.declare_partials(out_var, in_var,\n **self._array2kwargs(Aij, pd_type))\n self.declare_partials(out_var, angle_param,\n **self._array2kwargs(dA_x[array_idx(out_idx, var_size)],\n pd_type))\n\n self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['theta'],\n **self._array2kwargs(dtheta, pd_type))\n\n else:\n # Declare everything\n self.declare_partials(of='*', wrt='*')\n\n def compute(self, inputs, outputs):\n theta = 
inputs[self._cycle_names['theta']]\n A = _compute_A(self.size, theta)\n x = self._inputs_to_vector(inputs)\n y = A.dot(x)\n self._vector_to_outputs(y, outputs)\n outputs[self._cycle_names['theta_out']] = theta\n\n def jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n angle_param = self._cycle_names[self.angle_param]\n x = self._inputs_to_vector(inputs)\n angle = inputs[angle_param]\n A = _compute_A(self.size, angle)\n dA = _compute_dA(self.size, angle)\n\n var_shape = self.metadata['var_shape']\n var_size = np.prod(var_shape)\n num_var = self.metadata['num_var']\n x_name = self._cycle_names['x']\n y_name = self._cycle_names['y']\n theta_name = self._cycle_names['theta']\n theta_out_name = self._cycle_names['theta_out']\n\n if mode == 'fwd':\n for j in range(num_var):\n x_j = x_name.format(j)\n if x_j in d_inputs:\n dx = d_inputs[x_j].flat[:]\n for i in range(num_var):\n y_i = y_name.format(i)\n if y_i in d_outputs:\n Aij = A[array_idx(i, var_size), array_idx(j, var_size)]\n d_outputs[y_i] += Aij.dot(dx).reshape(var_shape)\n\n if theta_name in d_inputs and theta_out_name in d_outputs:\n dtheta = d_inputs[theta_name]\n d_outputs[theta_out_name] += dtheta\n\n if angle_param in d_inputs:\n dangle = d_inputs[angle_param]\n dy_dangle = (dA.dot(x)) * dangle\n for i in range(num_var):\n y_i = y_name.format(i)\n if y_i in d_outputs:\n d_outputs[y_i] += dy_dangle[array_idx(i, var_size)].reshape(var_shape)\n\n elif mode == 'rev':\n for i in range(num_var):\n y_i = y_name.format(i)\n if y_i in d_outputs:\n dy_i = d_outputs[y_i].flat[:]\n for j in range(num_var):\n x_j = x_name.format(j)\n if x_j in d_inputs:\n Aij = A[array_idx(i, var_size), array_idx(j, var_size)]\n d_inputs[x_j] += Aij.T.dot(dy_i).reshape(var_shape)\n if angle_param in d_inputs:\n dAij = dA[array_idx(i, var_size), array_idx(j, var_size)]\n x_j_vec = inputs[x_j].flat[:]\n d_inputs[angle_param] += x_j_vec.T.dot(dAij.T.dot(dy_i))\n\n if theta_out_name in d_outputs and theta_name in d_inputs:\n dtheta_out = d_outputs[theta_out_name]\n d_inputs[theta_name] += dtheta_out\n\n def make_jacobian_entry(self, A, pd_type):\n if pd_type == 'aij':\n return self.make_sub_jacobian(A, pd_type)[0]\n return self.make_sub_jacobian(A, pd_type)\n\n def make_sub_jacobian(self, A, pd_type):\n if pd_type == 'array':\n return A\n if pd_type == 'sparse':\n return sparse.csr_matrix(A)\n if pd_type == 'aij':\n data = []\n rows = []\n cols = []\n A = np.atleast_2d(A)\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if np.abs(A[i, j]) > 1e-15:\n data.append(A[i, j])\n rows.append(i)\n cols.append(j)\n return [np.array(data), np.array(rows), np.array(cols)]\n\n raise ValueError('Unknown partial_type: {}'.format(pd_type))\n\n def _array2kwargs(self, arr, pd_type):\n jac = self.make_sub_jacobian(arr, pd_type)\n if pd_type == 'aij':\n return {'val': jac[0], 'rows': jac[1], 'cols': jac[2]}\n else:\n return {'val': jac}\n\n def compute_partials(self, inputs, partials):\n if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:\n angle_param = self._cycle_names[self.angle_param]\n angle = inputs[angle_param]\n num_var = self.num_var\n var_shape = self.var_shape\n var_size = np.prod(var_shape)\n x = self._inputs_to_vector(inputs)\n size = self.size\n A = _compute_A(size, angle)\n dA = _compute_dA(size, angle)\n dA_x = np.atleast_2d(dA.dot(x)).T\n pd_type = self.metadata['partial_type']\n dtheta = np.array([[1.]])\n\n y_name = self._cycle_names['y']\n x_name = self._cycle_names['x']\n\n for out_idx in 
range(num_var):\n out_var = y_name.format(out_idx)\n for in_idx in range(num_var):\n in_var = x_name.format(in_idx)\n Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]\n J_y_x = self.make_jacobian_entry(Aij, pd_type)\n J_y_angle = self.make_jacobian_entry(dA_x[array_idx(out_idx, var_size)],\n pd_type)\n\n partials[out_var, in_var] = J_y_x\n partials[out_var, angle_param] = J_y_angle\n\n theta_out = self._cycle_names['theta_out']\n theta = self._cycle_names['theta']\n partials[theta_out, theta] = self.make_jacobian_entry(dtheta, pd_type)\n\n\nclass ExplicitFirstComp(ExplicitCycleComp):\n def __str__(self):\n return 'Explicit Cycle Component - First'\n\n def setup(self):\n self.add_input('psi', val=1.)\n self.angle_param = 'psi'\n self._cycle_names['psi'] = 'psi'\n super(ExplicitFirstComp, self).setup()\n\n def compute(self, inputs, outputs):\n theta = inputs[self._cycle_names['theta']]\n psi = inputs[self._cycle_names['psi']]\n A = _compute_A(self.size, psi)\n y = A.dot(np.ones(self.size))\n self._vector_to_outputs(y, outputs)\n outputs[self._cycle_names['theta_out']] = theta\n\n\nclass ExplicitLastComp(ExplicitFirstComp):\n def __str__(self):\n return 'Explicit Cycle Component - Last'\n\n def setup(self):\n super(ExplicitLastComp, self).setup()\n\n self.add_output('x_norm2', shape=(1,))\n self._n = 1\n\n # Setup partials\n\n pd_type = self.metadata['partial_type']\n if self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':\n x = np.ones(self.var_shape)\n for i in range(self.metadata['num_var']):\n in_var = self._cycle_names['x'].format(i)\n self.declare_partials('x_norm2', in_var,\n **self._array2kwargs(x.flatten(), pd_type))\n\n self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['psi'],\n **self._array2kwargs(np.array([1.]), pd_type))\n\n def compute(self, inputs, outputs):\n theta = inputs[self._cycle_names['theta']]\n psi = inputs[self._cycle_names['psi']]\n k = self.metadata['num_comp']\n x = self._inputs_to_vector(inputs)\n\n outputs['x_norm2'] = 0.5*np.dot(x,x)\n\n # theta_out has 1/2 the error as theta does to the correct angle.\n outputs[self._cycle_names['theta_out']] = theta / 2 + (self._n * 2 * np.pi - psi) / (2 * k - 2)\n\n def compute_partials(self, inputs, partials):\n if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:\n pd_type = self.metadata['partial_type']\n for i in range(self.metadata['num_var']):\n in_var = self._cycle_names['x'].format(i)\n partials['x_norm2', in_var] = self.make_jacobian_entry(inputs[in_var].flat[:],\n pd_type)\n\n k = self.metadata['num_comp']\n theta_out = self._cycle_names['theta_out']\n theta = self._cycle_names['theta']\n partials[theta_out, theta] = self.make_jacobian_entry(np.array([.5]), pd_type)\n partials[theta_out, self._cycle_names['psi']] = \\\n self.make_jacobian_entry(np.array([-1/(2*k-2)]), pd_type)\n\n def jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n if self.metadata['jacobian_type'] == 'matvec':\n k = self.metadata['num_comp']\n num_var = self.metadata['num_var']\n theta_out = self._cycle_names['theta_out']\n theta = self._cycle_names['theta']\n psi = self._cycle_names['psi']\n\n if mode == 'fwd':\n if theta_out in d_outputs:\n if theta in d_inputs:\n d_outputs[theta_out] += 0.5 * d_inputs[theta]\n if psi in d_inputs:\n d_outputs[theta_out] += -d_inputs[psi] / (2 * k - 2)\n for i in range(num_var):\n in_var = self._cycle_names['x'].format(i)\n if in_var in d_inputs and 'x_norm2' in d_outputs:\n d_outputs['x_norm2'] += 
np.dot(inputs[in_var].flat, d_inputs[in_var].flat)\n\n elif mode == 'rev':\n if 'x_norm2' in d_outputs:\n dxnorm = d_outputs['x_norm2']\n for i in range(num_var):\n x_i_name = self._cycle_names['x'].format(i)\n if x_i_name in d_inputs:\n d_inputs[x_i_name] += inputs[x_i_name] * dxnorm\n\n if theta_out in d_outputs:\n dtheta_out = d_outputs[theta_out]\n if theta in d_inputs:\n d_inputs[theta] += .5*dtheta_out\n if psi in d_inputs:\n d_inputs[psi] += -dtheta_out/(2*k-2)\n"
] |
[
[
"numpy.sin",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.ones",
"numpy.eye",
"numpy.prod",
"numpy.cos",
"numpy.sqrt",
"numpy.outer",
"numpy.abs",
"scipy.sparse.csr_matrix",
"numpy.atleast_2d"
]
] |
johnwlambert/argoverse_cbgs_kf_tracker
|
[
"9268cb6dd9844f80eb107a0cc5e77e880d3b3e76"
] |
[
"ab3dmot.py"
] |
[
"#!/usr/bin/env python3\n\nfrom filterpy.kalman import KalmanFilter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pdb\nfrom sklearn.utils.linear_assignment_ import linear_assignment\nimport sys\nimport time\n\nfrom transform_utils import convert_3dbox_to_8corner\nfrom iou_utils import compute_iou_2d_bboxes\n\n\nclass KalmanBoxTracker(object):\n \"\"\"\n This class represents the internel state of individual tracked objects observed as bbox.\n \"\"\"\n count = 0\n def __init__(self, bbox3D, info):\n \"\"\"\n Initialises a tracker using initial bounding box.\n \"\"\"\n #define constant velocity model\n self.kf = KalmanFilter(dim_x=10, dim_z=7) \n self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0], # state transition matrix\n [0,1,0,0,0,0,0,0,1,0],\n [0,0,1,0,0,0,0,0,0,1],\n [0,0,0,1,0,0,0,0,0,0], \n [0,0,0,0,1,0,0,0,0,0],\n [0,0,0,0,0,1,0,0,0,0],\n [0,0,0,0,0,0,1,0,0,0],\n [0,0,0,0,0,0,0,1,0,0],\n [0,0,0,0,0,0,0,0,1,0],\n [0,0,0,0,0,0,0,0,0,1]]) \n \n self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0], # measurement function,\n [0,1,0,0,0,0,0,0,0,0],\n [0,0,1,0,0,0,0,0,0,0],\n [0,0,0,1,0,0,0,0,0,0],\n [0,0,0,0,1,0,0,0,0,0],\n [0,0,0,0,0,1,0,0,0,0],\n [0,0,0,0,0,0,1,0,0,0]])\n\n # with angular velocity\n # self.kf = KalmanFilter(dim_x=11, dim_z=7) \n # self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0,0], # state transition matrix\n # [0,1,0,0,0,0,0,0,1,0,0],\n # [0,0,1,0,0,0,0,0,0,1,0],\n # [0,0,0,1,0,0,0,0,0,0,1], \n # [0,0,0,0,1,0,0,0,0,0,0],\n # [0,0,0,0,0,1,0,0,0,0,0],\n # [0,0,0,0,0,0,1,0,0,0,0],\n # [0,0,0,0,0,0,0,1,0,0,0],\n # [0,0,0,0,0,0,0,0,1,0,0],\n # [0,0,0,0,0,0,0,0,0,1,0],\n # [0,0,0,0,0,0,0,0,0,0,1]]) \n \n # self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0,0], # measurement function,\n # [0,1,0,0,0,0,0,0,0,0,0],\n # [0,0,1,0,0,0,0,0,0,0,0],\n # [0,0,0,1,0,0,0,0,0,0,0],\n # [0,0,0,0,1,0,0,0,0,0,0],\n # [0,0,0,0,0,1,0,0,0,0,0],\n # [0,0,0,0,0,0,1,0,0,0,0]])\n\n # self.kf.R[0:,0:] *= 10. # measurement uncertainty\n self.kf.P[7:,7:] *= 1000. 
#state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix\n self.kf.P *= 10.\n \n # self.kf.Q[-1,-1] *= 0.01 # process uncertainty\n self.kf.Q[7:,7:] *= 0.01\n self.kf.x[:7] = bbox3D.reshape((7, 1))\n\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 1 # number of total hits including the first detection\n self.hit_streak = 1 # number of continuing hit considering the first detection\n self.first_continuing_hit = 1\n self.still_first = True\n self.age = 0\n self.info = info # other info\n\n def update(self, bbox3D, info): \n \"\"\" \n Updates the state vector with observed bbox.\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1 # number of continuing hit\n if self.still_first:\n self.first_continuing_hit += 1 # number of continuing hit in the fist time\n \n ######################### orientation correction\n if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range\n if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2\n\n new_theta = bbox3D[3]\n if new_theta >= np.pi: new_theta -= np.pi * 2 # make the theta still in the range\n if new_theta < -np.pi: new_theta += np.pi * 2\n bbox3D[3] = new_theta\n\n predicted_theta = self.kf.x[3]\n if abs(new_theta - predicted_theta) > np.pi / 2.0 and abs(new_theta - predicted_theta) < np.pi * 3 / 2.0: # if the angle of two theta is not acute angle\n self.kf.x[3] += np.pi \n if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range\n if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2\n \n # now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90\n if abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:\n if new_theta > 0: self.kf.x[3] += np.pi * 2\n else: self.kf.x[3] -= np.pi * 2\n \n ######################### \n\n self.kf.update(bbox3D)\n\n if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range\n if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2\n self.info = info\n\n def predict(self): \n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate.\n \"\"\"\n self.kf.predict() \n if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2\n if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2\n\n self.age += 1\n if(self.time_since_update>0):\n self.hit_streak = 0\n self.still_first = False\n self.time_since_update += 1\n self.history.append(self.kf.x)\n return self.history[-1]\n\n def get_state(self):\n \"\"\"\n Returns the current bounding box estimate.\n \"\"\"\n return self.kf.x[:7].reshape((7, ))\n\n\n\n\ndef associate_detections_to_trackers(detections,trackers,iou_threshold=0.1):\n# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.01): # ablation study\n# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.25):\n \"\"\"\n Assigns detections to tracked object (both represented as bounding boxes)\n\n detections: N x 8 x 3\n trackers: M x 8 x 3\n\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\n \"\"\"\n if(len(trackers)==0):\n return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,8,3),dtype=int) \n iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)\n\n for d,det in enumerate(detections):\n for t,trk in enumerate(trackers):\n #print(f'On d={d}, t={t}')\n #iou_matrix[d,t] = iou3d(det,trk)[1] # try 2d iou instead # det: 8 x 3, trk: 8 x 3\n iou_matrix[d,t] = 
compute_iou_2d_bboxes(det, trk)\n\n matched_indices = linear_assignment(-iou_matrix) # hungarian algorithm\n\n unmatched_detections = []\n for d,det in enumerate(detections):\n if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t,trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #print(iou_matrix)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if(iou_matrix[m[0],m[1]]<iou_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1,2))\n if(len(matches)==0):\n matches = np.empty((0,2),dtype=int)\n else:\n matches = np.concatenate(matches,axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\n\nclass AB3DMOT(object):\n def __init__(self,max_age=2,min_hits=3): # max age will preserve the bbox does not appear no more than 2 frames, interpolate the detection\n # def __init__(self,max_age=3,min_hits=3): # ablation study\n # def __init__(self,max_age=1,min_hits=3): \n # def __init__(self,max_age=2,min_hits=1): \n # def __init__(self,max_age=2,min_hits=5): \n \"\"\" \n \"\"\"\n self.max_age = max_age\n self.min_hits = min_hits\n self.trackers = []\n self.frame_count = 0\n # self.reorder = [3, 4, 5, 6, 2, 1, 0]\n # self.reorder_back = [6, 5, 4, 0, 1, 2, 3]\n\n def update(self,dets_all):\n \"\"\"\n Params:\n dets_all: dict\n dets - a numpy array of detections in the format [[x,y,z,theta,l,w,h],[x,y,z,theta,l,w,h],...]\n info: a array of other info for each det\n Requires: this method must be called once for each frame even with empty detections.\n Returns the a similar array, where the last column is the object ID.\n\n NOTE: The number of objects returned may differ from the number of detections provided.\n \"\"\"\n dets, info = dets_all['dets'], dets_all['info'] # dets: N x 7, float numpy array\n # dets = dets[:, self.reorder]\n self.frame_count += 1\n\n trks = np.zeros((len(self.trackers),7)) # N x 7 , #get predicted locations from existing trackers.\n to_del = []\n ret = []\n for t,trk in enumerate(trks):\n pos = self.trackers[t].predict().reshape((-1, 1))\n trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5], pos[6]] \n if(np.any(np.isnan(pos))):\n to_del.append(t)\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) \n for t in reversed(to_del):\n self.trackers.pop(t)\n\n dets_8corner = [convert_3dbox_to_8corner(det_tmp) for det_tmp in dets]\n if len(dets_8corner) > 0: dets_8corner = np.stack(dets_8corner, axis=0)\n else: dets_8corner = []\n trks_8corner = [convert_3dbox_to_8corner(trk_tmp) for trk_tmp in trks]\n if len(trks_8corner) > 0: trks_8corner = np.stack(trks_8corner, axis=0)\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets_8corner, trks_8corner)\n \n #update matched trackers with assigned detections\n for t,trk in enumerate(self.trackers):\n if t not in unmatched_trks:\n d = matched[np.where(matched[:,1]==t)[0],0] # a list of index\n trk.update(dets[d,:][0], info[d, :][0])\n\n #create and initialise new trackers for unmatched detections\n for i in unmatched_dets: # a scalar of index\n trk = KalmanBoxTracker(dets[i,:], info[i, :]) \n self.trackers.append(trk)\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n d = trk.get_state() # bbox location\n # d = d[self.reorder_back]\n\n if((trk.time_since_update < self.max_age) and (trk.hits >= self.min_hits or self.frame_count <= self.min_hits)): \n 
ret.append(np.concatenate((d, [trk.id+1], trk.info)).reshape(1,-1)) # +1 as MOT benchmark requires positive\n i -= 1\n #remove dead tracklet\n if(trk.time_since_update >= self.max_age):\n self.trackers.pop(i)\n if(len(ret)>0):\n return np.concatenate(ret) # x, y, z, theta, l, w, h, ID, other info, confidence\n return np.empty((0,15)) \n \n\n\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.isnan",
"numpy.empty",
"numpy.ma.masked_invalid",
"numpy.where",
"sklearn.utils.linear_assignment_.linear_assignment",
"numpy.stack"
]
] |
vinnamkim/segmentation_models.pytorch
|
[
"f967ded34df6fb536e8e8cba9b6491ae63b939f5"
] |
[
"segmentation_models_pytorch/encoders/zerocenter.py"
] |
[
"\nimport torch\nimport torch.nn as nn\n#from .utils import load_state_dict_from_url\nfrom .utils import zerocenter\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',\n 'wide_resnet50_2', 'wide_resnet101_2']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = zerocenter(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n out = zerocenter(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = zerocenter(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = 
self.relu(out)\n out = zerocenter(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n out = zerocenter(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = zerocenter(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n 
return x\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n # if pretrained:\n # state_dict = load_state_dict_from_url(model_urls[arch],\n # progress=progress)\n # model.load_state_dict(state_dict)\n return model\n\n\ndef resnet18(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n **kwargs)\n\n\ndef resnet34(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet50(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet101(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet152(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnext50_32x4d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef resnext101_32x8d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, 
**kwargs)\n\n\ndef wide_resnet50_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef wide_resnet101_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\nclass ZeroCenterEncoder(ResNet):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.pretrained = False\n del self.fc\n\n def forward(self, x):\n x0 = self.conv1(x)\n x0 = self.bn1(x0)\n x0 = self.relu(x0)\n x1 = self.maxpool(x0)\n x1 = zerocenter(x1)\n\n x1 = self.layer1(x1)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n\n return [x4, x3, x2, x1, x0]\n\n def load_state_dict(self, state_dict, **kwargs):\n state_dict.pop('fc.bias')\n state_dict.pop('fc.weight')\n super().load_state_dict(state_dict, **kwargs)"
] |
[
[
"torch.nn.Linear",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
Zeng-WH/ML2020
|
[
"f467a6260cd782968696950ef74f3780933cdcdd"
] |
[
"CNN/code/filter_visualiton.py"
] |
[
"import os\r\nimport sys\r\nimport argparse\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.optim import Adam\r\nfrom torch.utils.data import Dataset\r\nimport torchvision.transforms as transforms\r\nimport pickle\r\n\r\n\r\ndef normalize(image):\r\n return (image - image.min()) / (image.max() - image.min())\r\n\r\n\r\nlayer_activations = None\r\n\r\ndef filter_explanation(x, model, cnnid, filterid, iteration=100, lr=1):\r\n # x: 需要训练的图片\r\n # cnnid, filterid: 指定第几层cnn中第几个filter\r\n model.eval()\r\n\r\n def hook(model, input, output):\r\n global layer_activations\r\n layer_activations = output\r\n\r\n hook_handle = model.cnn[cnnid].register_forward_hook(hook)\r\n # 当forward了第cnnid层cnn后, 要先呼叫hook, 才可以继续forward下一层cnn\r\n\r\n # Filter activation: 我们先观察x经过被指定filter的activation map\r\n model(x.cuda())\r\n # 正式执行forward的步骤\r\n filter_activations = layer_activations[:, filterid, :, :].detach().cpu()\r\n # 根据function argument 指定的filterid把待定filter的activation map取出来\r\n x = x.cuda()\r\n x.requires_grad_()\r\n optimizer = Adam([x], lr=lr)\r\n # 利用偏微分和optimizer, 逐步修改input image来让filter activation越来越大\r\n for iter in range(iteration):\r\n optimizer.zero_grad()\r\n model(x)\r\n\r\n objective = -layer_activations[:, filterid, :, :].sum()\r\n # 探究image的微量变化会怎样影响activation的程度,加负号代表做maximization\r\n objective.backward()\r\n optimizer.step()\r\n # 修改input image来最大化filter activation\r\n filter_visualization = x.detach().cpu().squeeze()[0]\r\n # 完成图片修改,只剩下要画出来,因此可以直接detach并转成cpu tensor\r\n\r\n hook_handle.remove()\r\n # 一旦model register hook, 该hook就一致存在。如果之后继续register更多hook\r\n # 那model一次forward要做的事情就越来越来越多,因此需要把hook拿掉\r\n\r\n return filter_activations, filter_visualization\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"torch.optim.Adam"
]
] |
aaron8tang/h2o4gpu
|
[
"602275375cb0dfb4acd070a8c86c3ded0bef1156"
] |
[
"src/interface_py/h2o4gpu/solvers/factorization.py"
] |
[
"# - * - encoding : utf - 8 - * -\n# pylint: disable=fixme, line-too-long\n\"\"\"\nMatrix factorization solver.\n\n:copyright: 2017-2019 H2O.ai, Inc.\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\nimport numpy as np\nimport scipy\nimport scipy.sparse\n\n\ndef _get_sparse_matrixes(X):\n '''Create csc, csr and coo sparse matrix from any of the above\n\n Arguments:\n X {array-like, csc, csr or coo sparse matrix}\n\n Returns:\n csc, csr, coo\n '''\n\n X_coo = X_csc = X_csr = None\n if scipy.sparse.isspmatrix_coo(X):\n X_coo = X\n X_csr = X_coo.tocsr(True)\n X_csc = X_coo.tocsc(True)\n elif scipy.sparse.isspmatrix_csr(X):\n X_csr = X\n X_csc = X_csr.tocoo(True)\n X_coo = X_csr.tocsc(True)\n elif scipy.sparse.isspmatrix_csc(X):\n X_csc = X\n X_csr = X_csc.tocsr(True)\n X_coo = X_csc.tocoo(True)\n else:\n assert False, \"only coo, csc and csr sparse matrixes are supported\"\n return X_csc, X_csr, X_coo\n\n\nclass FactorizationH2O(object):\n '''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.\n\n Factors a sparse rating matrix X (m by n, with N_z non-zero elements)\n into a m-by-f and a f-by-n matrices.\n\n Parameters\n ----------\n f int\n decomposition size\n lambda_ float\n lambda regularization\n max_iter int, default: 100\n number of training iterations\n double_precision bool, default: False\n use double precision, not yet supported\n thetaT {array-like} shape (n, f), default: None\n initial theta matrix\n XT {array-like} shape (m, f), default: None\n initial XT matrix\n random_state int, default: 1234\n\n Attributes\n ----------\n XT {array-like} shape (m, f)\n XT matrix contains user's features\n thetaT {array-like} shape (n, f)\n transposed theta matrix, item's features\n\n Warnings\n --------\n Matrixes ``XT`` and ``thetaT`` may contain nan elements. This is because in some datasets,\n there are users or items with no ratings in training set. That results in solutions of\n a system of linear equations becomes nan. Such elements can be easily removed with numpy\n functions like numpy.nan_to_num, but existence of them may be useful for troubleshooting\n purposes.\n\n '''\n\n def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):\n assert not double_precision, 'double precision is not yet supported'\n assert f % 10 == 0, 'f has to be a multiple of 10'\n self.f = f\n self.lambda_ = lambda_\n self.double_precision = double_precision\n self.dtype = np.float64 if self.double_precision else np.float32\n self.thetaT = thetaT\n self.XT = XT\n self.max_iter = max_iter\n self.random_state = random_state\n\n def _load_lib(self):\n from ..libs.lib_utils import GPUlib\n\n gpu_lib = GPUlib().get(1)\n return gpu_lib\n\n def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):\n #pylint: disable=unused-argument\n '''Learn model from rating matrix X.\n\n Parameters\n ----------\n X {array-like, sparse matrix}, shape (m, n)\n Data matrix to be decomposed.\n y None\n Ignored\n X_test {array-like, coo sparse matrix}, shape (m, n)\n Data matrix for cross validation.\n X_BATCHES int, default: 1\n Batches to split XT, increase this parameter in case out of memory error.\n THETA_BATCHES int, default: 1\n Batches to split theta, increase this parameter in case out of memory error.\n early_stopping_rounds int, default: None\n Activates early stopping. 
Cross validation error needs to decrease\n at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.\n Returns the model from the last iteration (not the best one). If early stopping occurs,\n the model will have three additional fields: best_cv_score, best_train_score and best_iteration.\n verbose bool, default: False\n Prints training and validation score(if applicable) on each iteration.\n scores {list}\n List of tuples with train, cv score for every iteration.\n\n Returns\n -------\n self : returns an instance of self.\n\n '''\n\n csc_X, csr_X, coo_X = _get_sparse_matrixes(X)\n\n if early_stopping_rounds is not None:\n assert X_test is not None, 'X_test is mandatory with early stopping'\n if X_test is not None:\n assert scipy.sparse.isspmatrix_coo(\n X_test), 'X_test must be a coo sparse scipy matrix'\n assert X.shape == X_test.shape\n assert X_test.dtype == self.dtype\n\n assert X.dtype == self.dtype\n\n coo_X_test = X_test\n\n lib = self._load_lib()\n if self.double_precision:\n make_data = lib.make_factorization_data_double\n run_step = lib.run_factorization_step_double\n factorization_score = lib.factorization_score_double\n copy_fecatorization_result = lib.copy_fecatorization_result_double\n free_data = lib.free_data_double\n else:\n make_data = lib.make_factorization_data_float\n run_step = lib.run_factorization_step_float\n factorization_score = lib.factorization_score_float\n copy_fecatorization_result = lib.copy_fecatorization_result_float\n free_data = lib.free_data_float\n\n m = coo_X.shape[0]\n n = coo_X.shape[1]\n nnz = csc_X.nnz\n if coo_X_test is None:\n nnz_test = 0\n else:\n nnz_test = coo_X_test.nnz\n\n rs = np.random.RandomState(self.random_state)\n\n if self.thetaT is None:\n self.thetaT = rs.rand(n, self.f).astype(self.dtype)\n else:\n assert self.thetaT.dtype == self.dtype\n\n if self.XT is None:\n self.XT = rs.rand(m, self.f).astype(self.dtype)\n else:\n assert self.XT.dtype == self.dtype\n\n csrRowIndexDevicePtr = None\n csrColIndexDevicePtr = None\n csrValDevicePtr = None\n cscRowIndexDevicePtr = None\n cscColIndexDevicePtr = None\n cscValDevicePtr = None\n cooRowIndexDevicePtr = None\n cooColIndexDevicePtr = None\n cooValDevicePtr = None\n thetaTDevice = None\n XTDevice = None\n cooRowIndexTestDevicePtr = None\n cooColIndexTestDevicePtr = None\n cooValTestDevicePtr = None\n\n status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \\\n cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \\\n cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \\\n thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \\\n cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data( # pylint: disable=W0212\n m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,\n csc_X.indices, csc_X.indptr, csc_X.data,\n coo_X.row, coo_X.col, coo_X.data,\n self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,\n coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,\n csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,\n cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,\n thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,\n cooColIndexTestDevicePtr, cooValTestDevicePtr)\n\n assert status == 0, 'Failure uploading the data'\n\n self.best_train_score = np.inf\n self.best_cv_score = np.inf\n self.best_iteration = -1\n cv_score = train_score = np.inf\n\n for i in 
range(self.max_iter):\n status = run_step(m,\n n,\n self.f,\n nnz,\n self.lambda_,\n csrRowIndexDevicePtr,\n csrColIndexDevicePtr,\n csrValDevicePtr,\n cscRowIndexDevicePtr,\n cscColIndexDevicePtr,\n cscValDevicePtr,\n thetaTDevice,\n XTDevice,\n X_BATCHES,\n THETA_BATCHES)\n if verbose or scores is not None:\n result = factorization_score(m,\n n,\n self.f,\n nnz,\n self.lambda_,\n thetaTDevice,\n XTDevice,\n cooRowIndexDevicePtr,\n cooColIndexDevicePtr,\n cooValDevicePtr)\n train_score = result[0]\n if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):\n result = factorization_score(m,\n n,\n self.f,\n nnz_test,\n self.lambda_,\n thetaTDevice,\n XTDevice,\n cooRowIndexTestDevicePtr,\n cooColIndexTestDevicePtr,\n cooValTestDevicePtr)\n cv_score = result[0]\n if verbose:\n print(\"iteration {0} train: {1} cv: {2}\".format(\n i, train_score, cv_score))\n if scores is not None:\n scores.append((train_score, cv_score))\n\n if early_stopping_rounds is not None:\n if self.best_cv_score > cv_score:\n self.best_cv_score = cv_score\n self.best_train_score = train_score\n self.best_iteration = i\n if (i - self.best_iteration) > early_stopping_rounds:\n if verbose:\n print('best iteration:{0} train: {1} cv: {2}'.format(\n self.best_iteration, self.best_train_score, self.best_cv_score))\n break\n\n lib.free_data_int(csrRowIndexDevicePtr)\n lib.free_data_int(csrColIndexDevicePtr)\n free_data(csrValDevicePtr)\n lib.free_data_int(cscRowIndexDevicePtr)\n lib.free_data_int(cscColIndexDevicePtr)\n free_data(cscValDevicePtr)\n lib.free_data_int(cooRowIndexDevicePtr)\n lib.free_data_int(cooColIndexDevicePtr)\n free_data(cooValDevicePtr)\n lib.free_data_int(cooRowIndexTestDevicePtr)\n lib.free_data_int(cooColIndexTestDevicePtr)\n free_data(cooValTestDevicePtr)\n\n copy_fecatorization_result(self.XT, XTDevice, m * self.f)\n copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)\n\n free_data(thetaTDevice)\n free_data(XTDevice)\n\n return self\n\n def predict(self, X):\n '''Predict none zero elements of coo sparse matrix X according to the fitted model.\n\n Parameters\n ----------\n X {array-like, sparse coo matrix} shape (m, n)\n Data matrix in coo format. Values are ignored.\n\n Returns\n -------\n {array-like, sparse coo matrix} shape (m, n)\n Predicted values.\n\n '''\n\n assert self.XT is not None and self.thetaT is not None, 'tranform is invoked on an unfitted model'\n assert scipy.sparse.isspmatrix_coo(\n X), 'convert X to coo sparse matrix'\n assert X.dtype == self.dtype\n a = np.take(self.XT, X.row, axis=0)\n b = np.take(self.thetaT, X.col, axis=0)\n val = np.sum(a * b, axis=1)\n return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)\n"
] |
[
[
"scipy.sparse.coo_matrix",
"scipy.sparse.isspmatrix_csc",
"numpy.random.RandomState",
"scipy.sparse.isspmatrix_csr",
"numpy.sum",
"numpy.take",
"scipy.sparse.isspmatrix_coo"
]
] |
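A minimal usage sketch for the FactorizationH2O solver in the row above. The random rating matrix, its size, and all parameter values are illustrative assumptions, and running it requires a working GPU build of h2o4gpu.

import numpy as np
import scipy.sparse as sp
from h2o4gpu.solvers.factorization import FactorizationH2O  # import path assumed from the file location above

# Build a small random rating matrix in the float32 coo format the solver expects.
rng = np.random.RandomState(0)
m, n, nnz = 1000, 500, 20000
rows = rng.randint(0, m, nnz)
cols = rng.randint(0, n, nnz)
vals = rng.rand(nnz).astype(np.float32)
X = sp.coo_matrix((vals, (rows, cols)), shape=(m, n))

model = FactorizationH2O(f=40, lambda_=0.01, max_iter=20)  # f must be a multiple of 10
model.fit(X, verbose=True)
X_pred = model.predict(X)  # coo matrix holding predictions at the non-zero positions of X
print(X_pred.data[:5])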
fullmoonhalf/SemGCN
|
[
"ce1dce98f8b7cc600ba7e733d17d71192c24b596",
"ce1dce98f8b7cc600ba7e733d17d71192c24b596"
] |
[
"data/prepare_data_2d_h36m_sh.py",
"models/sem_ch_graph_conv.py"
] |
[
"from __future__ import print_function, absolute_import, division\n\nimport argparse\nimport os\nimport zipfile\nimport tarfile\nimport numpy as np\nimport h5py\nfrom glob import glob\nfrom shutil import rmtree\n\nimport sys\n\nsys.path.append('../')\n\nfrom common.h36m_dataset import H36M_NAMES\n\noutput_filename_pt = 'data_2d_h36m_sh_pt_mpii'\noutput_filename_ft = 'data_2d_h36m_sh_ft_h36m'\nsubjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']\ncam_map = {\n '54138969': 0,\n '55011271': 1,\n '58860488': 2,\n '60457274': 3,\n}\n\nmetadata = {\n 'num_joints': 16,\n 'keypoints_symmetry': [\n [3, 4, 5, 13, 14, 15],\n [2, 1, 0, 12, 11, 10],\n ]\n}\n\n# Stacked Hourglass produces 16 joints. These are the names.\nSH_NAMES = [''] * 16\nSH_NAMES[0] = 'RFoot'\nSH_NAMES[1] = 'RKnee'\nSH_NAMES[2] = 'RHip'\nSH_NAMES[3] = 'LHip'\nSH_NAMES[4] = 'LKnee'\nSH_NAMES[5] = 'LFoot'\nSH_NAMES[6] = 'Hip'\nSH_NAMES[7] = 'Spine'\nSH_NAMES[8] = 'Thorax'\nSH_NAMES[9] = 'Head'\nSH_NAMES[10] = 'RWrist'\nSH_NAMES[11] = 'RElbow'\nSH_NAMES[12] = 'RShoulder'\nSH_NAMES[13] = 'LShoulder'\nSH_NAMES[14] = 'LElbow'\nSH_NAMES[15] = 'LWrist'\n\n# Permutation that goes from SH detections to H36M ordering.\nSH_TO_GT_PERM = np.array([SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])\nassert np.all(SH_TO_GT_PERM == np.array([6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))\n\nmetadata['keypoints_symmetry'][0] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][0]]\nmetadata['keypoints_symmetry'][1] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][1]]\n\n\ndef process_subject(subject, file_list, output):\n if subject == 'S11':\n assert len(file_list) == 119, \"Expected 119 files for subject \" + subject + \", got \" + str(len(file_list))\n else:\n assert len(file_list) == 120, \"Expected 120 files for subject \" + subject + \", got \" + str(len(file_list))\n\n for f in file_list:\n action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')\n\n if subject == 'S11' and action == 'Directions':\n continue # Discard corrupted video\n\n if action not in output[subject]:\n output[subject][action] = [None, None, None, None]\n\n with h5py.File(f) as hf:\n# positions = hf['poses'].value\n positions = np.array(hf['poses'])\n positions = positions[:, SH_TO_GT_PERM, :]\n output[subject][action][cam_map[cam]] = positions.astype('float32')\n\n\nif __name__ == '__main__':\n if os.path.basename(os.getcwd()) != 'data':\n print('This script must be launched from the \"data\" directory')\n exit(0)\n\n parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')\n\n parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')\n parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')\n\n args = parser.parse_args()\n\n if args.pretrained:\n print('Converting pretrained dataset from', args.pretrained)\n print('Extracting...')\n with zipfile.ZipFile(args.pretrained, 'r') as archive:\n archive.extractall('sh_pt')\n\n print('Converting...')\n output = {}\n for subject in subjects:\n output[subject] = {}\n file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5')\n process_subject(subject, file_list, output)\n\n print('Saving...')\n np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata)\n\n print('Cleaning up...')\n rmtree('sh_pt')\n\n print('Done.')\n\n if args.fine_tuned:\n 
print('Converting fine-tuned dataset from', args.fine_tuned)\n print('Extracting...')\n with tarfile.open(args.fine_tuned, 'r:gz') as archive:\n archive.extractall('sh_ft')\n\n print('Converting...')\n output = {}\n for subject in subjects:\n output[subject] = {}\n file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5')\n process_subject(subject, file_list, output)\n\n print('Saving...')\n np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata)\n\n print('Cleaning up...')\n rmtree('sh_ft')\n\n print('Done.')\n",
"from __future__ import absolute_import, division\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SemCHGraphConv(nn.Module):\n \"\"\"\n Semantic channel-wise graph convolution layer\n \"\"\"\n\n def __init__(self, in_features, out_features, adj, bias=True):\n super(SemCHGraphConv, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n self.W = nn.Parameter(torch.zeros(size=(2, in_features, out_features), dtype=torch.float))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n\n self.adj = adj.unsqueeze(0).repeat(out_features, 1, 1)\n self.m = (self.adj > 0)\n self.e = nn.Parameter(torch.zeros(out_features, len(self.m[0].nonzero()), dtype=torch.float))\n nn.init.constant_(self.e.data, 1)\n\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_features, dtype=torch.float))\n stdv = 1. / math.sqrt(self.W.size(1))\n self.bias.data.uniform_(-stdv, stdv)\n else:\n self.register_parameter('bias', None)\n\n def forward(self, input):\n h0 = torch.matmul(input, self.W[0]).unsqueeze(1).transpose(1, 3) # B * C * J * 1\n h1 = torch.matmul(input, self.W[1]).unsqueeze(1).transpose(1, 3) # B * C * J * 1\n\n adj = -9e15 * torch.ones_like(self.adj).to(input.device) # C * J * J\n adj[self.m] = self.e.view(-1)\n adj = F.softmax(adj, dim=2)\n\n E = torch.eye(adj.size(1), dtype=torch.float).to(input.device)\n E = E.unsqueeze(0).repeat(self.out_features, 1, 1) # C * J * J\n output = torch.matmul(adj * E, h0) + torch.matmul(adj * (1 - E), h1)\n output = output.transpose(1, 3).squeeze(1)\n\n if self.bias is not None:\n return output + self.bias.view(1, 1, -1)\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n"
] |
[
[
"numpy.array",
"numpy.savez_compressed"
],
[
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.matmul"
]
] |
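A short sketch of exercising the SemCHGraphConv layer from the second file in the row above. The joint count, chain-shaped adjacency, and feature sizes are illustrative assumptions rather than values taken from SemGCN.

import torch
from models.sem_ch_graph_conv import SemCHGraphConv  # import path assumed from the file location above

J, in_f, out_f = 16, 2, 64  # 16 joints, 2-D joint coordinates, 64 output channels
# Simple chain adjacency (self loops plus neighbours) standing in for the skeleton graph.
adj = torch.eye(J) + torch.diag(torch.ones(J - 1), 1) + torch.diag(torch.ones(J - 1), -1)
layer = SemCHGraphConv(in_f, out_f, adj)

x = torch.randn(8, J, in_f)  # batch of 8 poses
y = layer(x)
print(y.shape)               # torch.Size([8, 16, 64])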
exoplanetvetting/DAVE
|
[
"45ba97b7b535ad26dd555c33c963c6224a9af23c",
"aea19a30d987b214fb4c0cf01aa733f127c411b9"
] |
[
"lpp/newlpp/lppTransform.py",
"diffimg/psffit.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 20:32:12 2018\nFunctions to correctly fold and bin a light curve.\nCalculate the lpp metric: transform to lower dimensions, knn\n\nDepends on class from reading in a previously created LPP metric Map\n\nDepends on reading in the light curve to data structure.\n\ninput is a class called data \ndata contains\ndata.time (days)\ndata.tzero (day)\ndata.dur (hours)\ndata.period (days)\ndata.flux (normalized to 0)\n\nAfter foldBinLightCurve it contains\ndata.binned\nAfter transform it contains\ndata.lpp_transform\n\n\n@author: smullally\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom lpproj import LocalityPreservingProjection\nimport copy\n\ndef computeLPPTransitMetric(data,mapInfo):\n \"\"\"\n This function takes a data class with light curve info\n and the mapInfo with information about the mapping to use.\n It then returns a lpp metric value.\n \"\"\"\n \n binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)\n \n #plt.figure()\n #plt.plot(binPhase,binFlux,'.--')\n \n \n #Dimensionality Reduction and knn parts\n rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)\n \n #Normalize by Period Dependence\n normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)\n \n return normTLpp,rawTLpp,transformedTransit\n \n \n \n\ndef runningMedian(t,y,dt,runt):\n \"\"\"\n Take a running median of size dt\n Return values at times given in runt\n \"\"\"\n newy=np.zeros(len(y))\n newt=np.zeros(len(y))\n \n srt = np.argsort(t)\n newt = t[srt]\n newy = y[srt]\n\n runy=[]\n for i in range(len(runt)): \n tmp=[]\n for j in range(len(newt)): \n if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):\n tmp.append(newy[j])\n \n if np.isnan(np.nanmedian(np.array(tmp))) :\n runy.append(0)\n else:\n runy.append(np.nanmedian(np.array(tmp)))\n \n return(list(runt),runy)\n\n \n \n\ndef foldBinLightCurve (data, ntrfr, npts):\n \"\"\"\n Fold and bin light curve for input to LPP metric calculation\n \n data contains time, tzero, dur, priod,mes and flux (centered around zero)\n \n ntrfr -- number of transit fraction for binning around transit ~1.5\n npts -- number of points in the final binning.\n \n \"\"\"\n\n #Create phase light curve\n phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)\n flux=data.flux\n mes=data.mes\n #Determine the fraction of the time the planet transits the star.\n #Insist that ntrfr * transit fraction\n if ~np.isnan(data.dur) & (data.dur >0):\n transit_dur = data.dur\n else:\n transit_dur = 0.2 * data.period/24.\n \n transit_fr=transit_dur/24./data.period\n if (transit_fr * ntrfr) > 0.5 :\n transit_fr = 0.5/ntrfr\n \n #Specify the out of transit (a) and the in transit regions\n binover=1.3\n if mes <= 20:\n binover=-(1/8.0)*mes + 3.8\n \n endfr = .03\n midfr= .11\n a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \\\n np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)\n ovsamp=4.0\n #bstep=(ovsamp*ntrfr*transit_fr)/npts\n b_num=41\n b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)\n\n #print \"length a: %u \" % len(a)\n #print \"length b: %u\" % len(b)\n [runta,runya] = runningMedian(phaselc,flux,binover/npts,a)\n [runtb,runyb] = runningMedian(phaselc,flux,\\\n (binover*ovsamp*ntrfr*transit_fr)/npts,b)\n\n #Combine the two sets of bins\n runymess=np.array(runya + runyb)\n runtmess = np.array(runta + runtb)\n\n srt=np.argsort(runtmess)\n 
runy=runymess[srt]\n runt=runtmess[srt]\n \n #Scale the flux by the depth so everything has the same depth.\n #Catch or dividing by zero is to not scale.\n scale = -1*np.min(runyb)\n if scale != 0:\n scaledFlux=runy/scale\n else:\n scaledFlux=runy\n \n binnedFlux=scaledFlux\n phasebins=runt\n \n return binnedFlux,phasebins\n\n\ndef computeRawLPPTransitMetric(binFlux,mapInfo):\n \"\"\"\n Perform the matrix transformation with LPP\n Do the knn test to get a raw LPP transit metric number.\n \"\"\"\n \n Yorig=mapInfo.YmapMapped\n lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)\n lpp.projection_=mapInfo.YmapM\n \n #To equate to Matlab LPP methods, we need to remove mean of transform.\n normBinFlux=binFlux-mapInfo.YmapMean\n \n inputY=lpp.transform(normBinFlux.reshape(1,-1))\n \n knownTransitsY=Yorig[mapInfo.knnGood,:]\n \n dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)\n \n rawLppTrMetric=np.mean(dist)\n \n return rawLppTrMetric,inputY\n \ndef knnDistance_fromKnown(knownTransits,new,knn):\n \"\"\"\n For a group of known transits and a new one.\n Use knn to determine how close the new one is to the known transits\n using knn minkowski p = 3 ()\n Using scipy signal to do this.\n \"\"\"\n #p=3 sets a minkowski distance of 3. #Check that you really used 3 for matlab.\n nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)\n nbrs.fit(knownTransits)\n \n distances,indices = nbrs.kneighbors(new)\n \n \n return distances, indices\n \n \n \ndef periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):\n \"\"\"\n Normalize the rawTransitMetric value by those with the closest period.\n This part removes the period dependence of the metric at short periods.\n Plus it makes a value near one be the threshold between good and bad.\n \n newPerMes is the np.array([period, mes]) of the new sample\n \"\"\"\n knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]\n knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]\n knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]\n nPercentil=mapInfo.nPercentil\n nPsample=mapInfo.nPsample\n \n #Find the those with the nearest periods Npsample-nneighbors\n logPeriods=np.log10(knownTrPeriods)\n logMes=np.log10(knownTrMes)\n knownPerMes=np.stack((logPeriods, logMes), axis=-1)\n\n np.shape(knownPerMes)\n logNew=np.log10(newPerMes).reshape(1,-1)\n #logNew=np.array([np.log10(newPeriod)]).reshape(1,1)\n\n dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)\n \n #Find the nthPercentile of the rawLpp of these indicies\n nearPeriodLpp=knownTrrawLpp[ind]\n \n LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)\n \n NormLppTransitMetric=rawTLpp/LppNPercentile\n \n return NormLppTransitMetric\n \n \n\ndef lpp_onetransit(tcedata,mapInfo,ntransit):\n \"\"\"\n Chop down the full time series to one orbital period.\n Then gather the lpp value for that one transit.\n \"\"\"\n \n startTime=tcedata.time[0]+ntransit*tcedata.period\n endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap\n \n want=(tcedata.time>=startTime) & (tcedata.time<=endTime)\n newtime=tcedata.time[want]\n newflux=tcedata.flux[want]\n \n nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period\n \n if len(newtime>nExpCad*0.75):\n onetransit=copy.deepcopy(tcedata)\n onetransit.time=newtime\n onetransit.flux=newflux\n normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)\n else:\n normTLpp=np.nan\n rawTLpp=np.nan\n \n return normTLpp,rawTLpp\n\n\ndef lpp_averageIndivTransit(tcedata,mapInfo):\n \"\"\"\n \n Create the 
loop over individual transits and return \n array normalized lpp values, mean and std.\n Input TCE object and mapInfo object.\n \n It is unclear that this individual transit approach\n separates out several new false positives.\n It probably would require retuning for low SNR signals.\n \n \"\"\" \n length=tcedata.time[-1]-tcedata.time[0]\n ntransits=int(np.floor(length/tcedata.period))\n \n lppNorms=np.ones(ntransits)\n lppRaws=np.ones(ntransits)\n \n nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period\n \n \n \n for i in range(ntransits):\n lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)\n \n lppMed=np.nanmedian(lppNorms)\n lppStd=np.nanstd(lppNorms)\n \n return lppNorms,lppMed, lppStd, ntransits\n\n \n \n \n \n ",
"\"\"\"\nCreated on Wed Nov 7 14:26:29 2018\n\nFit an analytic model of a point spread function (PSF) to an image.\n\nNomenclature\n-------------------------\nA point spread function is the analytic model of the distribution of light on the focal planet\nfrom a point source.\n\nA Pixel Response Function 9PRF) is the distribution of light on the pixels of the detector, with one\nvalue for each pixel. In general, a PRF it may include the jitter, or the intrapixel response,\nbut those aren't included in this model yet.\n\n\nUsage\n---------\nCall `fitPrf()` with an image and a PSF model\nExample PSF functions are `gaussianPsf()` and `gaussianWithConstantSkyPsf()`\n\n\nNotes\n----------\n* The signature of a model is function(col, row, *args)\n\n* The fitting function is extremely slow. To speed it up, we use numba to on-the-fly compile\n the PSF model function to C-code. This speeds up the fitting by roughly a factor of 7.\n To have numba precompile the model function you need to decorate the function appropriately.\n The function `jit_psf5args()` decorates `gaussianWithConstantSkyPsf()`. You need to write\n a separate decorator for each model that contains a different number of arguments.\n (At least until I can figure out a better way to decorate).Each argument must be a\n double. If the model can't be described in this fashion it can't be compiled.\n the\n\n* The code uses the following coordinate conventions\n\nImage coordinates\n\n\n row\n ^\n |\n |\n |\n |___________\n col -->\n\nThe bottom left of a pixel is the origin\n\n\n (0,1)\n +-----+ (1,1)\n | |\n | |\n | |\n +-----+ (1,0)\n\nAll functions take (col,row), never (row, col), unless copiously documented\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom pdb import set_trace as debug\nimport numpy as np\n\nimport scipy.integrate as spInt\nimport scipy.optimize as spOpt\nfrom numba import njit, cfunc\nfrom numba.types import CPointer, float64, intc\nfrom scipy import LowLevelCallable\n\n#https://stackoverflow.com/questions/49683653/how-to-pass-additional-parameters-to-numba-cfunc-passed-as-lowlevelcallable-to-s\ndef jit_psf5args(func):\n \"\"\"A complicated piece of code used by numba to compile the PSF.\n\n This one works for `gaussianWithConstantSkyPsf()`\n \"\"\"\n jitted_function = njit(func)\n\n @cfunc(float64(intc, CPointer(float64)))\n def wrapped(n, xx):\n return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6])\n\n return LowLevelCallable(wrapped.ctypes)\n\n\n@jit_psf5args\ndef gaussianWithConstantSkyPsf(col, row, col0, row0, sigma, flux0, sky):\n \"\"\"A model PSF of a 2d symettric gaussian with a constant background\n\n Inputs\n ---------\n col, row\n (floats) Which location to compute PSF for\n col0, row0\n (floats) Centre of psf\n sigma\n (float) Width of the Gaussian\n flux0\n (float) Height of the gaussian (sort of)\n sky\n (float) Background level\n\n Returns\n --------\n (float)\n \"\"\"\n assert sigma > 0\n\n z_col = .5 * (col - col0) / sigma\n z_row = .5 * (row - row0) / sigma\n\n return sky + flux0 * np.exp(- z_col**2) * np.exp( - z_row**2)\n\n\ndef fitPrf(img, prfFunc, guess):\n \"\"\"Fit a PRF to an image\n\n Inputs\n --------\n img\n (2d numpy array) Image to fit\n prfFunc\n (function) Model to fit. See module level documentation for more details.\n guess\n (tuple or array) Arguments prfFunc\n\n Returns\n ------------\n A scipy.optiminze.ResultsObject. 
The .x attribute contains the best fit parameters\n\n Example\n ----------\n To fit a model with the signature (col, row, flux, width), the guess array would be of\n length 2.\n \"\"\"\n\n nr, nc = img.shape\n soln = spOpt.minimize(costFunc, guess,args=(prfFunc, img), method='Nelder-Mead', bounds=None)\n return soln\n\n\ndef gaussianPsf(col, row, col0, row0, sigma, flux0):\n\n assert sigma > 0\n\n z_col = .5 * (col - col0) / sigma\n z_row = .5 * (row - row0) / sigma\n\n return flux0 * np.exp(- z_col**2) * np.exp( - z_row**2)\n\n\n\ndef costFunc(arglist, func, img, mask=None):\n \"\"\"Compute difference between image and its model for given model params\n\n Inputs\n ----------\n arglist\n (tuple or array) Tunable parameters of model\n func\n (function) Model to fit\n img\n (2d np array) Image to fit\n\n\n Optional Inputs\n ----------------\n mask\n (2d np array) Zero elements of mask indicate bad data which should not be\n included in the fit\n\n\n Returns\n ----------\n float\n \"\"\"\n\n nr, nc = img.shape\n model = computeModel(nc, nr, func, arglist)\n diff = img - model\n\n if mask is not None:\n assert np.all( mask.shape == img.shape)\n diff[~mask] = 0\n img[~mask] = 0 #In case bad values are set to Nan\n\n cost = np.sqrt( np.sum(diff**2) )\n return cost\n\n\n\n\n\n#@profile\ndef computeModel(numCols, numRows, func, arglist):\n \"\"\"Compute model flux for an image with size (numCols, numRows)\n\n Inputs\n -------\n numCols, numRows\n (ints) Shape of the image to compute the model PRF for\n func\n (function) Model PRF\n arglist\n (tuple or array) Tunable parameters of the model\n\n Returns\n ----------\n A 2d numpy array representing the model PRF image.\n \"\"\"\n\n model = np.zeros( (numRows, numCols) )\n\n for i in range(numCols):\n def gfun(x):\n return i\n\n def hfun(x):\n return i+1\n\n for j in range(numRows):\n val = spInt.dblquad(func, j, j+1, gfun, hfun, args=arglist)[0]\n model[j, i] = val #Numpy flips row and column\n\n return model\n"
] |
[
[
"numpy.array",
"numpy.isnan",
"numpy.percentile",
"numpy.ones",
"numpy.min",
"numpy.mean",
"numpy.shape",
"numpy.stack",
"numpy.arange",
"numpy.argsort",
"numpy.floor",
"numpy.log10",
"numpy.linspace",
"numpy.nanmedian",
"numpy.mod",
"numpy.nanstd"
],
[
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"scipy.LowLevelCallable",
"numpy.all",
"scipy.integrate.dblquad",
"scipy.optimize.minimize"
]
] |
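The psffit module in the row above documents its entry point as fitPrf() with a PSF model such as gaussianWithConstantSkyPsf(). A small round-trip sketch, with image size, true parameters, and the import path all assumed: synthesise an image from the model, then recover the parameters with the module's own fitter. The module itself warns that the integration-based fit is slow, so the image is kept tiny.

from diffimg.psffit import computeModel, fitPrf, gaussianWithConstantSkyPsf  # import path assumed

nc, nr = 10, 10
true_params = (4.5, 5.5, 1.2, 1000.0, 20.0)  # col0, row0, sigma, flux0, sky (illustrative)
img = computeModel(nc, nr, gaussianWithConstantSkyPsf, true_params)

guess = (5.0, 5.0, 1.0, 900.0, 10.0)
soln = fitPrf(img, gaussianWithConstantSkyPsf, guess)
print(soln.x)  # best-fit (col0, row0, sigma, flux0, sky)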
chenchy/onsets-and-frames
|
[
"af7ac2d2e65cba1f6442b81317328d96b3700b26"
] |
[
"transcribe.py"
] |
[
"import argparse\nimport os\nimport sys\n\nimport numpy as np\nimport soundfile\nfrom mir_eval.util import midi_to_hz\n\nfrom onsets_and_frames import *\n\n\ndef load_and_process_audio(flac_path, sequence_length, device):\n\n random = np.random.RandomState(seed=42)\n\n audio, sr = soundfile.read(flac_path, dtype='int16')\n assert sr == SAMPLE_RATE\n\n audio = torch.ShortTensor(audio)\n\n if sequence_length is not None:\n audio_length = len(audio)\n step_begin = random.randint(audio_length - sequence_length) // HOP_LENGTH\n n_steps = sequence_length // HOP_LENGTH\n\n begin = step_begin * HOP_LENGTH\n end = begin + sequence_length\n\n audio = audio[begin:end].to(device)\n else:\n audio = audio.to(device)\n\n audio = audio.float().div_(32768.0)\n\n return audio\n\n\ndef transcribe(model, audio):\n\n mel = melspectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]).transpose(-1, -2)\n onset_pred, offset_pred, _, frame_pred, velocity_pred = model(mel)\n\n predictions = {\n 'onset': onset_pred.reshape((onset_pred.shape[1], onset_pred.shape[2])),\n 'offset': offset_pred.reshape((offset_pred.shape[1], offset_pred.shape[2])),\n 'frame': frame_pred.reshape((frame_pred.shape[1], frame_pred.shape[2])),\n 'velocity': velocity_pred.reshape((velocity_pred.shape[1], velocity_pred.shape[2]))\n }\n\n return predictions\n\n\ndef transcribe_file(model_file, flac_paths, save_path, sequence_length,\n onset_threshold, frame_threshold, device):\n\n model = torch.load(model_file, map_location=device).eval()\n summary(model)\n\n for flac_path in flac_paths:\n print(f'Processing {flac_path}...', file=sys.stderr)\n audio = load_and_process_audio(flac_path, sequence_length, device)\n predictions = transcribe(model, audio)\n\n p_est, i_est, v_est = extract_notes(predictions['onset'], predictions['frame'], predictions['velocity'], onset_threshold, frame_threshold)\n\n scaling = HOP_LENGTH / SAMPLE_RATE\n\n i_est = (i_est * scaling).reshape(-1, 2)\n p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])\n\n os.makedirs(save_path, exist_ok=True)\n pred_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.png')\n save_pianoroll(pred_path, predictions['onset'], predictions['frame'])\n midi_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.mid')\n save_midi(midi_path, p_est, i_est, v_est)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('model_file', type=str)\n parser.add_argument('flac_paths', type=str, nargs='+')\n parser.add_argument('--save-path', type=str, default='.')\n parser.add_argument('--sequence-length', default=None, type=int)\n parser.add_argument('--onset-threshold', default=0.5, type=float)\n parser.add_argument('--frame-threshold', default=0.5, type=float)\n parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')\n\n with torch.no_grad():\n transcribe_file(**vars(parser.parse_args()))\n"
] |
[
[
"numpy.random.RandomState"
]
] |
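A sketch of calling the transcriber in the row above from Python rather than the command line. The checkpoint and FLAC filenames are placeholders; the argument names mirror the script's own argparse options.

import torch
from transcribe import transcribe_file

with torch.no_grad():
    transcribe_file(
        model_file='model.pt',        # placeholder checkpoint path
        flac_paths=['example.flac'],  # placeholder input recording(s)
        save_path='output',
        sequence_length=None,
        onset_threshold=0.5,
        frame_threshold=0.5,
        device='cuda' if torch.cuda.is_available() else 'cpu',
    )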
britig/S2RL-Policies
|
[
"b9c74b7f5efec225920c09f7e8e82d8555d61bd9"
] |
[
"ppoPolicyTraining.py"
] |
[
"\"\"\"\r\n\tThe file contains the PPO class to train with.\r\n\tNOTE: All \"ALG STEP\"s are following the numbers from the original PPO pseudocode.\r\n\t\t\tIt can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg\r\n\"\"\"\r\n\r\nimport gym\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.optim import Adam\r\n#For continuous actions\r\nfrom torch.distributions import MultivariateNormal\r\n#For discrete action_space\r\nfrom torch.distributions import Categorical\r\nfrom network import FeedForwardActorNN, FeedForwardCriticNN\r\nimport sys\r\nfrom cbf_clf_helper import clf_control, cbf_control\r\n\r\n#Integrating tensorboard\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nwriter = SummaryWriter()\r\n\r\nclass PPO:\r\n\t\"\"\"\r\n\t\tThis is the PPO class we will use as our model in main.py\r\n\t\"\"\"\r\n\tdef __init__(self, env, **hyperparameters):\r\n\t\t\"\"\"\r\n\t\t\tInitializes the PPO model, including hyperparameters.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tpolicy_class - the policy class to use for our actor/critic networks.\r\n\t\t\t\tenv - the environment to train on.\r\n\t\t\t\thyperparameters - all extra arguments passed into PPO that should be hyperparameters.\r\n\r\n\t\t\tReturns:\r\n\t\t\t\tNone\r\n\t\t\"\"\"\r\n\t\t# Make sure the environment is compatible with our code\r\n\t\tassert(type(env.observation_space) == gym.spaces.Box)\r\n\t\t# Makeassert(type(env.action_space) == gym.spaces.Box)\r\n\r\n\t\t# Initialize hyperparameters for training with PPO\r\n\t\tself._init_hyperparameters(hyperparameters)\r\n\r\n\t\t# Extract environment information\r\n\t\tself.env = env\r\n\t\tself.obs_dim = env.observation_space.shape[0]\r\n\t\tif self.discrete:\r\n\t\t\tself.act_dim = env.action_space.n\r\n\t\telse:\r\n\t\t\tself.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]\r\n\r\n\t\t # Initialize actor and critic networks\r\n\t\tself.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete) \r\n\t\tactor_model = 'ppo_actorKinematicBicycleGymLane.pth'\r\n\t\tpolicy = FeedForwardActorNN(5, 2,False)\r\n\t\tpolicy.load_state_dict(torch.load(actor_model))\r\n\t\tactor_model = policy\r\n\t\t#print(f'model =========== {self.actor}') \t# ALG STEP 1\r\n\t\tself.critic = FeedForwardCriticNN(self.obs_dim, 1)\r\n\t\t#print(f'critic =========== {self.critic}') \r\n\r\n\t\t# Initialize optimizers for actor and critic\r\n\t\tself.actor_optim = Adam(self.actor.parameters(), lr=self.lr)\r\n\t\tself.critic_optim = Adam(self.critic.parameters(), lr=self.lr)\r\n\r\n\t\t# Initialize the covariance matrix used to query the actor for actions\r\n\t\tself.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)\r\n\t\tself.cov_mat = torch.diag(self.cov_var)\r\n\t\tself.obs_count = 0\r\n\t\tself.index_count = 0\r\n\r\n\t\t# This logger will help us with printing out summaries of each iteration\r\n\t\tself.logger = {\r\n\t\t\t't_so_far': 0, # timesteps so far\r\n\t\t\t'i_so_far': 0, # iterations so far\r\n\t\t\t'batch_lens': [], # episodic lengths in batch\r\n\t\t\t'batch_rews': [], # episodic returns in batch\r\n\t\t\t'batch_infractions': [], # Episodic returns in a neural network\r\n\t\t\t'actor_losses': [], # losses of actor network in current iteration\r\n\t\t\t'actor_network' : 0,\t# Actor network\r\n\t\t}\r\n\r\n\tdef learn(self, env_name,failure_observations,subpolicy):\r\n\t\t\"\"\"\r\n\t\t\tTrain the actor and critic networks. 
Here is where the main PPO algorithm resides.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\ttotal_timesteps - the total number of timesteps to train for\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tNone\r\n\t\t\"\"\"\r\n\t\tprint(f\"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, \", end='')\r\n\t\tprint(f\"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations\")\r\n\t\tt_so_far = 0 # Timesteps simulated so far\r\n\t\ti_so_far = 0 # Iterations ran so far\r\n\t\twhile i_so_far < self.training_step: # ALG STEP 2\r\n\t\t\t# Autobots, roll out (just kidding, we're collecting our batch simulations here)\r\n\t\t\tbatch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3\r\n\r\n\t\t\t# Calculate how many timesteps we collected this batch\r\n\t\t\tt_so_far += np.sum(batch_lens)\r\n\r\n\t\t\t# Increment the number of iterations\r\n\t\t\ti_so_far += 1\r\n\r\n\t\t\t# Logging timesteps so far and iterations so far\r\n\t\t\tself.logger['t_so_far'] = t_so_far\r\n\t\t\tself.logger['i_so_far'] = i_so_far\r\n\r\n\t\t\t# Calculate advantage at k-th iteration\r\n\t\t\tV, _ = self.evaluate(batch_obs, batch_acts)\r\n\t\t\tA_k = batch_rtgs - V.detach() # ALG STEP 5\r\n\r\n\t\t\t# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages\r\n\t\t\t# isn't theoretically necessary, but in practice it decreases the variance of \r\n\t\t\t# our advantages and makes convergence much more stable and faster. I added this because\r\n\t\t\t# solving some environments was too unstable without it.\r\n\t\t\tA_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)\r\n\r\n\t\t\t# This is the loop where we update our network for some n epochs\r\n\t\t\tfor _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7\r\n\t\t\t\t# Calculate V_phi and pi_theta(a_t | s_t)\r\n\t\t\t\tV, curr_log_probs = self.evaluate(batch_obs, batch_acts)\r\n\r\n\t\t\t\t# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)\r\n\t\t\t\t# NOTE: we just subtract the logs, which is the same as\r\n\t\t\t\t# dividing the values and then canceling the log with e^log.\r\n\t\t\t\t# For why we use log probabilities instead of actual probabilities,\r\n\t\t\t\t# here's a great explanation: \r\n\t\t\t\t# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms\r\n\t\t\t\t# TL;DR makes gradient ascent easier behind the scenes.\r\n\t\t\t\tratios = torch.exp(curr_log_probs - batch_log_probs)\r\n\r\n\t\t\t\t# Calculate surrogate losses.\r\n\t\t\t\t#print(f'A_k======================={A_k}')\r\n\t\t\t\tsurr1 = ratios * A_k\r\n\t\t\t\t#print(f'surr1======================={surr1}')\r\n\t\t\t\tsurr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k\r\n\t\t\t\t#print(f'surr2======================={surr2}')\r\n\r\n\t\t\t\t# Calculate actor and critic losses.\r\n\t\t\t\t# NOTE: we take the negative min of the surrogate losses because we're trying to maximize\r\n\t\t\t\t# the performance function, but Adam minimizes the loss. 
So minimizing the negative\r\n\t\t\t\t# performance function maximizes it.\r\n\t\t\t\tactor_loss = (-torch.min(surr1, surr2)).mean()\r\n\t\t\t\t#print(f'actor_loss======================={actor_loss}')\r\n\t\t\t\tcritic_loss = nn.MSELoss()(V, batch_rtgs)\r\n\r\n\t\t\t\t# Calculate gradients and perform backward propagation for actor network\r\n\t\t\t\tself.actor_optim.zero_grad()\r\n\t\t\t\tactor_loss.backward(retain_graph=True)\r\n\t\t\t\tself.actor_optim.step()\r\n\r\n\t\t\t\t# Calculate gradients and perform backward propagation for critic network\r\n\t\t\t\tself.critic_optim.zero_grad()\r\n\t\t\t\tcritic_loss.backward()\r\n\t\t\t\tself.critic_optim.step()\r\n\r\n\t\t\t\t# Log actor loss\r\n\t\t\t\tself.logger['actor_losses'].append(actor_loss.detach())\r\n\t\t\t\tself.logger['actor_network'] = self.actor\r\n\r\n\t\t\t# Print a summary of our training so far\r\n\t\t\tself._log_summary()\r\n\r\n\t\t\t# Save our model if it's time\r\n\t\t\tif i_so_far % self.save_freq == 0:\r\n\t\t\t\tif subpolicy:\r\n\t\t\t\t\ttorch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')\r\n\t\t\t\t\ttorch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')\r\n\t\t\t\telse:\r\n\t\t\t\t\ttorch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')\r\n\t\t\t\t\ttorch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')\r\n\r\n\tdef rollout(self,subpolicy,failure_observations):\r\n\t\t\"\"\"\r\n\t\t\tThis is where we collect the batch of data\r\n\t\t\tfrom simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch\r\n\t\t\tof data each time we iterate the actor/critic networks.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tNone\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tbatch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)\r\n\t\t\t\tbatch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)\r\n\t\t\t\tbatch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)\r\n\t\t\t\tbatch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)\r\n\t\t\t\tbatch_lens - the lengths of each episode this batch. Shape: (number of episodes)\r\n\t\t\"\"\"\r\n\t\t# Batch data. For more details, check function header.\r\n\t\tbatch_obs = []\r\n\t\tbatch_acts = []\r\n\t\tbatch_log_probs = []\r\n\t\tbatch_rews = []\r\n\t\tbatch_rtgs = []\r\n\t\tbatch_lens = []\r\n\t\tbatch_infractions = []\r\n\r\n\t\t# Episodic data. Keeps track of rewards per episode, will get cleared\r\n\t\t# upon each new episode\r\n\t\tep_rews = []\r\n\r\n\t\tt = 0 # Keeps track of how many timesteps we've run so far this batch\r\n\r\n\t\t# Keep simulating until we've run more than or equal to specified timesteps per batch\r\n\t\twhile t < self.timesteps_per_batch:\r\n\t\t\tact_list = []\r\n\t\t\tep_rews = [] # rewards collected per episode\r\n\t\t\t# Reset the environment. sNote that obs is short for observation. 
\r\n\t\t\tobs = self.env.reset()\r\n\t\t\t#print(f'obs reset ============= {obs}')\r\n\t\t\tdone = False\r\n\t\t\tcount_infractions = 0\r\n\t\t\tcount_infractions_acc = 0\r\n\t\t\tcount_infractions_steer = 0\r\n\r\n\t\t\t# Run an episode for a maximum of max_timesteps_per_episode timesteps\r\n\t\t\tfor ep_t in range(self.max_timesteps_per_episode):\r\n\t\t\t\ta_predicted_clf = clf_control(self.env.v_ego)\r\n\t\t\t\tdelta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)\r\n\t\t\t\t# If render is specified, render the environment\r\n\t\t\t\tif self.render:\r\n\t\t\t\t\tself.env.render()\r\n\r\n\t\t\t\tt += 1 # Increment timesteps ran this batch so far\r\n\r\n\t\t\t\t# Track observations in this batch\r\n\t\t\t\tbatch_obs.append(obs)\r\n\r\n\t\t\t\t# Calculate action and make a step in the env. \r\n\t\t\t\t# Note that rew is short for reward.\r\n\t\t\t\tif self.discrete:\r\n\t\t\t\t\taction, log_prob = self.get_action_discrete(obs)\r\n\t\t\t\telse:\r\n\t\t\t\t\taction, log_prob = self.get_action(obs) #self.get_action_discrete(obs)\r\n\t\t\t\t#print(f'action chosen =============== {action}')\r\n\t\t\t\tif(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):\r\n\t\t\t\t\tcount_infractions_acc = count_infractions_acc+1\r\n\t\t\t\tif(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):\r\n\t\t\t\t\t#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')\r\n\t\t\t\t\tcount_infractions_steer = count_infractions_steer+1\r\n\t\t\t\tobs, rew, done, info = self.env.step(action)\r\n\t\t\t\tcount_infractions = count_infractions_acc+count_infractions_steer\r\n\r\n\r\n\t\t\t\t# Track recent reward, action, and action log probability\r\n\t\t\t\tep_rews.append(rew)\r\n\t\t\t\tbatch_acts.append(action)\r\n\t\t\t\tbatch_log_probs.append(log_prob)\r\n\t\t\t\tact_list.append(info)\r\n\r\n\t\t\t\t# If the environment tells us the episode is terminated, break\r\n\t\t\t\tif done:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t# Track episodic lengths and rewards\r\n\t\t\t#self.env.render(act_list)\r\n\t\t\tbatch_lens.append(ep_t + 1)\r\n\t\t\tbatch_rews.append(ep_rews)\r\n\t\t\tbatch_infractions.append(count_infractions)\r\n\r\n\t\t# Reshape data as tensors in the shape specified in function description, before returning\r\n\t\tbatch_obs = torch.tensor(batch_obs, dtype=torch.float)\r\n\t\t#print(f'batch_acts =============== {batch_acts}')\r\n\t\t#For discrete state space\r\n\t\tif self.discrete:\r\n\t\t\tbatch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)\r\n\t\telse:\r\n\t\t\tbatch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)\r\n\t\t#print(f'batch_acts =============== {batch_acts}')\r\n\t\tbatch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)\r\n\t\tbatch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4\r\n\r\n\t\t# Log the episodic returns and episodic lengths in this batch.\r\n\t\tself.logger['batch_rews'] = batch_rews\r\n\t\tself.logger['batch_lens'] = batch_lens\r\n\t\tself.logger['batch_infractions'] = batch_infractions\r\n\r\n\t\treturn batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens\r\n\r\n\tdef compute_rtgs(self, batch_rews):\r\n\t\t\"\"\"\r\n\t\t\tCompute the Reward-To-Go of each timestep in a batch given the rewards.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tbatch_rews - the rewards in a batch, Shape: (number of episodes, number of 
timesteps per episode)\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tbatch_rtgs - the rewards to go, Shape: (number of timesteps in batch)\r\n\t\t\"\"\"\r\n\t\t# The rewards-to-go (rtg) per episode per batch to return.\r\n\t\t# The shape will be (num timesteps per episode)\r\n\t\tbatch_rtgs = []\r\n\r\n\t\t# Iterate through each episode\r\n\t\tfor ep_rews in reversed(batch_rews):\r\n\r\n\t\t\tdiscounted_reward = 0 # The discounted reward so far\r\n\r\n\t\t\t# Iterate through all rewards in the episode. We go backwards for smoother calculation of each\r\n\t\t\t# discounted return (think about why it would be harder starting from the beginning)\r\n\t\t\tfor rew in reversed(ep_rews):\r\n\t\t\t\tdiscounted_reward = rew + discounted_reward * self.gamma\r\n\t\t\t\tbatch_rtgs.insert(0, discounted_reward)\r\n\r\n\t\t# Convert the rewards-to-go into a tensor\r\n\t\tbatch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)\r\n\r\n\t\treturn batch_rtgs\r\n\r\n\r\n\t# Probability sampling for discrete actions\r\n\tdef get_action_discrete(self, obs):\r\n\t\t#print(f'obs ================== {obs}')\r\n\t\tmean = self.actor(obs)\r\n\t\t#print(f'mean ================== {mean}')\r\n\r\n\t\tdist = Categorical(mean)\r\n\r\n\t\t#print(f'dist ================== {dist}')\r\n\r\n\t\taction = dist.sample()\r\n\r\n\t\tlog_prob = dist.log_prob(action)\r\n\t\t#print(f'action ====== {action} ========= {log_prob}')\r\n\r\n\t\treturn action.detach().numpy().item(), log_prob.detach().item()\r\n\r\n\r\n\tdef get_action(self, obs):\r\n\t\t\"\"\"\r\n\t\t\tQueries an action from the actor network, should be called from rollout.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tobs - the observation at the current timestep\r\n\r\n\t\t\tReturn:\r\n\t\t\t\taction - the action to take, as a numpy array\r\n\t\t\t\tlog_prob - the log probability of the selected action in the distribution\r\n\t\t\"\"\"\r\n\t\t# Query the actor network for a mean action\r\n\t\tmean = self.actor(obs)\r\n\r\n\t\t# Create a distribution with the mean action and std from the covariance matrix above.\r\n\t\t# For more information on how this distribution works, check out Andrew Ng's lecture on it:\r\n\t\t# https://www.youtube.com/watch?v=JjB58InuTqM\r\n\t\tdist = MultivariateNormal(mean, self.cov_mat)\r\n\r\n\t\t# Sample an action from the distribution\r\n\t\taction = dist.sample()\r\n\r\n\t\t# Calculate the log probability for that action\r\n\t\tlog_prob = dist.log_prob(action)\r\n\r\n\t\t# Return the sampled action and the log probability of that action in our distribution\r\n\t\treturn action.detach().numpy(), log_prob.detach()\r\n\r\n\tdef evaluate(self, batch_obs, batch_acts):\r\n\t\t\"\"\"\r\n\t\t\tEstimate the values of each observation, and the log probs of\r\n\t\t\teach action in the most recent batch with the most recent\r\n\t\t\titeration of the actor network. Should be called from learn.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tbatch_obs - the observations from the most recently collected batch as a tensor.\r\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of observation)\r\n\t\t\t\tbatch_acts - the actions from the most recently collected batch as a tensor.\r\n\t\t\t\t\t\t\tShape: (number of timesteps in batch, dimension of action)\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tV - the predicted values of batch_obs\r\n\t\t\t\tlog_probs - the log probabilities of the actions taken in batch_acts given batch_obs\r\n\t\t\"\"\"\r\n\t\t# Query critic network for a value V for each batch_obs. 
Shape of V should be same as batch_rtgs\r\n\t\tV = self.critic(batch_obs).squeeze()\r\n\r\n\t\t# Calculate the log probabilities of batch actions using most recent actor network.\r\n\t\t# This segment of code is similar to that in get_action()\r\n\t\tmean = self.actor(batch_obs)\r\n\t\tif self.discrete:\r\n\t\t\tdist = Categorical(mean)\r\n\t\telse:\r\n\t\t\tdist = MultivariateNormal(mean, self.cov_mat)\r\n\t\t#For discrete actions\r\n\t\t#dist = Categorical(mean)\r\n\t\tlog_probs = dist.log_prob(batch_acts)\r\n\r\n\t\t# Return the value vector V of each observation in the batch\r\n\t\t# and log probabilities log_probs of each action in the batch\r\n\t\treturn V, log_probs\r\n\r\n\tdef _init_hyperparameters(self, hyperparameters):\r\n\t\t\"\"\"\r\n\t\t\tInitialize default and custom values for hyperparameters\r\n\r\n\t\t\tParameters:\r\n\t\t\t\thyperparameters - the extra arguments included when creating the PPO model, should only include\r\n\t\t\t\t\t\t\t\t\thyperparameters defined below with custom values.\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tNone\r\n\t\t\"\"\"\r\n\t\t# Initialize default values for hyperparameters\r\n\t\t# Algorithm hyperparameters\r\n\t\tself.timesteps_per_batch = 4800 # Number of timesteps to run per batch\r\n\t\tself.max_timesteps_per_episode = 1600 # Max number of timesteps per episode\r\n\t\tself.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration\r\n\t\tself.lr = 0.005 # Learning rate of actor optimizer\r\n\t\tself.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go\r\n\t\tself.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA\r\n\r\n\t\t# Miscellaneous parameters\r\n\t\tself.render = False # If we should render during rollout\r\n\t\tself.save_freq = 10 # How often we save in number of iterations\r\n\t\tself.seed = None # Sets the seed of our program, used for reproducibility of results\r\n\t\tself.discrete = False\t\t\t\t\t\t\t# Sets the type of environment to discrete or continuous\r\n\t\tself.training_step = 200\t\t\t\t\t\t# Sets the number of trainig step\r\n\r\n\t\t# Change any default values to custom values for specified hyperparameters\r\n\t\tfor param, val in hyperparameters.items():\r\n\t\t\texec('self.' + param + ' = ' + str(val))\r\n\r\n\t\t# Sets the seed if specified\r\n\t\tif self.seed != None:\r\n\t\t\t# Check if our seed is valid first\r\n\t\t\tassert(type(self.seed) == int)\r\n\r\n\t\t\t# Set the seed \r\n\t\t\ttorch.manual_seed(self.seed)\r\n\t\t\tprint(f\"Successfully set seed to {self.seed}\")\r\n\r\n\tdef _log_summary(self):\r\n\t\t\"\"\"\r\n\t\t\tPrint to stdout what we've logged so far in the most recent batch.\r\n\r\n\t\t\tParameters:\r\n\t\t\t\tNone\r\n\r\n\t\t\tReturn:\r\n\t\t\t\tNone\r\n\t\t\"\"\"\r\n\t\t# Calculate logging values. 
I use a few python shortcuts to calculate each value\r\n\t\t# without explaining since it's not too important to PPO; feel free to look it over,\r\n\t\t# and if you have any questions you can email me (look at bottom of README)\r\n\t\tt_so_far = self.logger['t_so_far']\r\n\t\ti_so_far = self.logger['i_so_far']\r\n\t\tavg_ep_lens = np.mean(self.logger['batch_lens'])\r\n\t\tavg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])\r\n\t\tavg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])\r\n\t\tavg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])\r\n\t\tactor_model = self.logger['actor_network']\r\n\r\n\t\t# Round decimal places for more aesthetic logging messages\r\n\t\tavg_ep_lens = str(round(avg_ep_lens, 2))\r\n\t\tavg_ep_rews = str(round(avg_ep_rews, 2))\r\n\t\tavg_ep_infractions = str(round(avg_ep_infractions, 2))\r\n\t\tavg_actor_loss = str(round(avg_actor_loss, 5))\r\n\r\n\t\twriter.add_scalar(\"Average Episodic Return\", int(float(avg_ep_rews)), t_so_far)\r\n\t\twriter.add_scalar(\"Average actor Loss\", int(float(avg_actor_loss)), t_so_far)\r\n\t\twriter.add_scalar(\"Average Infractions\", int(float(avg_ep_infractions)), t_so_far)\r\n\t\t# Tracking the weight of the network\r\n\t\tfor name, param in actor_model.named_parameters():\r\n\t\t\tif 'weight' in name:\r\n\t\t\t\twriter.add_histogram(name, param.detach().numpy(), t_so_far)\r\n\r\n\t\t# Print logging statements\r\n\t\tprint(flush=True)\r\n\t\tprint(f\"-------------------- Iteration #{i_so_far} --------------------\", flush=True)\r\n\t\tprint(f\"Average Episodic Length: {avg_ep_lens}\", flush=True)\r\n\t\tprint(f\"Average Episodic Return: {avg_ep_rews}\", flush=True)\r\n\t\tprint(f\"Average Episodic Infractions : {avg_ep_infractions}\", flush=True)\r\n\t\tprint(f\"Average Loss: {avg_actor_loss}\", flush=True)\r\n\t\tprint(f\"Timesteps So Far: {t_so_far}\", flush=True)\r\n\t\tprint(f\"------------------------------------------------------\", flush=True)\r\n\t\tprint(flush=True)\r\n\r\n\t\t# Reset batch-specific logging data\r\n\t\tself.logger['batch_lens'] = []\r\n\t\tself.logger['batch_rews'] = []\r\n\t\tself.logger['actor_losses'] = []\r\n\r\n\r\ndef test(env, actor_model, is_discrete):\r\n\t\"\"\"\r\n\t\tTests the model.\r\n\t\tParameters:\r\n\t\t\tenv - the environment to test the policy on\r\n\t\t\tactor_model - the actor model to load in\r\n\t\tReturn:\r\n\t\t\tNone\r\n\t\"\"\"\r\n\tprint(f\"Testing {actor_model}\", flush=True)\r\n\r\n\t# If the actor model is not specified, then exit\r\n\tif actor_model == '':\r\n\t\tprint(f\"Didn't specify model file. Exiting.\", flush=True)\r\n\t\tsys.exit(0)\r\n\r\n\t# Extract out dimensions of observation and action spaces\r\n\tobs_dim = env.observation_space.shape[0]\r\n\tif is_discrete:\r\n\t\tact_dim = env.action_space.n\r\n\telse:\r\n\t\tact_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]\r\n\r\n\t# Build our policy the same way we build our actor model in PPO\r\n\tpolicy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)\r\n\r\n\t# Load in the actor model saved by the PPO algorithm\r\n\tpolicy.load_state_dict(torch.load(actor_model))\r\n\t\r\n\r\n\t# Evaluate our policy with a separate module, eval_policy, to demonstrate\r\n\t# that once we are done training the model/policy with ppo.py, we no longer need\r\n\t# ppo.py since it only contains the training algorithm. 
The model/policy itself exists\r\n\t# independently as a binary file that can be loaded in with torch.\r\n\teval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)\r\n\t\r\n\r\n\r\n"
] |
[
[
"torch.distributions.Categorical",
"torch.nn.MSELoss",
"torch.min",
"numpy.sum",
"numpy.mean",
"torch.clamp",
"torch.manual_seed",
"torch.full",
"torch.tensor",
"torch.load",
"torch.diag",
"torch.distributions.MultivariateNormal",
"torch.exp",
"torch.utils.tensorboard.SummaryWriter"
]
] |
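A sketch of constructing and training the PPO class in the row above. The environment id is a placeholder: rollout() assumes a custom driving environment exposing v_ego, x_ego, y_ego, yaw_ego, delta_ego and car.tracker.stanley_control(), and __init__() expects the checkpoint ppo_actorKinematicBicycleGymLane.pth to be on disk, so a stock gym task will not work as-is.

import gym
from ppoPolicyTraining import PPO

env = gym.make('KinematicBicycleGymLane-v0')  # hypothetical id for the custom vehicle env
hyperparameters = {
    'timesteps_per_batch': 2048,
    'max_timesteps_per_episode': 400,
    'lr': 0.005,
    'training_step': 100,
}
model = PPO(env, **hyperparameters)
model.learn(env_name='KinematicBicycleGymLane', failure_observations=[], subpolicy=False)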
kaylajanos1/TeamSpark-L3Detection
|
[
"ecc2b4ca3588f989add309439feac33014447a32"
] |
[
"detection.py"
] |
[
"#Importing Libraries \nimport os\nimport csv\nimport sys, getopt\n\nimport uuid\n\nimport SimpleITK as sitk\nimport cv2 \nimport numpy as np\nimport tensorflow as tf\nfrom flask import Flask, flash, request, redirect, render_template\nfrom flask import jsonify\nfrom flask import send_from_directory\nfrom flask_materialize import Material\nfrom tensorflow.python.keras.backend import set_session\nfrom werkzeug.utils import secure_filename\nimport shutil\nimport nibabel as nib\nimport pandas as pd\nimport numpy\nfrom sarcopenia_ai.apps.segmentation.segloader import preprocess_test_image\n\n\n\n\n\nfrom sarcopenia_ai.apps.server import settings\nfrom sarcopenia_ai.apps.slice_detection.predict import parse_inputs, to256\nfrom sarcopenia_ai.apps.slice_detection.utils import decode_slice_detection_prediction, \\\n preprocess_sitk_image_for_slice_detection, adjust_detected_position_spacing, place_line_on_img\nfrom sarcopenia_ai.core.model_wrapper import BaseModelWrapper\nfrom sarcopenia_ai.io import load_image\nfrom sarcopenia_ai.preprocessing.preprocessing import blend2d\nfrom sarcopenia_ai.utils import compute_muscle_area, compute_muscle_attenuation\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\ngraph = tf.get_default_graph()\n\n\nimport cv2\nimport numpy as np\ndef normalise_zero_one(image, eps=1e-8):\n print(\"Here 1\")\n image = image.astype(np.float32)\n ret = (image - np.min(image))\n ret /= (np.max(image) - np.min(image) + eps)\n return ret\n\ndef normalise_one_one(image):\n print(\"Here 2\")\n ret = normalise_zero_one(image)\n ret *= 2.\n ret -= 1.\n return ret\n \ndef preprocess_test_image(image):\n print(\"Here\")\n #image = normalise_one_one(image, -250, 250)\n image = normalise_one_one(image)\n return image\n################## \n\n\ndef find_max(img):\n return np.unravel_index(np.argmax(img, axis=None), img.shape)[0]\n\n#Read arguments\n#############################\nimport argparse \n \nmsg = \"Adding description\"\n \n# Initialize parser \nparser = argparse.ArgumentParser(description = msg) \n\n# Reading the input arguments \nparser.add_argument(\"-i\", \"--Input\", help = \"Input file or folder\") \nparser.add_argument('-test_name', type=str, default='Test')\n\n# Read arguments from command line \nargs = parser.parse_args() \n \n\n\npath = args.Input\ntest_name = args.test_name\n\n#Creating the result structure variables\nmain = os.getcwd()\ndirectory = os.path.join(main+'/NII_Data/'+path)\n\n\nif not os.path.exists(main+'/Results/'+path+\"/\"):\n os.mkdir(main+'/Results/'+path+'/')\n\nout = os.path.join(main+'/Results/'+path+\"/\"+test_name+'/')\n\nif os.path.exists(out):\n shutil.rmtree(out)\n os.mkdir(out)\n\nif not os.path.exists(out):\n os.mkdir(out)\n\nout_yes = os.path.join(out+'/Yes')\nif not os.path.exists(out_yes):\n os.mkdir(out_yes)\n\nout_no = os.path.join(out+'/No')\nif not os.path.exists(out_no):\n os.mkdir(out_no)\n\nout_rev = os.path.join(out+'/Review/')\nif not os.path.exists(out_rev):\n os.mkdir(out_rev)\n\nout_csv = os.path.join(out+'/Pred CSVs/')\nif not os.path.exists(out_csv):\n os.mkdir(out_csv)\n\n\n#Load the sarcopenia-ai models \n#set_session(sess)\nmodel_wrapper = BaseModelWrapper(settings.SLICE_DETECTION_MODEL_PATH)\nmodel_wrapper.setup_model()\nglobal slice_detection_model\nslice_detection_model= model_wrapper.model\nslice_detection_model._make_predict_function()\n\nglobal segmentation_model\nmodel_wrapper = 
BaseModelWrapper(settings.SEGMENTATION_MODEL_PATH)\nmodel_wrapper.setup_model()\nsegmentation_model = model_wrapper.model\nsegmentation_model._make_predict_function()\n\n####Updated functions to replace older versions listed in the sarcopenia-ai enviroment\n#Previous research indicates adjusting the HU range can help bone appear better \ndef reduce_hu_intensity_range(img, minv=100, maxv=1500):\n img = np.clip(img, minv, maxv)\n img = 255 * normalise_zero_one(img)\n\n return img\n\n\n\n#Setting up the output file name & Prediction counter \npred_id = 0\ncols = ['Folder_Path','Patient_Folder','Study_Folder','Serie_Folder','L3_detection','L3_position','Total_slices','Confidence','Slice_Thickness', 'Orientation']\nlst = []\n\n#Looping through the input folder and analyzing the images \nfor folder in os.listdir(directory):\n #Patient Folder\n if(folder=='.DS_Store'):\n continue\n #Study Folder \n for sub_folder in os.listdir(directory+\"/\"+folder):\n if(sub_folder=='.DS_Store'):\n continue\n #Series Folder \n for sub_sub_folder in os.listdir(directory+\"/\"+folder+\"/\"+sub_folder):\n #Image Level \n for file in os.listdir(directory+\"/\"+folder+\"/\"+sub_folder+\"/\"+sub_sub_folder):\n print(\"IN SUB-SUB-FOLDER: \"+sub_sub_folder)\n #print(file)\n if(file.endswith(\".nii.gz\") or file.endswith(\".nii\")):\n print(\"Processing file: \"+file)\n try:\n \n if(sub_sub_folder=='.DS_Store'):\n continue\n print(\"IN SUB-SUB-FOLDER: \"+sub_sub_folder)\n \n image_path = directory+\"/\"+folder+\"/\"+sub_folder+\"/\"+sub_sub_folder+\"/\"+file\n \n prob_threshold_U=settings.THRESHOLD_U\n prob_threshold_L=settings.THRESHOLD_L\n\n #Gathering image name \n import ntpath\n head, tail = ntpath.split(image_path)\n image_name = tail or ntpath.basename(head)\n \n pred_id = pred_id +1\n print(\"ID --> \"+str(pred_id))\n results = {\"success\": False, \"prediction\": {'id': pred_id}} \n sitk_image, _ = load_image(image_path) \n print(\"-----------------------------image path: \"+image_path )\n \n\n\n\n\n\n #The code is not set up to analyze 4 dimensional data.\n if len(sitk_image.GetSize()) == 4:\n print(\"-------- 4D Image: Grabbing only first volume\")\n sitk_image = sitk_image[:, :, :, 0]\n\n\n\n\n #Getting image orientation information for output file. 
\n print('-------------- NIB')\n nib_image = nib.load(image_path)\n orient_nib=nib.orientations.aff2axcodes(nib_image.affine)\n\n print('-------------- Preprocess')\n #Preprocessing the image \n image2d, image2d_preview= preprocess_sitk_image_for_slice_detection(sitk_image)\n image3d = sitk.GetArrayFromImage(sitk_image)\n \n #print(image3d.shape)\n #print(image2d.shape)\n #print(image2d_preview.shape)\n \n spacing = sitk_image.GetSpacing()\n size = list(sitk_image.GetSize())\n \n slice_thickness = spacing[2]\n \n\n\n #Utilizing the sarcopenia-ai model to predict the L3 vertabrae \n with graph.as_default():\n set_session(sess)\n preds = slice_detection_model.predict(image2d)\n\n \n print('-------------- Predict')\n #Processing the model output \n pred_z, prob = decode_slice_detection_prediction(preds)\n slice_z = adjust_detected_position_spacing(pred_z, spacing)\n print('Prob: '+ str(prob))\n print('Slice Z: ' +str(slice_z) )\n print('{red_z: '+str(pred_z))\n\n \n #Normalizing the prediction image to be within %28-%47 percent of the body \n new_z_calculate = 0\n new_pred_z = pred_z\n new_slice_z = slice_z\n new_prob = prob\n\n\n\n print('-------------- Normalize')\n if(slice_z < .27*size[2] or slice_z > .48*size[2]):\n \n print(\"---------------------debug\")\n print(preds.shape)\n print(preds.shape[1])\n new_pred_z = find_max(preds[0, int(.27*preds.shape[1]):int(.48*preds.shape[1])])\n new_pred_z = new_pred_z + int(.27*preds.shape[1]);\n new_slice_z = adjust_detected_position_spacing(new_pred_z, spacing)\n print(\"old position\")\n print(pred_z)\n print(slice_z)\n print(\"new position\")\n print(new_pred_z)\n print(new_slice_z)\n new_z_calculate =1;\n new_prob = float(preds[0,new_pred_z])\n \n \n\n \n ## Outputting prediction data \n print('-------------- Predict CSV')\n preds_reshaped = preds.reshape(preds.shape[0], -1) \n numpy.savetxt(out_csv+\"PRED_\"+str(pred_id)+\".csv\", preds_reshaped, delimiter=\",\")\n\n\n\n #If the prediction for L3 is above the predifined threshold for acceptance \n if (new_prob > prob_threshold_U):\n print('-------------- Above')\n image = image3d\n slice_image = image[new_slice_z,:, :]\n image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)\n image2dB = place_line_on_img(image2d[0], -new_pred_z, new_pred_z, r=1)\n\n cv2.imwrite(out_yes+\"/\"+str(pred_id)+'_YES_'+image_name+'_SL.jpg', to256(slice_image))\n cv2.imwrite(out_yes+\"/\"+str(pred_id)+'_YES_'+image_name+'_FR.jpg', to256(image2dA))\n cv2.imwrite(out_yes+\"/\"+str(pred_id)+'_YES_'+image_name+'_FR2.jpg', to256(image2dB))\n\n output = [image_path,folder,sub_folder,sub_sub_folder,'YES',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]\n \n lst.append(output)\n \n #Images where the L3 vertabrae was not identified \n elif (new_prob <= prob_threshold_L ):\n print('-------------- No')\n image = image3d\n slice_image = image[new_slice_z,:, :]\n image2dA = place_line_on_img(image2d[0], -pred_z, -pred_z, r=1)\n image2dB = place_line_on_img(image2d[0], -new_pred_z, -new_pred_z, r=1)\n\n cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_SL.jpg', to256(slice_image))\n cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR.jpg', to256(image2dA))\n cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR2.jpg', to256(image2dB))\n \n \n\n output = [image_path,folder,sub_folder,sub_sub_folder,'NO',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]\n \n lst.append(output)\n\n #Images where the L3 vertabrae was identified but confidence requirements were not met. 
\n else:\n print('-------------- Review') \n image = image3d\n slice_image = image[new_slice_z,:, :]\n \n \n\n image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)\n image2dB = place_line_on_img(image2d[0], new_pred_z, new_pred_z, r=1)\n\n \n cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_SL_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(slice_image))\n cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR_'+str(slice_z)+'_PROB_'+str(prob)+'.jpg', to256(image2dA))\n cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR2_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(image2dB))\n \n output = [image_path,folder,sub_folder,sub_sub_folder,'REVIEW',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]\n lst.append(output)\n\n \n #Images that error out (e.g. image orientation is incorrect)\n except:\n print('-------------- Wrong')\n print('-------------- ')\n print('-------------- ')\n print(\"Something went wrong - File: \"+image_path)\n print(\"Unexpected error\"+str(sys.exc_info()[0]))\n output = [image_path,folder,sub_folder,sub_sub_folder,'Error','','','Something went wrong:'+str(sys.exc_info()[1]),'', orient_nib]\n lst.append(output)\n \n\n \n \n#Outputting the results dataset \ndf = pd.DataFrame(lst, columns=cols)\nif not os.path.exists('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'):\n os.mkdir('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/')\n\n\ndf.to_csv('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'+path+'_'+test_name+\".csv\")\n\n\nprint(' ')\nprint(' ')\nprint(' ')\nprint(' -------------- PROCESSING COMPLETE ------------------- ')\n\n"
] |
[
[
"numpy.max",
"tensorflow.get_default_graph",
"pandas.DataFrame",
"tensorflow.Session",
"numpy.min",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.python.keras.backend.set_session",
"numpy.clip"
]
] |
cisc474projectgroup/cartpole-q-learning
|
[
"d7215990c8bdf8c1ff20cdfa3a7530e1a2c641b5"
] |
[
"agent.py"
] |
[
"import random\nimport copy\nfrom collections import defaultdict\nfrom collections import deque\nfrom collections import namedtuple\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\nclass Q():\n\n def __init__(self, n_actions, observation_space, bin_size, low_bound=None, high_bound=None, initial_mean=0.0, initial_std=0.0):\n self.n_actions = n_actions\n self._observation_dimension = 1\n for d in observation_space.shape:\n self._observation_dimension *= d\n\n self._bin_sizes = bin_size if isinstance(bin_size, list) else [bin_size] * self._observation_dimension\n self._dimension_bins = []\n for i, low, high in self._low_high_iter(observation_space, low_bound, high_bound):\n b_size = self._bin_sizes[i]\n bins = self._make_bins(low, high, b_size)\n print(bins)\n self._dimension_bins.append(bins)\n\n # if we encounter the new observation, we initialize action evaluations\n self.table = defaultdict(lambda: initial_std * np.random.randn(self.n_actions) + initial_mean)\n\n @classmethod\n def _make_bins(cls, low, high, bin_size):\n bins = np.arange(low, high, (float(high) - float(low)) / (bin_size - 2)) # exclude both ends\n if min(bins) < 0 and 0 not in bins:\n bins = np.sort(np.append(bins, [0])) # 0 centric bins\n return bins\n \n @classmethod\n def _low_high_iter(cls, observation_space, low_bound, high_bound):\n lows = observation_space.low\n highs = observation_space.high\n for i in range(len(lows)):\n low = lows[i]\n if low_bound is not None:\n _low_bound = low_bound if not isinstance(low_bound, list) else low_bound[i]\n low = low if _low_bound is None else max(low, _low_bound)\n \n high = highs[i]\n if high_bound is not None:\n _high_bound = high_bound if not isinstance(high_bound, list) else high_bound[i]\n high = high if _high_bound is None else min(high, _high_bound)\n \n yield i, low, high\n\n def observation_to_state(self, observation):\n\n state = 0\n # caution: bin_size over 10 will not work accurately\n unit = max(self._bin_sizes)\n for d, o in enumerate(observation.flatten()):\n state = state + np.digitize(o, self._dimension_bins[d]) * pow(unit, d) # bin_size numeral system\n\n return state\n \n def values(self, observation):\n state = self.observation_to_state(observation)\n return self.table[state]\n\n\nclass Agent():\n\n def __init__(self, q, epsilon=0.05):\n self.q = q\n self.epsilon = epsilon\n \n def act(self, observation):\n action = -1\n if np.random.random() < self.epsilon:\n action = np.random.choice(self.q.n_actions)\n else:\n action = np.argmax(self.q.values(observation))\n \n return action\n\n\nclass Trainer():\n\n def __init__(self, agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=None, epsilon=0.05, epsilon_decay=None, max_step=-1,target=500):\n self.agent = agent\n self.gamma = gamma\n self.learning_rate = learning_rate\n self.learning_rate_decay = learning_rate_decay\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_step = max_step\n\n def train(self, env, episode_count, render=False):\n mean_step_all =[]\n mean_q_all=[]\n goal_time_all=[]\n reward_all=[]\n self.agent.epsilon = self.epsilon\n values = []\n steps = deque(maxlen=100)\n lr = self.learning_rate\n for i in range(episode_count):\n reward_total = 0\n goal_time =0\n obs = env.reset()\n step = 0\n done = False\n while not done:\n if render:\n env.render()\n\n action = self.agent.act(obs)\n next_obs, reward, done,goal_time= env.step(action)\n reward_total+= reward\n goal_time += goal_time\n state = self.agent.q.observation_to_state(obs)\n future = 0 if done else 
np.max(self.agent.q.values(next_obs))\n value = self.agent.q.table[state][action]\n self.agent.q.table[state][action] += lr * (reward + self.gamma * future - value)\n\n obs = next_obs\n values.append(value)\n step += 1\n if self.max_step > 0 and step > self.max_step:\n done = True\n else:\n mean = np.mean(values)\n steps.append(step)\n mean_step = np.mean(steps)\n print(\"Episode {}: {}steps(avg{}). epsilon={:.3f}, lr={:.3f}, mean q value={:.2f}\".format(\n i, step, mean_step, self.agent.epsilon, lr, mean)\n )\n \n \n mean_step_all.append(mean_step)\n mean_q_all.append(mean)\n reward_all.append(reward_total)\n \n if mean_step>1000:\n render=True\n \n if self.epsilon_decay is not None: \n self.agent.epsilon = self.epsilon_decay(self.agent.epsilon, i)\n if self.learning_rate_decay is not None:\n lr = self.learning_rate_decay(lr, i)\n\n # plot in comparsion\n plt.xlabel('Episodes')\n plt.ylabel('reward')\n \n # plt.plot(mean_step_all, label='Q-learning', color='blue')\n plt.plot(reward_all, label='Q-learning', color='yellow')\n plt.plot(goal_time_all, label='Q-learning', color='green')\n # plt.legend(['reward', 'Q-learning'], loc='upper right')\n plt.title('reward/Episode')\n plt.show()\n\n # plot in comparsion\n plt.xlabel('Episodes')\n plt.ylabel('goal_time')\n\n # plt.plot(mean_step_all, label='Q-learning', color='blue')\n plt.plot(goal_time_all, label='Q-learning', color='green')\n # plt.legend(['reward', 'Q-learning'], loc='upper right')\n plt.title('goal/Episode')\n plt.show()\n\n"
] |
[
[
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.mean",
"numpy.digitize",
"matplotlib.pyplot.ylabel",
"numpy.append",
"matplotlib.pyplot.show",
"numpy.random.random"
]
] |
andycasey/stellar-twins
|
[
"9b3cfbf608e3e15a2358bbd33aa5ae21cfc1d0dd"
] |
[
"data/check_apogee_spectra.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Test the quoted APOGEE uncertainties from individual (rebinned) spectra. \"\"\"\n\n__author__ = \"Andy Casey <arc@ast.cam.ac.uk>\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nfrom glob import glob\nfrom itertools import combinations\n\n\ndef get_differences(apStar_filename):\n\n image = fits.open(apStar_filename)\n\n N_visits = image[0].header[\"NVISITS\"]\n\n data_index = 1\n error_index = 2\n mask_index = 3\n\n # Generate all permutations.\n differences = []\n for i, j in combinations(range(N_visits), 2):\n\n di = image[data_index].data[i + 2, :]\n dj = image[data_index].data[j + 2, :]\n sigma = np.sqrt(image[error_index].data[i + 2, :]**2 \\\n + image[error_index].data[j + 2, :]**2)\n\n ok = (di > 0) * (dj > 0) * np.isfinite(di * dj * sigma) \\\n * (image[mask_index].data[i + 2, :] == 0) \\\n * (image[mask_index].data[j + 2, :] == 0)\n differences.extend(((di - dj)/sigma)[ok])\n\n differences = np.array(differences).flatten()\n return differences\n\n\ndef plot_differences(differences):\n\n fig, ax = plt.subplots(1)\n y_bin, x_bin, _ = ax.hist(differences, bins=100, facecolor=\"#666666\")\n x = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)\n y = np.exp(-0.5*x**2)/np.sqrt(2*np.pi)\n ax.plot(x, y*np.trapz(y_bin, x=x_bin[1:])/np.sqrt(2*np.pi), lw=2, c=\"r\")\n ax.set_title(\"mu = {0:.1f}, sigma(|d|) = {1:.1f}\".format(\n np.median(differences), np.std(np.abs(differences))))\n\n ax.set_xlabel(\"(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)\")\n return fig\n\n\n\nif __name__ == \"__main__\":\n\n filenames = glob(\"APOGEE/*.fits\")\n all_differences = []\n for filename in filenames:\n\n differences = get_differences(filename)\n if len(differences) > 0: \n fig = plot_differences(differences)\n fig.savefig(\"APOGEE/{0}.png\".format(filename.split(\"/\")[-1].split(\".\")[0]))\n\n plt.close(\"all\")\n print(filename)\n all_differences.extend(differences)\n\n fig = plot_differences(np.array(all_differences))\n fig.savefig(\"APOGEE/all.png\")\n\n\n"
] |
[
[
"numpy.array",
"numpy.median",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.subplots",
"numpy.trapz",
"numpy.sqrt",
"numpy.abs",
"numpy.isfinite"
]
] |
ctrl-z-9000-times/NEUWON
|
[
"ed5c13f8cecfd638dd0952d231c36f48073a64a6"
] |
[
"neuwon/database/examples/life/model.py"
] |
[
"from neuwon.database import Database\nimport numpy as np\nimport numba\n\nclass GameOfLife:\n\n class _CellBaseClass:\n __slots__ = ()\n @classmethod\n def _add_to_database(cls, database):\n cell_data = database.add_class(\"Cell\", cls)\n cell_data.add_attribute(\"coordinates\", shape=(2,), dtype=np.int32)\n cell_data.add_attribute(\"alive\", False, dtype=np.bool)\n cell_data.add_connectivity_matrix(\"neighbors\", \"Cell\")\n return cell_data.get_instance_type()\n\n def __init__(self, shape):\n self.db = Database()\n self.Cell = self._CellBaseClass._add_to_database(self.db)\n self.shape = shape\n self.grid = np.empty(self.shape, dtype=object)\n for x in range(self.shape[0]):\n for y in range(self.shape[1]):\n self.grid[x,y] = self.Cell(coordinates=(x,y))\n for x in range(self.shape[0]):\n for y in range(self.shape[1]):\n cell = self.grid[x,y]\n neighbors = []\n for x_offset in [-1, 0, 1]:\n for y_offset in [-1, 0, 1]:\n nx = x - x_offset\n ny = y - y_offset\n if nx < 0: nx = 0\n if ny < 0: ny = 0\n if nx >= self.shape[0]: nx = self.shape[0] - 1\n if ny >= self.shape[1]: ny = self.shape[1] - 1\n neighbor = self.grid[nx, ny]\n if cell != neighbor:\n neighbors.append(neighbor)\n cell.neighbors = neighbors\n self.db.get(\"Cell.neighbors\").to_csr()\n\n def randomize(self, alive_fraction):\n a = self.db.get_data(\"Cell.alive\")\n a.fill(False)\n a[np.random.uniform(size=a.shape) < alive_fraction] = True\n\n def get_num_alive(self):\n return sum(self.db.get_data(\"Cell.alive\"))\n\n def advance(self):\n a = self.db.get_data(\"Cell.alive\")\n n = self.db.get_data(\"Cell.neighbors\")\n # C is the number of living neighbors for each cell.\n c = n * np.array(a, dtype=np.int32)\n _advance(a, c)\n\n@numba.njit(parallel=True)\ndef _advance(a, c):\n for idx in numba.prange(len(a)):\n ci = c[idx]\n if a[idx]:\n if ci not in range(2, 4):\n a[idx] = False\n else:\n if ci == 3:\n a[idx] = True\n"
] |
[
[
"numpy.array",
"numpy.random.uniform",
"numpy.empty"
]
] |
zean-wen/mmgnn_textvqa
|
[
"2cfe82ed54610975a1d4937f2032e5f4565ecbe7"
] |
[
"pythia/utils/checkpoint.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport os\nimport warnings\n\n# import git\nimport torch\nimport yaml\n\nfrom pythia.common.registry import registry\nfrom pythia.utils.distributed_utils import is_main_process, synchronize\nfrom pythia.utils.general import (ckpt_name_from_core_args,\n foldername_from_config_override, updir)\n\n\nclass Checkpoint:\n def __init__(self, trainer):\n \"\"\"\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n \"\"\"\n self.trainer = trainer\n\n self.config = self.trainer.config\n self.save_dir = self.config.training_parameters.save_dir\n self.model_name = self.config.model\n\n self.ckpt_foldername = ckpt_name_from_core_args(self.config)\n self.ckpt_foldername += foldername_from_config_override(self.trainer.args)\n\n self.device = registry.get(\"current_device\")\n\n self.ckpt_prefix = \"\"\n\n if hasattr(self.trainer.model, \"get_ckpt_name\"):\n self.ckpt_prefix = self.trainer.model.get_ckpt_name() + \"_\"\n\n self.config[\"log_foldername\"] = self.ckpt_foldername\n self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername)\n self.pth_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + self.model_name + getattr(self.config.model_attributes,\n self.model_name).code_name + \"_final.pth\"\n )\n\n self.models_foldername = os.path.join(self.ckpt_foldername, \"models\")\n if not os.path.exists(self.models_foldername):\n os.makedirs(self.models_foldername)\n\n self.save_config()\n self.repo_path = updir(os.path.abspath(__file__), n=3)\n # self.repo = git.Repo(self.repo_path)\n\n def save_config(self):\n cfg_file = os.path.join(self.ckpt_foldername, \"config.yaml\")\n with open(cfg_file, \"w\") as f:\n # Pop out config_override if present to remove clutter in\n # saved configuration yaml file\n self.config.pop(\"config_override\", None)\n f.write(str(self.config))\n\n def load_state_dict(self):\n tp = self.config.training_parameters\n if tp.resume_file is not None:\n if os.path.exists(tp.resume_file):\n self._load(tp.resume_file)\n return\n else:\n raise RuntimeError(\"{} doesn't exist\".format(tp.resume_file))\n\n ckpt_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + \"best.ckpt\"\n )\n\n if tp.resume is True:\n if os.path.exists(ckpt_filepath):\n self._load(ckpt_filepath)\n else:\n warnings.warn(\n \"Tried to resume but checkpoint filepath {} \"\n \"is not present. Skipping.\".format(ckpt_filepath)\n )\n\n def _load(self, file):\n self.trainer.writer.write(\"Loading checkpoint\")\n ckpt = self._torch_load(file)\n\n data_parallel = registry.get(\"data_parallel\")\n\n if \"model\" in ckpt:\n ckpt_model = ckpt[\"model\"]\n else:\n ckpt_model = ckpt\n ckpt = {\"model\": ckpt}\n\n pretrained_mapping = self.config.training_parameters.pretrained_mapping\n\n if not self.config.training_parameters.load_pretrained:\n pretrained_mapping = {}\n\n new_dict = {}\n\n # TODO: Move to separate function\n for attr in ckpt_model:\n if \"fa_history\" in attr:\n new_dict[attr.replace(\"fa_history\", \"fa_context\")] = ckpt_model[attr]\n elif data_parallel is False and attr.startswith(\"module.\"):\n # In case the ckpt was actually a data parallel model\n # replace first module. 
from dataparallel with empty string\n new_dict[attr.replace(\"module.\", \"\", 1)] = ckpt_model[attr]\n else:\n new_dict[attr] = ckpt_model[attr]\n\n if len(pretrained_mapping.items()) == 0:\n final_dict = new_dict\n\n self.trainer.model.load_state_dict(final_dict)\n\n if \"optimizer\" in ckpt:\n self.trainer.optimizer.load_state_dict(ckpt[\"optimizer\"])\n else:\n warnings.warn(\n \"'optimizer' key is not present in the \"\n \"checkpoint asked to be loaded. Skipping.\"\n )\n\n self.trainer.early_stopping.init_from_checkpoint(ckpt)\n\n self.trainer.writer.write(\"Checkpoint loaded\")\n\n if \"best_iteration\" in ckpt:\n self.trainer.current_iteration = ckpt[\"best_iteration\"]\n registry.register(\"current_iteration\", self.trainer.current_iteration)\n\n if \"best_epoch\" in ckpt:\n self.trainer.current_epoch = ckpt[\"best_epoch\"]\n registry.register(\"current_epoch\", self.trainer.current_epoch)\n else:\n final_dict = {}\n model = self.trainer.model\n own_state = model.state_dict()\n\n for key, value in pretrained_mapping.items():\n key += \".\"\n value += \".\"\n for attr in new_dict:\n for own_attr in own_state:\n if (\n key in attr\n and value in own_attr\n and attr.replace(key, \"\") == own_attr.replace(value, \"\")\n ):\n self.trainer.writer.write(\n \"Copying \" + attr + \" \" + own_attr\n )\n own_state[own_attr].copy_(new_dict[attr])\n self.trainer.writer.write(\"Pretrained model loaded\")\n\n def _load_state_dict_mapping(self, ckpt_model):\n model = self.trainer.model\n attr_mapping = {\n \"image_feature_encoders\": \"img_feat_encoders\",\n \"image_feature_embeddings_list\": \"img_embeddings_list\",\n \"image_text_multi_modal_combine_layer\": \"multi_modal_combine_layer\",\n \"text_embeddings\": \"text_embeddings\",\n \"classifier\": \"classifier\",\n }\n\n data_parallel = registry.get(\"data_parallel\")\n\n if not data_parallel:\n for key in attr_mapping:\n attr_mapping[key.replace(\"module.\", \"\")] = attr_mapping[key]\n attr_mapping.pop(key)\n\n for key in attr_mapping:\n getattr(model, key).load_state_dict(ckpt_model[attr_mapping[key]])\n\n def _torch_load(self, file):\n if \"cuda\" in str(self.device):\n return torch.load(file)\n else:\n return torch.load(file, map_location=lambda storage, loc: storage)\n\n # def _get_vcs_fields(self):\n # \"\"\"Returns a dict with git fields of the current repository\n #\n # To reproduce an experiment directly from a checkpoint\n #\n # 1) Export `config` key as a yaml\n # 2) Clone repository and checkout at given commit on given branch\n # 3) Any local change (diff) while running the experiment is stored\n # in the value with key `git/diff`, output the diff to a `path.diff`\n # file and apply the patch to the current state by simply\n #\n # `patch -p0 < path.diff`\n # \"\"\"\n #\n # return {\n # \"git/branch\": self.repo.active_branch.name,\n # \"git/commit_hash\": self.repo.head.commit.name_rev,\n # \"git/commit_author\": self.repo.head.commit.author.name,\n # \"git/commit_message\": self.repo.head.commit.message,\n # \"git/diff\": self.repo.git.diff(\"--no-prefix\"),\n # }\n\n def save(self, iteration, update_best=False):\n # Only save in main process\n if not is_main_process():\n return\n\n ckpt_filepath = os.path.join(\n self.models_foldername, \"model_%d.ckpt\" % iteration\n )\n best_ckpt_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + \"best.ckpt\"\n )\n\n best_iteration = self.trainer.early_stopping.best_monitored_iteration\n best_metric = self.trainer.early_stopping.best_monitored_value\n\n ckpt = {\n \"model\": 
self.trainer.model.state_dict(),\n \"optimizer\": self.trainer.optimizer.state_dict(),\n \"best_iteration\": best_iteration,\n \"best_metric_value\": best_metric,\n \"config\": self.config,\n }\n\n # git_metadata_dict = self._get_vcs_fields()\n # ckpt.update(git_metadata_dict)\n\n torch.save(ckpt, ckpt_filepath)\n\n if update_best:\n torch.save(ckpt, best_ckpt_filepath)\n\n def restore(self):\n self.trainer.writer.write(\"Restoring checkpoint\")\n best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + \"best.ckpt\")\n\n if os.path.exists(best_path):\n ckpt = self._torch_load(best_path)\n self.trainer.model.load_state_dict(ckpt[\"model\"])\n self.trainer.optimizer.load_state_dict(ckpt[\"optimizer\"])\n\n def finalize(self):\n torch.save(self.trainer.model.state_dict(), self.pth_filepath)\n"
] |
[
[
"torch.save",
"torch.load"
]
] |
DataViva/dataviva-scripts
|
[
"1e36f11e2849c33b8118cefe1755d312b19c0ecd",
"1e36f11e2849c33b8118cefe1755d312b19c0ecd",
"1e36f11e2849c33b8118cefe1755d312b19c0ecd"
] |
[
"scripts/secex_monthly/_pci_wld_eci.py",
"commands/load_metadata/countries.py",
"commands/load_metadata/territories.py"
] |
[
"import sys\nimport pandas as pd\n\ndef pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year):\n \n pcis = pd.read_csv(pci_file_path, sep=\"\\t\", compression=\"bz2\", converters={\"hs_id\": str})\n pcis[\"year\"] = int(year)\n pcis[\"month\"] = \"00\"\n pcis = pcis.set_index([\"year\", \"month\", \"hs_id\"])\n \n ecis = pd.read_csv(eci_file_path, sep=\"\\t\", compression=\"bz2\")\n ecis[\"year\"] = int(year)\n ecis[\"month\"] = \"00\"\n ecis = ecis.set_index([\"year\", \"month\", \"wld_id\"])\n \n ymp[\"pci\"] = pcis[\"pci\"]\n ymw[\"eci\"] = ecis[\"eci\"]\n \n return [ymp, ymw]\n ",
"import click\nimport pandas\nimport json\nfrom clients import s3, redis\n\n\n@click.command()\n@click.option('--both', 'upload', flag_value='s3_and_redis', default=True, help='Upload metadata to both s3 and Redis')\n@click.option('--s3', 'upload', flag_value='only_s3', help='Upload metadata only to s3')\n@click.option('--redis', 'upload', flag_value='only_redis', help='Upload metadata only to Redis')\ndef countries(upload):\n csv = s3.get('metadata/continents.csv')\n df_continents = pandas.read_csv(\n csv,\n sep=';',\n header=0,\n names=['id', 'country_id', 'name_en', 'name_pt'],\n converters={\n \"country_id\": lambda x: '%03d' % int(x)\n }\n )\n\n continents = {}\n\n for _, row in df_continents.iterrows():\n continents[row['country_id']] = {\n 'id': row[\"id\"],\n 'name_en': row[\"name_en\"],\n 'name_pt': row[\"name_pt\"],\n }\n\n csv = s3.get('metadata/wld.csv')\n df = pandas.read_csv(\n csv,\n sep=';',\n header=0,\n names=['id', 'name_pt', 'name_en', 'abbreviation'],\n converters={\n \"id\": str\n }\n )\n\n countries = {}\n\n for _, row in df.iterrows():\n country = {\n 'id': row[\"id\"],\n 'name_pt': row[\"name_pt\"],\n 'name_en': row[\"name_en\"],\n 'abbrv': row[\"abbreviation\"],\n 'continent': continents.get(row[\"id\"], {})\n }\n\n countries[row['id']] = country\n if upload != 'only_s3':\n redis.set('country/' + str(row['id']), json.dumps(country, ensure_ascii=False))\n\n if upload != 'only_redis':\n s3.put('country.json', json.dumps(countries, ensure_ascii=False))\n\n click.echo(\"Countries loaded.\")\n",
"import click\nimport pandas\nimport pickle\nimport json\nfrom clients import s3, redis\n\n\n@click.command()\n@click.option('--both', 'upload', flag_value='s3_and_redis', default=True, help='Upload metadata to both s3 and Redis')\n@click.option('--s3', 'upload', flag_value='only_s3', help='Upload metadata only to s3')\n@click.option('--redis', 'upload', flag_value='only_redis', help='Upload metadata only to Redis')\ndef territories(upload):\n csv = s3.get('metadata/development_territories.csv')\n df = pandas.read_csv(\n csv,\n sep=';',\n header=0,\n names=['territory', 'microterritory', 'municipy_id'],\n converters={\n \"municipy_id\": str\n }\n )\n\n territories = {}\n\n for _, row in df.iterrows():\n territory = {\n 'territory': row[\"territory\"],\n 'microterritory': row[\"microterritory\"],\n 'municipy_id': row[\"municipy_id\"]\n }\n\n territories[row['municipy_id']] = territory\n if upload != 'only_s3':\n redis.set('territory/' +\n str(row['municipy_id']), json.dumps(territory, ensure_ascii=False))\n\n if upload != 'only_redis':\n s3.put('territory.json', json.dumps(territories, ensure_ascii=False))\n\n click.echo(\"Territories loaded.\")\n"
] |
[
[
"pandas.read_csv"
],
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] |
Red-Portal/Stone-Soup-1
|
[
"267621c86161a839da9b144c2745d28d9166d903"
] |
[
"docs/examples/Moving_Platform_Simulation.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nMulti-Sensor Moving Platform Simulation Example\n===============================================\nThis example looks at how multiple sensors can be mounted on a single moving platform and exploiting a defined moving\nplatform as a sensor target.\n\"\"\"\n\n# %%\n# Building a Simulated Multi-Sensor Moving Platform\n# -------------------------------------------------\n# The focus of this example is to show how to setup and configure a simulation environment in order to provide a\n# multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information\n# about trackers and how to configure them review of the tutorials and demonstrations is recommended.\n#\n# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and\n# :class:`~.Sensor` objects.\n#\n# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.\n# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example\n# will be imported at the relevant point in order to draw attention to the new features.\n\n# Some general imports and set up\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\n\n# Stone Soup imports:\nfrom stonesoup.types.state import State, GaussianState\nfrom stonesoup.types.array import StateVector\nfrom stonesoup.types.array import CovarianceMatrix\nfrom stonesoup.models.transition.linear import (\n CombinedLinearGaussianTransitionModel, ConstantVelocity)\nfrom stonesoup.predictor.particle import ParticlePredictor\nfrom stonesoup.resampler.particle import SystematicResampler\nfrom stonesoup.updater.particle import ParticleUpdater\nfrom stonesoup.measures import Mahalanobis\nfrom stonesoup.hypothesiser.distance import DistanceHypothesiser\nfrom stonesoup.dataassociator.neighbour import GNNWith2DAssignment\nfrom stonesoup.tracker.simple import SingleTargetTracker\n\n# Define the simulation start time\nstart_time = datetime.now()\n\n# %%\n# Create a multi-sensor platform\n# ------------------------------\n# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a\n# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a\n# :class:`~.MultiTargetGroundTruthSimulator`.\n#\n# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within\n# a 6 dimensional state space according to the following :math:`\\mathbf{x}`.\n#\n# .. math::\n# \\mathbf{x} = \\begin{bmatrix}\n# x\\\\ \\dot{x}\\\\ y\\\\ \\dot{y}\\\\ z\\\\ \\dot{z} \\end{bmatrix}\n# = \\begin{bmatrix}\n# 0\\\\ 0\\\\ 0\\\\ 50\\\\ 8000\\\\ 0 \\end{bmatrix}\n#\n# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.\n# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:\n#\n# .. 
math::\n# F_{k} = \\begin{bmatrix}\n# 1 & \\triangle k & 0 & 0 & 0 & 0\\\\\n# 0 & 1 & 0 & 0 & 0 & 0\\\\\n# 0 & 0 & 1 & \\triangle k & 0 & 0\\\\\n# 0 & 0 & 0 & 1 & 0 & 0\\\\\n# 0 & 0 & 0 & 0 & 1 & \\triangle k \\\\\n# 0 & 0 & 0 & 0 & 0 & 1\\\\\n# \\end{bmatrix}\n\n# First import the Moving platform\nfrom stonesoup.platform.base import MovingPlatform\n\n# Define the initial platform position, in this case the origin\ninitial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])\ninitial_state = State(initial_loc, start_time)\n\n# Define transition model and position for 3D platform\ntransition_model = CombinedLinearGaussianTransitionModel(\n [ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])\n\n# create our fixed platform\nsensor_platform = MovingPlatform(states=initial_state,\n position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5),\n transition_model=transition_model)\n\n# %%\n# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this\n# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor\n# (e.g. an optical sensor, which has no capability to directly measure range).\n#\n# First we will create a radar which is capable of measuring bearing (:math:`\\phi`), elevation (:math:`\\theta`), range\n# (:math:`r`) and range-rate (:math:`\\dot{r}`) of the target platform.\n\n# Import a range rate bearing elevation capable radar\nfrom stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate\n\n# Create a radar sensor\nradar_noise_covar = CovarianceMatrix(np.diag(\n np.array([np.deg2rad(3), # Elevation\n np.deg2rad(3), # Bearing\n 100., # Range\n 25.]))) # Range Rate\n\n# radar mountings\nradar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone\nradar_rotation_offsets = StateVector([0, 0, 0])\n\n# Mount the radar onto the platform\n\nradar = RadarElevationBearingRangeRate(ndim_state=6,\n position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5),\n noise_covar=radar_noise_covar,\n mounting_offset=radar_mounting_offsets,\n rotation_offset=radar_rotation_offsets,\n )\nsensor_platform.add_sensor(radar)\n\n# %%\n# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\\phi`) and elevation (:math:`\\theta`)\n# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.\n# The imager sensor model is described by the following equations:\n#\n# .. math::\n# \\mathbf{z}_k = h(\\mathbf{x}_k, \\dot{\\mathbf{x}}_k)\n#\n# where:\n#\n# * :math:`\\mathbf{z}_k` is a measurement vector of the form:\n#\n# .. math::\n# \\mathbf{z}_k = \\begin{bmatrix} \\theta \\\\ \\phi \\end{bmatrix}\n#\n# * :math:`h` is a non - linear model function of the form:\n#\n# .. math::\n# h(\\mathbf{x}_k,\\dot{\\mathbf{x}}_k) = \\begin{bmatrix}\n# \\arcsin(\\mathcal{z} /\\sqrt{\\mathcal{x} ^ 2 + \\mathcal{y} ^ 2 +\\mathcal{z} ^ 2}) \\\\\n# \\arctan(\\mathcal{y},\\mathcal{x}) \\ \\\n# \\end{bmatrix} + \\dot{\\mathbf{x}}_k\n#\n# * :math:`\\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:\n#\n# .. math::\n# \\mathbf{z}_k \\sim \\mathcal{N}(0, R)\n#\n# .. 
math::\n# R = \\begin{bmatrix}\n# \\sigma_{\\theta}^2 & 0 \\\\\n# 0 & \\sigma_{\\phi}^2 \\\\\n# \\end{bmatrix}\n\n# Import a passive sensor capability\nfrom stonesoup.sensor.passive import PassiveElevationBearing\n\nimager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation\n np.deg2rad(0.05)]))) # Bearing\n\n# imager mounting offset\nimager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod\nimager_rotation_offsets = StateVector([0, 0, 0])\n\n# Mount the imager onto the platform\nimager = PassiveElevationBearing(ndim_state=6,\n mapping=(0, 2, 4),\n noise_covar=imager_noise_covar,\n mounting_offset=imager_mounting_offsets,\n rotation_offset=imager_rotation_offsets,\n )\nsensor_platform.add_sensor(imager)\n\n# %%\n# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.\n# The values in this array are defined in the platforms local coordinate frame of reference. So in this case an offset\n# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the\n# platform.\n#\n# Now that we have mounted the two sensors we can see that the platform object has both associated with it:\nsensor_platform.sensors\n\n\n# %%\n# Create a Target Platform\n# ------------------------\n# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator\n# functionality within Stone Soup, which we demonstrated in the previous example, and creates a random target based on\n# our selected parameters. The second method provides a means to generate a target which will perform specific\n# behaviours, this is the approach we will take here.\n#\n# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as\n# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform\n# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop\n# over the transition sequence provided until the simulation ends.\n#\n# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as\n# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms\n# within the simulation (sensor-target geometry dependant).\n#\n# For this example we will create an air target which will fly a sequence of straight and level followed by a\n# coordinated turn in the :math:`x-y` plane. 
This is configured such that the target will perform each manoeuvre for 8\n# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.\n\n# Import a Constant Turn model to enable target to perform basic manoeuvre\nfrom stonesoup.models.transition.linear import ConstantTurn\n\nstraight_level = CombinedLinearGaussianTransitionModel(\n [ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])\n\n# Configure the aircraft turn behaviour\nturn_noise_diff_coeffs = np.array([0., 0.])\n\nturn_rate = np.pi/32 # specified in radians per seconds...\n\nturn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)\n\n# Configure turn model to maintain current altitude\nturning = CombinedLinearGaussianTransitionModel(\n [turn_model, ConstantVelocity(0.)])\n\nmanoeuvre_list = [straight_level, turning]\nmanoeuvre_times = [timedelta(seconds=8),\n timedelta(seconds=8)]\n\n# %%\n# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving\n# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.\n\n# Import a multi-transition moving platform\nfrom stonesoup.platform.base import MultiTransitionMovingPlatform\n\ninitial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])\ninitial_target_state = State(initial_target_location, start_time)\ntarget = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,\n transition_times=manoeuvre_times,\n states=initial_target_state,\n position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5),\n sensors=None)\n\n# %%\n# Creating the simulator\n# ----------------------\n# Now that we have build our sensor platform and a target platform we need to wrap them in a simulator. Because we do\n# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a\n# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then\n# feed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.\n\n# Import the required simulators\nfrom stonesoup.simulator.simple import DummyGroundTruthSimulator\nfrom stonesoup.simulator.platform import PlatformDetectionSimulator\n\n# %%\n# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for\n# 25 seconds.\n\ntimes = np.arange(0, 24, 1) # 25 seconds\n\ntimestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]\n\ntruths = DummyGroundTruthSimulator(times=timestamps)\nsim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])\n\n# %%\n# Create a Tracker\n# ------------------------------------\n# Now that we have setup our sensor platform, target and simulation we need to create a tracker. For this example we\n# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example\n# we will use an inflated constant noise model to account for target motion uncertainty.\n#\n# Note that we don't add a measurement model to the updater, this is because each sensor adds their measurement model to\n# each detection they generate. 
The tracker handles this internally by checking for a measurement model with each\n# detection it receives and applying only the relevant measurement model.\n\ntarget_transition_model = CombinedLinearGaussianTransitionModel(\n [ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])\n\n# First add a Particle Predictor\npredictor = ParticlePredictor(target_transition_model)\n\n# Now create a resampler and particle updater\nresampler = SystematicResampler()\nupdater = ParticleUpdater(measurement_model=None,\n resampler=resampler)\n\n# Create a particle initiator\nfrom stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator\nsingle_point_initiator = SinglePointInitiator(\n GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),\n None)\n\ninitiator = GaussianParticleInitiator(number_particles=500,\n initiator=single_point_initiator)\n\nhypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)\ndata_associator = GNNWith2DAssignment(hypothesiser)\n\nfrom stonesoup.deleter.time import UpdateTimeStepsDeleter\ndeleter = UpdateTimeStepsDeleter(time_steps_since_update=10)\n\n# Create a Kalman single-target tracker\ntracker = SingleTargetTracker(\n initiator=initiator,\n deleter=deleter,\n detector=sim,\n data_associator=data_associator,\n updater=updater\n)\n\n# %%\n# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing\n# only sensor it does not make sense to plot out the detections without animating the resulting plot. This\n# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target\n# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are\n# coloured green.\nfrom matplotlib import animation\nimport matplotlib\n\nmatplotlib.rcParams['animation.html'] = 'jshtml'\n\nfrom stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate\nfrom stonesoup.functions import sphere2cart\n\nfig = plt.figure(figsize=(10, 6))\nax = fig.add_subplot(1, 1, 1)\n\n\nframes = []\nfor time, ctracks in tracker:\n artists = []\n\n ax.set_xlabel(\"$East$\")\n ax.set_ylabel(\"$North$\")\n ax.set_ylim(0, 2250)\n ax.set_xlim(-1000, 1000)\n X = [state.state_vector[0] for state in sensor_platform]\n Y = [state.state_vector[2] for state in sensor_platform]\n artists.extend(ax.plot(X, Y, color='b'))\n\n for detection in sim.detections:\n if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):\n x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]\n color = 'y'\n else:\n r = 10000000\n # extract the platform rotation offsets\n _, el_offset, az_offset = sensor_platform.orientation\n # obtain measurement angles and map to cartesian\n e, a = detection.state_vector\n x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)\n color = 'g'\n X = [sensor_platform.state_vector[0], x]\n Y = [sensor_platform.state_vector[2], y]\n artists.extend(ax.plot(X, Y, color=color))\n\n X = [state.state_vector[0] for state in target]\n Y = [state.state_vector[2] for state in target]\n artists.extend(ax.plot(X, Y, color='r'))\n\n for track in ctracks:\n X = [state.state_vector[0] for state in track]\n Y = [state.state_vector[2] for state in track]\n artists.extend(ax.plot(X, Y, color='k'))\n\n frames.append(artists)\n\nanimation.ArtistAnimation(fig, frames)\n\n\n# %%\n# To increase your confidence 
with simulated platform targets it would be good practice to modify the target to fly\n# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels\n# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.\n\n# %%\n# Key points\n# ----------\n# 1. Platforms, static or moving, can be used as targets for sensor platforms.\n# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.\n# 3. A tracker can be configured to exploit all sensor data created in a simulation.\n\n\n"
] |
[
[
"numpy.deg2rad",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.animation.ArtistAnimation",
"numpy.diag"
]
] |
jinyu121/CIOD
|
[
"97cea8fce0de3d1d552de1ad9b941e85f2920efa"
] |
[
"lib/model/rpn/proposal_layer.py"
] |
[
"from __future__ import absolute_import\n# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\n# --------------------------------------------------------\n# Reorganized and modified by Jianwei Yang and Jiasen Lu\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport yaml\nfrom model.utils.config import cfg\nfrom .generate_anchors import generate_anchors\nfrom .bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch\nfrom model.nms.nms_wrapper import nms\n\nimport pdb\n\nDEBUG = False\n\n\nclass _ProposalLayer(nn.Module):\n \"\"\"\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n\n def __init__(self, feat_stride, scales, ratios):\n super(_ProposalLayer, self).__init__()\n\n self._feat_stride = feat_stride\n self._anchors = torch.from_numpy(generate_anchors(scales=np.array(scales),\n ratios=np.array(ratios))).float()\n self._num_anchors = self._anchors.size(0)\n\n # rois blob: holds R regions of interest, each is a 5-tuple\n # (n, x1, y1, x2, y2) specifying an image batch index n and a\n # rectangle (x1, y1, x2, y2)\n # top[0].reshape(1, 5)\n #\n # # scores blob: holds scores for R regions of interest\n # if len(top) > 1:\n # top[1].reshape(1, 1, 1, 1)\n\n def forward(self, input):\n\n # Algorithm:\n #\n # for each (H, W) location i\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas at cell i to each of the A anchors\n # clip predicted boxes to image\n # remove predicted boxes with either height or width < threshold\n # sort all (proposal, score) pairs by score from highest to lowest\n # take top pre_nms_topN proposals before NMS\n # apply NMS with threshold 0.7 to remaining proposals\n # take after_nms_topN proposals after NMS\n # return the top proposals (-> RoIs top, scores top)\n\n # the first set of _num_anchors channels are bg probs\n # the second set are the fg probs\n scores = input[0][:, self._num_anchors:, :, :]\n bbox_deltas = input[1]\n im_info = input[2]\n cfg_key = input[3]\n\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n min_size = cfg[cfg_key].RPN_MIN_SIZE\n\n batch_size = bbox_deltas.size(0)\n\n feat_height, feat_width = scores.size(2), scores.size(3)\n shift_x = np.arange(0, feat_width) * self._feat_stride\n shift_y = np.arange(0, feat_height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose())\n shifts = shifts.contiguous().type_as(scores).float()\n\n A = self._num_anchors\n K = shifts.size(0)\n\n self._anchors = self._anchors.type_as(scores)\n # anchors = self._anchors.view(1, A, 4) + shifts.view(1, K, 4).permute(1, 0, 2).contiguous()\n anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4)\n anchors = anchors.view(1, K * A, 4).expand(batch_size, K * A, 4)\n\n # Transpose and reshape predicted bbox transformations to get them\n # into the same order as the anchors:\n\n bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous()\n bbox_deltas = bbox_deltas.view(batch_size, -1, 4)\n\n # Same story for 
the scores:\n scores = scores.permute(0, 2, 3, 1).contiguous()\n scores = scores.view(batch_size, -1)\n\n # Convert anchors into proposals via bbox transformations\n proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)\n\n # 2. clip predicted boxes to image\n proposals = clip_boxes(proposals, im_info, batch_size)\n # proposals = clip_boxes_batch(proposals, im_info, batch_size)\n\n # assign the score to 0 if it's non keep.\n # keep = self._filter_boxes(proposals, min_size * im_info[:, 2])\n\n # trim keep index to make it euqal over batch\n # keep_idx = torch.cat(tuple(keep_idx), 0)\n\n # scores_keep = scores.view(-1)[keep_idx].view(batch_size, trim_size)\n # proposals_keep = proposals.view(-1, 4)[keep_idx, :].contiguous().view(batch_size, trim_size, 4)\n\n # _, order = torch.sort(scores_keep, 1, True)\n\n scores_keep = scores\n proposals_keep = proposals\n _, order = torch.sort(scores_keep, 1, True)\n\n output = scores.new(batch_size, post_nms_topN, 5).zero_()\n for i in range(batch_size):\n # # 3. remove predicted boxes with either height or width < threshold\n # # (NOTE: convert min_size to input image scale stored in im_info[2])\n proposals_single = proposals_keep[i]\n scores_single = scores_keep[i]\n\n # # 4. sort all (proposal, score) pairs by score from highest to lowest\n # # 5. take top pre_nms_topN (e.g. 6000)\n order_single = order[i]\n\n if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():\n order_single = order_single[:pre_nms_topN]\n\n proposals_single = proposals_single[order_single, :]\n scores_single = scores_single[order_single].view(-1, 1)\n\n # 6. apply nms (e.g. threshold = 0.7)\n # 7. take after_nms_topN (e.g. 300)\n # 8. return the top proposals (-> RoIs top)\n\n keep_idx_i = nms(torch.cat((proposals_single, scores_single), 1), nms_thresh, force_cpu=not cfg.USE_GPU_NMS)\n keep_idx_i = keep_idx_i.long().view(-1)\n\n if post_nms_topN > 0:\n keep_idx_i = keep_idx_i[:post_nms_topN]\n proposals_single = proposals_single[keep_idx_i, :]\n scores_single = scores_single[keep_idx_i, :]\n\n # padding 0 at the end.\n num_proposal = proposals_single.size(0)\n output[i, :, 0] = i\n output[i, :num_proposal, 1:] = proposals_single\n\n return output\n\n def backward(self, top, propagate_down, bottom):\n \"\"\"This layer does not propagate gradients.\"\"\"\n pass\n\n def reshape(self, bottom, top):\n \"\"\"Reshaping happens during the call to forward.\"\"\"\n pass\n\n def _filter_boxes(self, boxes, min_size):\n \"\"\"Remove all boxes with any side smaller than min_size.\"\"\"\n ws = boxes[:, :, 2] - boxes[:, :, 0] + 1\n hs = boxes[:, :, 3] - boxes[:, :, 1] + 1\n keep = ((ws >= min_size.view(-1, 1).expand_as(ws)) & (hs >= min_size.view(-1, 1).expand_as(hs)))\n return keep\n"
] |
[
[
"numpy.array",
"torch.cat",
"numpy.arange",
"numpy.meshgrid",
"torch.sort"
]
] |
diptikaushal/TOPSIS-Dipti-101803601
|
[
"0919e64072d4f301f311ddf280bc0c7b920ecbd5"
] |
[
"topsis.py"
] |
[
"import pandas as pd\r\nimport sys\r\nfrom os import path\r\nimport numpy\r\nfrom sys import exit\r\n\r\ndef main(): \r\n if len(sys.argv)!=5:\r\n print(\"Incorrect no. of parameters passed.\")\r\n exit(0)\r\n \r\n i=sys.argv[1]\r\n w=sys.argv[2]\r\n im=sys.argv[3]\r\n result=sys.argv[4]\r\n if not i.endswith('.csv'):\r\n print(\"Input file is not in .csv format.\")\r\n exit(0)\r\n if not path.exists(i):\r\n print(\"No such file exists!!\")\r\n exit(0)\r\n \r\n f = pd.read_csv(i)\r\n c = f.shape[-1]\r\n if c<3:\r\n print(\"File should have at least 3 or more columns.\")\r\n exit(0)\r\n k=0\r\n for i in f.columns:\r\n k=k+1\r\n for j in f.index:\r\n if k!=1:\r\n v=isinstance(f[i][j],numpy.int64)\r\n v1=isinstance(f[i][j],float)\r\n if not v and not v1:\r\n print(f'It is not a numeric value in {k} column.')\r\n exit(0)\r\n weights=w.split(',')\r\n impacts=im.split(',')\r\n \r\n for i in range(0, len(weights)): \r\n weights[i] = int(weights[i]) \r\n \r\n if len(weights)!=len(impacts) and len(weights)!=len(f.iloc[:,1:]):\r\n print(\"No. of input Impacts, Weights and columns(from second to last) should be similar.\")\r\n exit(0)\r\n \r\n for j in impacts:\r\n if j!='+' and j!='-':\r\n print(\"Impact can be '+' or '-'.\")\r\n exit(0)\r\n \r\n if w.count(\",\")*2+1!=len(w) and im.count(\",\")*2+1!=len(im):\r\n print(\"Weights and Impacts should be separated by commas(,).\")\r\n exit(0)\r\n \r\n a=f.iloc[:,1:]\r\n vp=[]\r\n vn=[]\r\n sp=[]\r\n sn=[]\r\n skn=[]\r\n p=[]\r\n for col in range(a.shape[1]):\r\n total=0\r\n for row in range(a.shape[0]):\r\n total=total+a.iloc[row,col]**2\r\n total=total**0.5\r\n for i in range(a.shape[0]):\r\n a.iloc[i,col]=a.iloc[i,col]/total\r\n for j in range(a.shape[0]):\r\n a.iloc[j,col]=a.iloc[j,col]*weights[col]\r\n \r\n if impacts[col]=='+':\r\n vp.append(a.iloc[:,col].max())\r\n vn.append(a.iloc[:,col].min())\r\n else:\r\n vp.append(a.iloc[:,col].min())\r\n vn.append(a.iloc[:,col].max())\r\n \r\n for m in range(a.shape[0]):\r\n temp=0\r\n ans=0\r\n for n in range(a.shape[1]):\r\n temp=temp+(a.iloc[m,n]-vp[n])**2\r\n temp=temp**0.5\r\n sp.append(temp)\r\n \r\n for q in range(a.shape[1]):\r\n ans=ans+(a.iloc[m,q]-vn[q])**2\r\n ans=ans**0.5\r\n sn.append(ans)\r\n for w in range(0,len(sp)):\r\n skn.append(sp[w]+sn[w])\r\n for y in range(0,len(skn)):\r\n p.append(sn[y]/skn[y])\r\n \r\n f.insert(5,\"Topsis Score\",p)\r\n f.insert(6,\"Rank\",f[\"Topsis Score\"].rank(ascending=False))\r\n f.to_csv(result)\r\n\r\nif __name__ == \"__main__\":\r\n main()"
] |
[
[
"pandas.read_csv"
]
] |
dzwallkilled/pytorch-deeplab-xception
|
[
"d8c04a24641c8c31a6800a37de6a7bfe607e5495"
] |
[
"dataloaders/__init__.py"
] |
[
"from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, rip\nfrom torch.utils.data import DataLoader\n\n\ndef make_data_loader(args, **kwargs):\n\n if args.dataset == 'pascal':\n train_set = pascal.VOCSegmentation(args, split='train')\n val_set = pascal.VOCSegmentation(args, split='val')\n if args.use_sbd:\n sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])\n train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])\n\n num_class = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n test_loader = None\n\n return train_loader, val_loader, test_loader, num_class\n\n elif args.dataset == 'cityscapes':\n train_set = cityscapes.CityscapesSegmentation(args, split='train')\n val_set = cityscapes.CityscapesSegmentation(args, split='val')\n test_set = cityscapes.CityscapesSegmentation(args, split='test')\n num_class = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, test_loader, num_class\n\n elif args.dataset == 'coco':\n train_set = coco.COCOSegmentation(args, split='train')\n val_set = coco.COCOSegmentation(args, split='val')\n num_class = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n test_loader = None\n return train_loader, val_loader, test_loader, num_class\n\n elif args.dataset == 'rip':\n classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}\n import os\n from mypath import Path\n data_root = Path.db_root_dir(args.dataset)\n root = os.path.join(data_root, 'RipTrainingAllData')\n\n patches, level = args.rip_mode.split('-')\n if patches == 'patches':\n patches = 'COCOJSONPatches'\n elif patches == 'patches_v1':\n patches = 'COCOJSONPatches_v1'\n else:\n patches = 'COCOJSONs'\n # patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs'\n train_ann_file =os.path.join(data_root, patches, level, 'cv_5_fold', 'train_1.json')\n val_ann_file =os.path.join(data_root, patches, level, 'cv_5_fold', 'val_1.json')\n\n train_set = rip.RIPSegmentation(args, split='train', root=root, ann_file=train_ann_file)\n val_set = rip.RIPSegmentation(args, split='val', root=root, ann_file=val_ann_file)\n num_classes = classes[level]\n # NOTE: drop_last=True here to avoid situation when batch_size=1 which causes BatchNorm2d errors\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n test_loader = None\n return train_loader, val_loader, test_loader, num_classes\n\n else:\n raise NotImplementedError\n\n"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
Tirbo06/qlib
|
[
"ad0afc111cf27777bc05d712006ee5b14cc77840"
] |
[
"qlib/data/data.py"
] |
[
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport abc\nimport six\nimport time\nimport queue\nimport bisect\nimport logging\nimport importlib\nimport traceback\nimport numpy as np\nimport pandas as pd\nfrom multiprocessing import Pool\n\nfrom .cache import H\nfrom ..config import C\nfrom .ops import *\nfrom ..log import get_module_logger\nfrom ..utils import parse_field, read_bin, hash_args, normalize_cache_fields\nfrom .base import Feature\nfrom .cache import DiskDatasetCache, DiskExpressionCache\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass CalendarProvider(object):\n \"\"\"Calendar provider base class\n\n Provide calendar data.\n \"\"\"\n\n @abc.abstractmethod\n def calendar(self, start_time=None, end_time=None, freq=\"day\", future=False):\n \"\"\"Get calendar of certain market in given time range.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n ----------\n list\n calendar list\n \"\"\"\n raise NotImplementedError(\"Subclass of CalendarProvider must implement `calendar` method\")\n\n def locate_index(self, start_time, end_time, freq, future):\n \"\"\"Locate the start time index and end time index in a calendar under certain frequency.\n\n Parameters\n ----------\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n future : bool\n whether including future trading day\n\n Returns\n -------\n pd.Timestamp\n the real start time\n pd.Timestamp\n the real end time\n int\n the index of start time\n int\n the index of end time\n \"\"\"\n start_time = pd.Timestamp(start_time)\n end_time = pd.Timestamp(end_time)\n calendar, calendar_index = self._get_calendar(freq=freq, future=future)\n if start_time not in calendar_index:\n try:\n start_time = calendar[bisect.bisect_left(calendar, start_time)]\n except IndexError:\n raise IndexError(\n \"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`\"\n )\n start_index = calendar_index[start_time]\n if end_time not in calendar_index:\n end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]\n end_index = calendar_index[end_time]\n return start_time, end_time, start_index, end_index\n\n def _get_calendar(self, freq, future):\n \"\"\"Load calendar using memcache.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n future : bool\n whether including future trading day\n\n Returns\n -------\n list\n list of timestamps\n dict\n dict composed by timestamp as key and index as value for fast search\n \"\"\"\n flag = f\"{freq}_future_{future}\"\n if flag in H[\"c\"]:\n _calendar, _calendar_index = H[\"c\"][flag]\n else:\n _calendar = np.array(self._load_calendar(freq, future))\n _calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search\n H[\"c\"][flag] = _calendar, _calendar_index\n return _calendar, _calendar_index\n\n def _uri(self, start_time, end_time, freq, future=False):\n \"\"\"Get the uri of calendar generation task.\"\"\"\n return hash_args(start_time, end_time, freq, future)\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass InstrumentProvider(object):\n \"\"\"Instrument provider base class\n\n Provide instrument data.\n \"\"\"\n\n 
@staticmethod\n def instruments(market=\"all\", filter_pipe=None):\n \"\"\"Get the general config dictionary for a base market adding several dynamic filters.\n\n Parameters\n ----------\n market : str\n market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500\n filter_pipe : list\n the list of dynamic filters\n\n Returns\n ----------\n dict\n dict of stockpool config\n {`market`=>base market name, `filter_pipe`=>list of filters}\n\n example :\n {'market': 'csi500',\n 'filter_pipe': [{'filter_type': 'ExpressionDFilter',\n 'rule_expression': '$open<40',\n 'filter_start_time': None,\n 'filter_end_time': None,\n 'keep': False},\n {'filter_type': 'NameDFilter',\n 'name_rule_re': 'SH[0-9]{4}55',\n 'filter_start_time': None,\n 'filter_end_time': None}]}\n \"\"\"\n if filter_pipe is None:\n filter_pipe = []\n config = {\"market\": market, \"filter_pipe\": []}\n # the order of the filters will affect the result, so we need to keep\n # the order\n for filter_t in filter_pipe:\n config[\"filter_pipe\"].append(filter_t.to_config())\n return config\n\n @abc.abstractmethod\n def list_instruments(self, instruments, start_time=None, end_time=None, freq=\"day\", as_list=False):\n \"\"\"List the instruments based on a certain stockpool config.\n\n Parameters\n ----------\n instruments : dict\n stockpool config\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n as_list : bool\n return instruments as list or dict\n\n Returns\n -------\n dict or list\n instruments list or dictionary with time spans\n \"\"\"\n raise NotImplementedError(\"Subclass of InstrumentProvider must implement `list_instruments` method\")\n\n def _uri(self, instruments, start_time=None, end_time=None, freq=\"day\", as_list=False):\n return hash_args(instruments, start_time, end_time, freq, as_list)\n\n # instruments type\n LIST = \"LIST\"\n DICT = \"DICT\"\n CONF = \"CONF\"\n\n @classmethod\n def get_inst_type(cls, inst):\n if \"market\" in inst:\n return cls.CONF\n if isinstance(inst, dict):\n return cls.DICT\n if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):\n return cls.LIST\n raise ValueError(f\"Unknown instrument type {inst}\")\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass FeatureProvider(object):\n \"\"\"Feature provider class\n\n Provide feature data.\n \"\"\"\n\n @abc.abstractmethod\n def feature(self, instrument, field, start_time, end_time, freq):\n \"\"\"Get feature data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain feature\n \"\"\"\n raise NotImplementedError(\"Subclass of FeatureProvider must implement `feature` method\")\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ExpressionProvider(object):\n \"\"\"Expression provider class\n\n Provide Expression data.\n \"\"\"\n\n def __init__(self):\n self.expression_instance_cache = {}\n\n def get_expression_instance(self, field):\n try:\n if field in self.expression_instance_cache:\n expression = self.expression_instance_cache[field]\n else:\n expression = eval(parse_field(field))\n self.expression_instance_cache[field] = expression\n except NameError as e:\n get_module_logger(\"data\").exception(\n \"ERROR: field [%s] contains invalid operator/variable [%s]\" % (str(field), str(e).split()[1])\n )\n raise\n except SyntaxError:\n 
get_module_logger(\"data\").exception(\"ERROR: field [%s] contains invalid syntax\" % str(field))\n raise\n return expression\n\n @abc.abstractmethod\n def expression(self, instrument, field, start_time=None, end_time=None, freq=\"day\"):\n \"\"\"Get Expression data.\n\n Parameters\n ----------\n instrument : str\n a certain instrument\n field : str\n a certain field of feature\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency, available: year/quarter/month/week/day\n\n Returns\n -------\n pd.Series\n data of a certain expression\n \"\"\"\n raise NotImplementedError(\"Subclass of ExpressionProvider must implement `Expression` method\")\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass DatasetProvider(object):\n \"\"\"Dataset provider class\n\n Provide Dataset data.\n \"\"\"\n\n @abc.abstractmethod\n def dataset(self, instruments, fields, start_time=None, end_time=None, freq=\"day\"):\n \"\"\"Get dataset data.\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n\n Returns\n ----------\n pd.DataFrame\n a pandas dataframe with <instrument, datetime> index\n \"\"\"\n raise NotImplementedError(\"Subclass of DatasetProvider must implement `Dataset` method\")\n\n def _uri(\n self,\n instruments,\n fields,\n start_time=None,\n end_time=None,\n freq=\"day\",\n disk_cache=1,\n **kwargs,\n ):\n \"\"\"Get task uri, used when generating rabbitmq task in qlib_server\n\n Parameters\n ----------\n instruments : list or dict\n list/dict of instruments or dict of stockpool config\n fields : list\n list of feature instances\n start_time : str\n start of the time range\n end_time : str\n end of the time range\n freq : str\n time frequency\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n \"\"\"\n return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)\n\n @staticmethod\n def get_instruments_d(instruments, freq):\n \"\"\"\n Parse different types of input instruments to output instruments_d\n Wrong format of input instruments will lead to exception.\n\n \"\"\"\n if isinstance(instruments, dict):\n if \"market\" in instruments:\n # dict of stockpool config\n instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)\n else:\n # dict of instruments and timestamp\n instruments_d = instruments\n elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):\n # list or tuple of a group of instruments\n instruments_d = list(instruments)\n else:\n raise ValueError(\"Unsupported input type for param `instrument`\")\n return instruments_d\n\n @staticmethod\n def get_column_names(fields):\n \"\"\"\n Get column names from input fields\n\n \"\"\"\n if len(fields) == 0:\n raise ValueError(\"fields cannot be empty\")\n fields = fields.copy()\n column_names = [str(f) for f in fields]\n return column_names\n\n @staticmethod\n def parse_fields(fields):\n # parse and check the input fields\n return [ExpressionD.get_expression_instance(f) for f in fields]\n\n @staticmethod\n def dataset_processor(instruments_d, column_names, start_time, end_time, freq):\n \"\"\"\n Load and process the data, return the data set.\n - default using multi-kernel method.\n\n \"\"\"\n normalize_column_names = normalize_cache_fields(column_names)\n data = dict()\n # One process for one 
task, so that the memory will be freed quicker.\n if C.maxtasksperchild is None:\n p = Pool(processes=C.kernels)\n else:\n p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)\n\n if isinstance(instruments_d, dict):\n for inst, spans in instruments_d.items():\n data[inst] = p.apply_async(\n DatasetProvider.expression_calculator,\n args=(\n inst,\n start_time,\n end_time,\n freq,\n normalize_column_names,\n spans,\n C,\n ),\n )\n else:\n for inst in instruments_d:\n data[inst] = p.apply_async(\n DatasetProvider.expression_calculator,\n args=(\n inst,\n start_time,\n end_time,\n freq,\n normalize_column_names,\n None,\n C,\n ),\n )\n\n p.close()\n p.join()\n\n new_data = dict()\n for inst in sorted(data.keys()):\n if len(data[inst].get()) > 0:\n # NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order\n new_data[inst] = data[inst].get()\n\n if len(new_data) > 0:\n data = pd.concat(new_data, names=[\"instrument\"], sort=False)\n data = DiskDatasetCache.cache_to_origin_data(data, column_names)\n else:\n data = pd.DataFrame(columns=column_names)\n\n return data\n\n @staticmethod\n def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):\n \"\"\"\n Calculate the expressions for one instrument, return a df result.\n If the expression has been calculated before, load from cache.\n\n return value: A data frame with index 'datetime' and other data columns.\n\n \"\"\"\n # NOTE: This place is compatible with windows, windows multi-process is spawn\n if getattr(ExpressionD, \"_provider\", None) is None:\n register_all_wrappers()\n\n obj = dict()\n for field in column_names:\n # The client does not have expression provider, the data will be loaded from cache using static method.\n obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)\n\n data = pd.DataFrame(obj)\n _calendar = Cal.calendar(freq=freq)\n data.index = _calendar[data.index.values.astype(np.int)]\n data.index.names = [\"datetime\"]\n\n if spans is None:\n return data\n else:\n mask = np.zeros(len(data), dtype=np.bool)\n for begin, end in spans:\n mask |= (data.index >= begin) & (data.index <= end)\n return data[mask]\n\n\nclass LocalCalendarProvider(CalendarProvider):\n \"\"\"Local calendar data provider class\n\n Provide calendar data from local data source.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.remote = kwargs.get(\"remote\", False)\n\n @property\n def _uri_cal(self):\n \"\"\"Calendar file uri.\"\"\"\n if self.remote:\n return os.path.join(C.mount_path, \"calendars\", \"{}.txt\")\n else:\n return os.path.join(C.provider_uri, \"calendars\", \"{}.txt\")\n\n def _load_calendar(self, freq, future):\n \"\"\"Load original calendar timestamp from file.\n\n Parameters\n ----------\n freq : str\n frequency of read calendar file\n\n Returns\n ----------\n list\n list of timestamps\n \"\"\"\n if future:\n fname = self._uri_cal.format(freq + \"_future\")\n # if future calendar not exists, return current calendar\n if not os.path.exists(fname):\n get_module_logger(\"data\").warning(f\"{freq}_future.txt not exists, return current calendar!\")\n fname = self._uri_cal.format(freq)\n else:\n fname = self._uri_cal.format(freq)\n if not os.path.exists(fname):\n raise ValueError(\"calendar not exists for freq \" + freq)\n with open(fname) as f:\n return [pd.Timestamp(x.strip()) for x in f]\n\n def calendar(self, start_time=None, end_time=None, freq=\"day\", future=False):\n _calendar, _calendar_index = 
self._get_calendar(freq, future)\n if start_time == \"None\":\n start_time = None\n if end_time == \"None\":\n end_time = None\n # strip\n if start_time:\n start_time = pd.Timestamp(start_time)\n if start_time > _calendar[-1]:\n return np.array([])\n else:\n start_time = _calendar[0]\n if end_time:\n end_time = pd.Timestamp(end_time)\n if end_time < _calendar[0]:\n return np.array([])\n else:\n end_time = _calendar[-1]\n _, _, si, ei = self.locate_index(start_time, end_time, freq, future)\n return _calendar[si : ei + 1]\n\n\nclass LocalInstrumentProvider(InstrumentProvider):\n \"\"\"Local instrument data provider class\n\n Provide instrument data from local data source.\n \"\"\"\n\n def __init__(self):\n pass\n\n @property\n def _uri_inst(self):\n \"\"\"Instrument file uri.\"\"\"\n return os.path.join(C.provider_uri, \"instruments\", \"{}.txt\")\n\n def _load_instruments(self, market):\n fname = self._uri_inst.format(market)\n if not os.path.exists(fname):\n raise ValueError(\"instruments not exists for market \" + market)\n _instruments = dict()\n with open(fname) as f:\n for line in f:\n inst_time = line.strip().split()\n inst = inst_time[0]\n if len(inst_time) == 3:\n # `day`\n begin = inst_time[1]\n end = inst_time[2]\n elif len(inst_time) == 5:\n # `1min`\n begin = inst_time[1] + \" \" + inst_time[2]\n end = inst_time[3] + \" \" + inst_time[4]\n _instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))\n return _instruments\n\n def list_instruments(self, instruments, start_time=None, end_time=None, freq=\"day\", as_list=False):\n market = instruments[\"market\"]\n if market in H[\"i\"]:\n _instruments = H[\"i\"][market]\n else:\n _instruments = self._load_instruments(market)\n H[\"i\"][market] = _instruments\n # strip\n # use calendar boundary\n cal = Cal.calendar(freq=freq)\n start_time = pd.Timestamp(start_time or cal[0])\n end_time = pd.Timestamp(end_time or cal[-1])\n _instruments_filtered = {\n inst: list(\n filter(\n lambda x: x[0] <= x[1],\n [(max(start_time, x[0]), min(end_time, x[1])) for x in spans],\n )\n )\n for inst, spans in _instruments.items()\n }\n _instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}\n # filter\n filter_pipe = instruments[\"filter_pipe\"]\n for filter_config in filter_pipe:\n from . 
import filter as F\n\n filter_t = getattr(F, filter_config[\"filter_type\"]).from_config(filter_config)\n _instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)\n # as list\n if as_list:\n return list(_instruments_filtered)\n return _instruments_filtered\n\n\nclass LocalFeatureProvider(FeatureProvider):\n \"\"\"Local feature data provider class\n\n Provide feature data from local data source.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.remote = kwargs.get(\"remote\", False)\n\n @property\n def _uri_data(self):\n \"\"\"Static feature file uri.\"\"\"\n if self.remote:\n return os.path.join(C.mount_path, \"features\", \"{}\", \"{}.{}.bin\")\n else:\n return os.path.join(C.provider_uri, \"features\", \"{}\", \"{}.{}.bin\")\n\n def feature(self, instrument, field, start_index, end_index, freq):\n # validate\n field = str(field).lower()[1:]\n uri_data = self._uri_data.format(instrument.lower(), field, freq)\n if not os.path.exists(uri_data):\n get_module_logger(\"data\").warning(\"WARN: data not found for %s.%s\" % (instrument, field))\n return pd.Series()\n # raise ValueError('uri_data not found: ' + uri_data)\n # load\n series = read_bin(uri_data, start_index, end_index)\n return series\n\n\nclass LocalExpressionProvider(ExpressionProvider):\n \"\"\"Local expression data provider class\n\n Provide expression data from local data source.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def expression(self, instrument, field, start_time=None, end_time=None, freq=\"day\"):\n expression = self.get_expression_instance(field)\n start_time = pd.Timestamp(start_time)\n end_time = pd.Timestamp(end_time)\n _, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)\n lft_etd, rght_etd = expression.get_extended_window_size()\n series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)\n # Ensure that each column type is consistent\n # FIXME: The stock data is currently float. 
If there is other types of data, this part needs to be re-implemented.\n try:\n series = series.astype(float)\n except ValueError:\n pass\n if not series.empty:\n series = series.loc[start_index:end_index]\n return series\n\n\nclass LocalDatasetProvider(DatasetProvider):\n \"\"\"Local dataset data provider class\n\n Provide dataset data from local data source.\n \"\"\"\n\n def __init__(self):\n pass\n\n def dataset(self, instruments, fields, start_time=None, end_time=None, freq=\"day\"):\n instruments_d = self.get_instruments_d(instruments, freq)\n column_names = self.get_column_names(fields)\n cal = Cal.calendar(start_time, end_time, freq)\n if len(cal) == 0:\n return pd.DataFrame(columns=column_names)\n start_time = cal[0]\n end_time = cal[-1]\n\n data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)\n\n return data\n\n @staticmethod\n def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq=\"day\"):\n \"\"\"\n This method is used to prepare the expression cache for the client.\n Then the client will load the data from expression cache by itself.\n\n \"\"\"\n instruments_d = DatasetProvider.get_instruments_d(instruments, freq)\n column_names = DatasetProvider.get_column_names(fields)\n cal = Cal.calendar(start_time, end_time, freq)\n if len(cal) == 0:\n return\n start_time = cal[0]\n end_time = cal[-1]\n\n if C.maxtasksperchild is None:\n p = Pool(processes=C.kernels)\n else:\n p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)\n\n for inst in instruments_d:\n p.apply_async(\n LocalDatasetProvider.cache_walker,\n args=(\n inst,\n start_time,\n end_time,\n freq,\n column_names,\n ),\n )\n\n p.close()\n p.join()\n\n @staticmethod\n def cache_walker(inst, start_time, end_time, freq, column_names):\n \"\"\"\n If the expressions of one instrument haven't been calculated before,\n calculate it and write it into expression cache.\n\n \"\"\"\n for field in column_names:\n ExpressionD.expression(inst, field, start_time, end_time, freq)\n\n\nclass ClientCalendarProvider(CalendarProvider):\n \"\"\"Client calendar data provider class\n\n Provide calendar data by requesting data from server as a client.\n \"\"\"\n\n def __init__(self):\n self.conn = None\n self.queue = queue.Queue()\n\n def set_conn(self, conn):\n self.conn = conn\n\n def calendar(self, start_time=None, end_time=None, freq=\"day\", future=False):\n self.conn.send_request(\n request_type=\"calendar\",\n request_content={\n \"start_time\": str(start_time),\n \"end_time\": str(end_time),\n \"freq\": freq,\n \"future\": future,\n },\n msg_queue=self.queue,\n msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],\n )\n result = self.queue.get(timeout=C[\"timeout\"])\n return result\n\n\nclass ClientInstrumentProvider(InstrumentProvider):\n \"\"\"Client instrument data provider class\n\n Provide instrument data by requesting data from server as a client.\n \"\"\"\n\n def __init__(self):\n self.conn = None\n self.queue = queue.Queue()\n\n def set_conn(self, conn):\n self.conn = conn\n\n def list_instruments(self, instruments, start_time=None, end_time=None, freq=\"day\", as_list=False):\n def inst_msg_proc_func(response_content):\n if isinstance(response_content, dict):\n instrument = {\n i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()\n }\n else:\n instrument = response_content\n return instrument\n\n self.conn.send_request(\n request_type=\"instrument\",\n request_content={\n \"instruments\": 
instruments,\n \"start_time\": str(start_time),\n \"end_time\": str(end_time),\n \"freq\": freq,\n \"as_list\": as_list,\n },\n msg_queue=self.queue,\n msg_proc_func=inst_msg_proc_func,\n )\n result = self.queue.get(timeout=C[\"timeout\"])\n if isinstance(result, Exception):\n raise result\n get_module_logger(\"data\").debug(\"get result\")\n return result\n\n\nclass ClientDatasetProvider(DatasetProvider):\n \"\"\"Client dataset data provider class\n\n Provide dataset data by requesting data from server as a client.\n \"\"\"\n\n def __init__(self):\n self.conn = None\n\n def set_conn(self, conn):\n self.conn = conn\n self.queue = queue.Queue()\n\n def dataset(\n self,\n instruments,\n fields,\n start_time=None,\n end_time=None,\n freq=\"day\",\n disk_cache=0,\n return_uri=False,\n ):\n if Inst.get_inst_type(instruments) == Inst.DICT:\n get_module_logger(\"data\").warning(\n \"Getting features from a dict of instruments is not recommended because the features will not be \"\n \"cached! \"\n \"The dict of instruments will be cleaned every day.\"\n )\n\n if disk_cache == 0:\n \"\"\"\n Call the server to generate the expression cache.\n Then load the data from the expression cache directly.\n - default using multi-kernel method.\n\n \"\"\"\n self.conn.send_request(\n request_type=\"feature\",\n request_content={\n \"instruments\": instruments,\n \"fields\": fields,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"freq\": freq,\n \"disk_cache\": 0,\n },\n msg_queue=self.queue,\n )\n feature_uri = self.queue.get(timeout=C[\"timeout\"])\n if isinstance(feature_uri, Exception):\n raise feature_uri\n else:\n instruments_d = self.get_instruments_d(instruments, freq)\n column_names = self.get_column_names(fields)\n cal = Cal.calendar(start_time, end_time, freq)\n if len(cal) == 0:\n return pd.DataFrame(columns=column_names)\n start_time = cal[0]\n end_time = cal[-1]\n\n data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)\n if return_uri:\n return data, feature_uri\n else:\n return data\n else:\n\n \"\"\"\n Call the server to generate the data-set cache, get the uri of the cache file.\n Then load the data from the file on NFS directly.\n - using single-process implementation.\n\n \"\"\"\n self.conn.send_request(\n request_type=\"feature\",\n request_content={\n \"instruments\": instruments,\n \"fields\": fields,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"freq\": freq,\n \"disk_cache\": 1,\n },\n msg_queue=self.queue,\n )\n # - Done in callback\n feature_uri = self.queue.get(timeout=C[\"timeout\"])\n if isinstance(feature_uri, Exception):\n raise feature_uri\n get_module_logger(\"data\").debug(\"get result\")\n try:\n # pre-mound nfs, used for demo\n mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)\n df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)\n get_module_logger(\"data\").debug(\"finish slicing data\")\n if return_uri:\n return df, feature_uri\n return df\n except AttributeError:\n raise IOError(\"Unable to fetch instruments from remote server!\")\n\n\nclass BaseProvider:\n \"\"\"Local provider class\n\n To keep compatible with old qlib provider.\n \"\"\"\n\n def calendar(self, start_time=None, end_time=None, freq=\"day\", future=False):\n return Cal.calendar(start_time, end_time, freq, future=future)\n\n def instruments(self, market=\"all\", filter_pipe=None, start_time=None, end_time=None):\n if start_time is not None or end_time is not None:\n 
get_module_logger(\"Provider\").warning(\n \"The instruments corresponds to a stock pool. \"\n \"Parameters `start_time` and `end_time` does not take effect now.\"\n )\n return InstrumentProvider.instruments(market, filter_pipe)\n\n def list_instruments(self, instruments, start_time=None, end_time=None, freq=\"day\", as_list=False):\n return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)\n\n def features(\n self,\n instruments,\n fields,\n start_time=None,\n end_time=None,\n freq=\"day\",\n disk_cache=None,\n ):\n \"\"\"\n disk_cache : int\n whether to skip(0)/use(1)/replace(2) disk_cache\n\n This function will try to use cache method which has a keyword `disk_cache`,\n and will use provider method if a type error is raised because the DatasetD instance\n is a provider class.\n \"\"\"\n disk_cache = C.default_disk_cache if disk_cache is None else disk_cache\n if C.disable_disk_cache:\n disk_cache = False\n try:\n return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)\n except TypeError:\n return DatasetD.dataset(instruments, fields, start_time, end_time, freq)\n\n\nclass LocalProvider(BaseProvider):\n def _uri(self, type, **kwargs):\n \"\"\"_uri\n The server hope to get the uri of the request. The uri will be decided\n by the dataprovider. For ex, different cache layer has different uri.\n\n :param type: The type of resource for the uri\n :param **kwargs:\n \"\"\"\n if type == \"calendar\":\n return Cal._uri(**kwargs)\n elif type == \"instrument\":\n return Inst._uri(**kwargs)\n elif type == \"feature\":\n return DatasetD._uri(**kwargs)\n\n def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):\n \"\"\"features_uri\n\n Return the uri of the generated cache of features/dataset\n\n :param disk_cache:\n :param instruments:\n :param fields:\n :param start_time:\n :param end_time:\n :param freq:\n \"\"\"\n return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)\n\n\nclass ClientProvider(BaseProvider):\n \"\"\"Client Provider\n\n Requesting data from server as a client. Can propose requests:\n - Calendar : Directly respond a list of calendars\n - Instruments (without filter): Directly respond a list/dict of instruments\n - Instruments (with filters): Respond a list/dict of instruments\n - Features : Respond a cache uri\n The general workflow is described as follows:\n When the user use client provider to propose a request, the client provider will connect the server and send the request. The client will start to wait for the response. The response will be made instantly indicating whether the cache is available. The waiting procedure will terminate only when the client get the reponse saying `feature_available` is true.\n `BUG` : Everytime we make request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. 
You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of python-socketIO client.\n \"\"\"\n\n def __init__(self):\n from .client import Client\n\n self.client = Client(C.flask_server, C.flask_port)\n self.logger = get_module_logger(self.__class__.__name__)\n if isinstance(Cal, ClientCalendarProvider):\n Cal.set_conn(self.client)\n Inst.set_conn(self.client)\n if hasattr(DatasetD, \"provider\"):\n DatasetD.provider.set_conn(self.client)\n else:\n DatasetD.set_conn(self.client)\n\n\nclass Wrapper(object):\n \"\"\"Data Provider Wrapper\"\"\"\n\n def __init__(self):\n self._provider = None\n\n def register(self, provider):\n self._provider = provider\n\n def __getattr__(self, key):\n if self._provider is None:\n raise AttributeError(\"Please run qlib.init() first using qlib\")\n return getattr(self._provider, key)\n\n\ndef get_cls_from_name(cls_name):\n return getattr(importlib.import_module(\".data\", package=\"qlib\"), cls_name)\n\n\ndef get_provider_obj(config, **params):\n if isinstance(config, dict):\n params.update(config[\"kwargs\"])\n config = config[\"class\"]\n return get_cls_from_name(config)(**params)\n\n\ndef register_wrapper(wrapper, cls_or_obj):\n \"\"\"register_wrapper\n\n :param wrapper: A wrapper of all kinds of providers\n :param cls_or_obj: A class or class name or object instance in data/data.py\n \"\"\"\n if isinstance(cls_or_obj, str):\n cls_or_obj = get_cls_from_name(cls_or_obj)\n obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj\n wrapper.register(obj)\n\n\nCal = Wrapper()\nInst = Wrapper()\nFeatureD = Wrapper()\nExpressionD = Wrapper()\nDatasetD = Wrapper()\nD = Wrapper()\n\n\ndef register_all_wrappers():\n \"\"\"register_all_wrappers\"\"\"\n logger = get_module_logger(\"data\")\n\n _calendar_provider = get_provider_obj(C.calendar_provider)\n if getattr(C, \"calendar_cache\", None) is not None:\n _calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)\n register_wrapper(Cal, _calendar_provider)\n logger.debug(f\"registering Cal {C.calendar_provider}-{C.calenar_cache}\")\n\n register_wrapper(Inst, C.instrument_provider)\n logger.debug(f\"registering Inst {C.instrument_provider}\")\n\n if getattr(C, \"feature_provider\", None) is not None:\n feature_provider = get_provider_obj(C.feature_provider)\n register_wrapper(FeatureD, feature_provider)\n logger.debug(f\"registering FeatureD {C.feature_provider}\")\n\n if getattr(C, \"expression_provider\", None) is not None:\n # This provider is unnecessary in client provider\n _eprovider = get_provider_obj(C.expression_provider)\n if getattr(C, \"expression_cache\", None) is not None:\n _eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)\n register_wrapper(ExpressionD, _eprovider)\n logger.debug(f\"registering ExpressioneD {C.expression_provider}-{C.expression_cache}\")\n\n _dprovider = get_provider_obj(C.dataset_provider)\n if getattr(C, \"dataset_cache\", None) is not None:\n _dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)\n register_wrapper(DatasetD, _dprovider)\n logger.debug(f\"registering DataseteD {C.dataset_provider}-{C.dataset_cache}\")\n\n register_wrapper(D, C.provider)\n logger.debug(f\"registering D {C.provider}\")\n"
] |
[
[
"numpy.array",
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.concat",
"pandas.Series"
]
] |
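A minimal sketch (not part of the dataset row above; the instrument codes and prices are invented) of how the numpy/pandas calls listed for `qlib/data/data.py` fit together: a calendar held as a `np.array` of `pd.Timestamp` with a dict for fast index lookup, and per-instrument frames combined with `pd.concat(..., names=["instrument"])` as in `dataset_processor`.

```python
import numpy as np
import pandas as pd

# calendar as an array of Timestamps plus {timestamp: position} for fast lookup,
# mirroring the _get_calendar caching pattern
_calendar = np.array([pd.Timestamp(d) for d in ["2020-01-02", "2020-01-03", "2020-01-06"]])
_calendar_index = {ts: i for i, ts in enumerate(_calendar)}

# per-instrument frames keyed by instrument, then concatenated as in dataset_processor
per_inst = {
    "SH600000": pd.DataFrame({"$close": pd.Series([10.0, 10.2, 10.1], index=_calendar)}),
    "SH600004": pd.DataFrame({"$close": pd.Series([5.0, 5.1, 5.3], index=_calendar)}),
}
data = pd.concat(per_inst, names=["instrument"], sort=False)
print(data.loc["SH600000"])  # the <instrument, datetime>-indexed frame described in the docstrings
```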
sandutsar/magenta
|
[
"77ed668af96edea7c993d38973b9da342bd31e82"
] |
[
"magenta/common/sequence_example_lib.py"
] |
[
"# Copyright 2022 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for working with tf.train.SequenceExamples.\"\"\"\n\nimport math\n\nimport tensorflow.compat.v1 as tf\n\nQUEUE_CAPACITY = 500\nSHUFFLE_MIN_AFTER_DEQUEUE = QUEUE_CAPACITY // 5\n\n\ndef _shuffle_inputs(input_tensors, capacity, min_after_dequeue, num_threads):\n \"\"\"Shuffles tensors in `input_tensors`, maintaining grouping.\"\"\"\n shuffle_queue = tf.RandomShuffleQueue(\n capacity, min_after_dequeue, dtypes=[t.dtype for t in input_tensors])\n enqueue_op = shuffle_queue.enqueue(input_tensors)\n runner = tf.train.QueueRunner(shuffle_queue, [enqueue_op] * num_threads)\n tf.train.add_queue_runner(runner)\n\n output_tensors = shuffle_queue.dequeue()\n\n for i in range(len(input_tensors)):\n output_tensors[i].set_shape(input_tensors[i].shape)\n\n return output_tensors\n\n\ndef get_padded_batch(file_list, batch_size, input_size, label_shape=None,\n num_enqueuing_threads=4, shuffle=False):\n \"\"\"Reads batches of SequenceExamples from TFRecords and pads them.\n\n Can deal with variable length SequenceExamples by padding each batch to the\n length of the longest sequence with zeros.\n\n Args:\n file_list: A list of paths to TFRecord files containing SequenceExamples.\n batch_size: The number of SequenceExamples to include in each batch.\n input_size: The size of each input vector. The returned batch of inputs\n will have a shape [batch_size, num_steps, input_size].\n label_shape: Shape for labels. If not specified, will use [].\n num_enqueuing_threads: The number of threads to use for enqueuing\n SequenceExamples.\n shuffle: Whether to shuffle the batches.\n\n Returns:\n inputs: A tensor of shape [batch_size, num_steps, input_size] of floats32s.\n labels: A tensor of shape [batch_size, num_steps] of int64s.\n lengths: A tensor of shape [batch_size] of int32s. 
The lengths of each\n SequenceExample before padding.\n Raises:\n ValueError: If `shuffle` is True and `num_enqueuing_threads` is less than 2.\n \"\"\"\n file_queue = tf.train.string_input_producer(file_list)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(file_queue)\n\n sequence_features = {\n 'inputs': tf.FixedLenSequenceFeature(shape=[input_size],\n dtype=tf.float32),\n 'labels': tf.FixedLenSequenceFeature(shape=label_shape or [],\n dtype=tf.int64)}\n\n _, sequence = tf.parse_single_sequence_example(\n serialized_example, sequence_features=sequence_features)\n\n length = tf.shape(sequence['inputs'])[0]\n input_tensors = [sequence['inputs'], sequence['labels'], length]\n\n if shuffle:\n if num_enqueuing_threads < 2:\n raise ValueError(\n '`num_enqueuing_threads` must be at least 2 when shuffling.')\n shuffle_threads = int(math.ceil(num_enqueuing_threads) / 2.)\n\n # Since there may be fewer records than SHUFFLE_MIN_AFTER_DEQUEUE, take the\n # minimum of that number and the number of records.\n min_after_dequeue = count_records(\n file_list, stop_at=SHUFFLE_MIN_AFTER_DEQUEUE)\n input_tensors = _shuffle_inputs(\n input_tensors, capacity=QUEUE_CAPACITY,\n min_after_dequeue=min_after_dequeue,\n num_threads=shuffle_threads)\n\n num_enqueuing_threads -= shuffle_threads\n\n tf.logging.info(input_tensors)\n return tf.train.batch(\n input_tensors,\n batch_size=batch_size,\n capacity=QUEUE_CAPACITY,\n num_threads=num_enqueuing_threads,\n dynamic_pad=True,\n allow_smaller_final_batch=False)\n\n\ndef count_records(file_list, stop_at=None):\n \"\"\"Counts number of records in files from `file_list` up to `stop_at`.\n\n Args:\n file_list: List of TFRecord files to count records in.\n stop_at: Optional number of records to stop counting at.\n\n Returns:\n Integer number of records in files from `file_list` up to `stop_at`.\n \"\"\"\n num_records = 0\n for tfrecord_file in file_list:\n tf.logging.info('Counting records in %s.', tfrecord_file)\n for _ in tf.python_io.tf_record_iterator(tfrecord_file):\n num_records += 1\n if stop_at and num_records >= stop_at:\n tf.logging.info('Number of records is at least %d.', num_records)\n return num_records\n tf.logging.info('Total records: %d', num_records)\n return num_records\n\n\ndef flatten_maybe_padded_sequences(maybe_padded_sequences, lengths=None):\n \"\"\"Flattens the batch of sequences, removing padding (if applicable).\n\n Args:\n maybe_padded_sequences: A tensor of possibly padded sequences to flatten,\n sized `[N, M, ...]` where M = max(lengths).\n lengths: Optional length of each sequence, sized `[N]`. If None, assumes no\n padding.\n\n Returns:\n flatten_maybe_padded_sequences: The flattened sequence tensor, sized\n `[sum(lengths), ...]`.\n \"\"\"\n def flatten_unpadded_sequences():\n # The sequences are equal length, so we should just flatten over the first\n # two dimensions.\n return tf.reshape(maybe_padded_sequences,\n [-1] + maybe_padded_sequences.shape.as_list()[2:])\n\n if lengths is None:\n return flatten_unpadded_sequences()\n\n def flatten_padded_sequences():\n indices = tf.where(tf.sequence_mask(lengths))\n return tf.gather_nd(maybe_padded_sequences, indices)\n\n return tf.cond(\n tf.equal(tf.reduce_min(lengths), tf.shape(maybe_padded_sequences)[1]),\n flatten_unpadded_sequences,\n flatten_padded_sequences)\n"
] |
[
[
"tensorflow.compat.v1.parse_single_sequence_example",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.gather_nd",
"tensorflow.compat.v1.train.string_input_producer",
"tensorflow.compat.v1.FixedLenSequenceFeature",
"tensorflow.compat.v1.train.QueueRunner",
"tensorflow.compat.v1.train.batch",
"tensorflow.compat.v1.train.add_queue_runner",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.reduce_min",
"tensorflow.compat.v1.TFRecordReader",
"tensorflow.compat.v1.sequence_mask",
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.compat.v1.RandomShuffleQueue"
]
] |
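A small sketch (not from the row above; the toy batch and lengths are invented, and it assumes TensorFlow 2.x with eager execution so these `tf.compat.v1` ops run directly) of the padding-removal pattern that `flatten_maybe_padded_sequences` builds from the listed `sequence_mask` / `gather_nd` calls.

```python
import tensorflow.compat.v1 as tf

# a [2, 3, 1] padded batch; the second sequence has only 2 valid steps
padded = tf.constant([[[1.0], [2.0], [3.0]],
                      [[4.0], [5.0], [0.0]]])
lengths = tf.constant([3, 2])

indices = tf.where(tf.sequence_mask(lengths))  # coordinates of the non-padding steps
flat = tf.gather_nd(padded, indices)           # shape [sum(lengths), 1] == [5, 1]
print(flat)
```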
jarvmiller/statsmodels
|
[
"15e90a99c81dd0b61c1aa76ebda2df008e88870d"
] |
[
"statsmodels/sandbox/tests/test_formula.py"
] |
[
"\"\"\"\nTest functions for models.formula\n\"\"\"\n\nimport string\n\nimport numpy as np\nimport numpy.random as R\nimport numpy.linalg as L\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_, \\\n assert_raises\n\nfrom statsmodels.sandbox import formula #, contrast #, utils\nfrom statsmodels.sandbox import contrast_old as contrast\n\n\nclass TestTerm(object):\n\n def test_init(self):\n t1 = formula.Term(\"trivial\")\n sqr = lambda x: x*x\n\n t2 = formula.Term(\"not_so_trivial\", sqr, \"sqr\")\n\n assert_raises(ValueError, formula.Term, \"name\", termname=0)\n\n\n def test_str(self):\n t = formula.Term(\"name\")\n s = str(t)\n\n def test_add(self):\n t1 = formula.Term(\"t1\")\n t2 = formula.Term(\"t2\")\n f = t1 + t2\n assert_(isinstance(f, formula.Formula))\n assert_(f.hasterm(t1))\n assert_(f.hasterm(t2))\n\n def test_mul(self):\n t1 = formula.Term(\"t1\")\n t2 = formula.Term(\"t2\")\n f = t1 * t2\n assert_(isinstance(f, formula.Formula))\n\n intercept = formula.Term(\"intercept\")\n f = t1 * intercept\n assert_equal(str(f), str(formula.Formula(t1)))\n\n f = intercept * t1\n assert_equal(str(f), str(formula.Formula(t1)))\n\nclass TestFormula(object):\n\n def setup(self):\n self.X = R.standard_normal((40,10))\n self.namespace = {}\n self.terms = []\n for i in range(10):\n name = '%s' % string.ascii_uppercase[i]\n self.namespace[name] = self.X[:,i]\n self.terms.append(formula.Term(name))\n\n self.formula = self.terms[0]\n for i in range(1, 10):\n self.formula += self.terms[i]\n self.formula.namespace = self.namespace\n\n def test_namespace(self):\n space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}\n space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}\n space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}\n X = formula.Term('X')\n Y = formula.Term('Y')\n\n X.namespace = space1\n assert_almost_equal(X(), np.arange(50))\n\n Y.namespace = space2\n assert_almost_equal(Y(), np.arange(20)*2)\n\n f = X + Y\n\n f.namespace = space1\n assert_equal(f().shape, (2,50))\n assert_almost_equal(Y(), np.arange(20)*2)\n assert_almost_equal(X(), np.arange(50))\n\n f.namespace = space2\n assert_equal(f().shape, (2,20))\n assert_almost_equal(Y(), np.arange(20)*2)\n assert_almost_equal(X(), np.arange(50))\n\n f.namespace = space3\n assert_equal(f().shape, (2,30))\n assert_almost_equal(Y(), np.arange(20)*2)\n assert_almost_equal(X(), np.arange(50))\n\n xx = X**2\n assert_equal(xx().shape, (50,))\n\n xx.namespace = space3\n assert_equal(xx().shape, (30,))\n\n xx = X * formula.I\n assert_equal(xx().shape, (50,))\n xx.namespace = space3\n assert_equal(xx().shape, (30,))\n\n xx = X * X\n assert_equal(xx.namespace, X.namespace)\n\n xx = X + Y\n assert_equal(xx.namespace, {})\n\n Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}\n xx = X + Y\n assert_equal(xx.namespace, {})\n\n Y.namespace = X.namespace\n xx = X+Y\n assert_equal(xx.namespace, Y.namespace)\n\n def test_termcolumns(self):\n t1 = formula.Term(\"A\")\n t2 = formula.Term(\"B\")\n f = t1 + t2 + t1 * t2\n def other(val):\n return np.array([3.2*val,4.342*val**2, 5.234*val**3])\n q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)\n f += q\n q.namespace = f.namespace = self.formula.namespace\n a = q()\n b = f()\n c = f.termcolumns(q)\n b = b[c]\n assert_almost_equal(a,b)\n\n\n def test_str(self):\n s = str(self.formula)\n\n def test_call(self):\n x = self.formula()\n assert_equal(np.array(x).shape, (10, 40))\n\n def test_design(self):\n x = self.formula.design()\n assert_equal(x.shape, 
(40, 10))\n\n def test_product(self):\n prod = self.formula['A'] * self.formula['C']\n f = self.formula + prod\n f.namespace = self.namespace\n x = f.design()\n p = f['A*C']\n p.namespace = self.namespace\n col = f.termcolumns(prod, dict=False)\n assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])\n assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])\n\n def test_intercept1(self):\n prod = self.terms[0] * self.terms[2]\n f = self.formula + formula.I\n icol = f.names().index('intercept')\n f.namespace = self.namespace\n assert_almost_equal(f()[icol], np.ones((40,)))\n\n def test_intercept3(self):\n t = self.formula['A']\n t.namespace = self.namespace\n prod = t * formula.I\n prod.namespace = self.formula.namespace\n assert_almost_equal(np.squeeze(prod()), t())\n\n def test_contrast1(self):\n term = self.terms[0] + self.terms[2]\n c = contrast.Contrast(term, self.formula)\n col1 = self.formula.termcolumns(self.terms[0], dict=False)\n col2 = self.formula.termcolumns(self.terms[1], dict=False)\n test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]\n assert_almost_equal(c.matrix, test)\n\n def test_contrast2(self):\n dummy = formula.Term('zero')\n self.namespace['zero'] = np.zeros((40,), np.float64)\n term = dummy + self.terms[2]\n c = contrast.Contrast(term, self.formula)\n test = [0]*2 + [1] + [0]*7\n assert_almost_equal(c.matrix, test)\n\n def test_contrast3(self):\n X = self.formula.design()\n P = np.dot(X, L.pinv(X))\n\n dummy = formula.Term('noise')\n resid = np.identity(40) - P\n self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))\n terms = dummy + self.terms[2]\n terms.namespace = self.formula.namespace\n c = contrast.Contrast(terms, self.formula)\n assert_equal(c.matrix.shape, (10,))\n\n def test_power(self):\n\n t = self.terms[2]\n t2 = t**2\n t.namespace = t2.namespace = self.formula.namespace\n assert_almost_equal(t()**2, t2())\n\n def test_quantitative(self):\n t = self.terms[2]\n sint = formula.Quantitative('t', func=t, transform=np.sin)\n t.namespace = sint.namespace = self.formula.namespace\n assert_almost_equal(np.sin(t()), sint())\n\n def test_factor1(self):\n f = ['a','b','c']*10\n fac = formula.Factor('ff', f)\n fac.namespace = {'ff':f}\n assert_equal(list(fac.values()), f)\n\n def test_factor2(self):\n f = ['a','b','c']*10\n fac = formula.Factor('ff', f)\n fac.namespace = {'ff':f}\n assert_equal(fac().shape, (3,30))\n\n def test_factor3(self):\n f = ['a','b','c']*10\n fac = formula.Factor('ff', f)\n fac.namespace = {'ff':f}\n m = fac.main_effect(reference=1)\n m.namespace = fac.namespace\n assert_equal(m().shape, (2,30))\n\n def test_factor4(self):\n f = ['a','b','c']*10\n fac = formula.Factor('ff', f)\n fac.namespace = {'ff':f}\n m = fac.main_effect(reference=2)\n m.namespace = fac.namespace\n r = np.array([np.identity(3)]*10)\n r.shape = (30,3)\n r = r.T\n _m = np.array([r[0]-r[2],r[1]-r[2]])\n assert_almost_equal(_m, m())\n\n def test_factor5(self):\n f = ['a','b','c']*3\n fac = formula.Factor('ff', f)\n fac.namespace = {'ff':f}\n\n assert_equal(fac(), [[1,0,0]*3,\n [0,1,0]*3,\n [0,0,1]*3])\n assert_equal(fac['a'], [1,0,0]*3)\n assert_equal(fac['b'], [0,1,0]*3)\n assert_equal(fac['c'], [0,0,1]*3)\n\n\n def test_ordinal_factor(self):\n f = ['a','b','c']*3\n fac = formula.Factor('ff', ['a','b','c'], ordinal=True)\n fac.namespace = {'ff':f}\n\n assert_equal(fac(), [0,1,2]*3)\n assert_equal(fac['a'], [1,0,0]*3)\n assert_equal(fac['b'], [0,1,0]*3)\n assert_equal(fac['c'], [0,0,1]*3)\n\n def test_ordinal_factor2(self):\n f = 
['b','c', 'a']*3\n fac = formula.Factor('ff', ['a','b','c'], ordinal=True)\n fac.namespace = {'ff':f}\n\n assert_equal(fac(), [1,2,0]*3)\n assert_equal(fac['a'], [0,0,1]*3)\n assert_equal(fac['b'], [1,0,0]*3)\n assert_equal(fac['c'], [0,1,0]*3)\n\n def test_contrast4(self):\n\n f = self.formula + self.terms[5] + self.terms[5]\n f.namespace = self.namespace\n estimable = False\n\n c = contrast.Contrast(self.terms[5], f)\n\n assert_equal(estimable, False)\n\n def test_interactions(self):\n\n f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])\n assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))\n\n f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)\n assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))\n\n f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])\n assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))\n\n f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])\n assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))\n\n def test_subtract(self):\n f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])\n ff = f - f['a*b']\n assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))\n\n ff = f - f['a*b'] - f['a*c']\n assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))\n\n ff = f - (f['a*b'] + f['a*c'])\n assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))\n"
] |
[
[
"numpy.array",
"numpy.random.standard_normal",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.identity",
"numpy.arange",
"numpy.testing.assert_raises",
"numpy.squeeze"
]
] |
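A short sketch (illustrative only; it reuses the 40x10 shape from the tests) of the projection/residual construction that `test_contrast3` assembles from the listed numpy calls (`standard_normal`, `linalg.pinv`, `identity`, `testing.assert_almost_equal`).

```python
import numpy as np
import numpy.linalg as L
from numpy.testing import assert_almost_equal

X = np.random.standard_normal((40, 10))
P = np.dot(X, L.pinv(X))        # projection onto the column space of X
resid = np.identity(40) - P     # projection onto the residual space
# X projected onto its own residual space is (numerically) zero
assert_almost_equal(np.dot(resid, X), np.zeros((40, 10)))
```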
redfrexx/osm_association_rules
|
[
"33975ce25047f9ab3b21e890bc5ed9bab59a0a2f"
] |
[
"src/ohsome/tests/test_ohsome_client.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nTests for ohsome client\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom nose.tools import raises\nimport geojson\nimport geopandas as gpd\nimport ohsome\n\n\n@raises(ohsome.OhsomeException)\ndef test_handle_multiple_responses_throw_timeouterror():\n \"\"\"\n Tests counting elements within a bounding box for two timestamps\n :return:\n \"\"\"\n # GIVEN\n bboxes = [8.67066,49.41423,8.68177,49.4204]\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n # WHEN\n client = ohsome.OhsomeClientParallel()\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2)\n del client\n\n\ndef test_elements_count():\n \"\"\"\n Tests counting elements within a bounding box for two timestamps\n :return:\n \"\"\"\n # GIVEN\n bboxes = [8.67066,49.41423,8.68177,49.4204]\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n timestamps = [\"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\"]\n counts = [53.0, 256.0]\n expected = pd.DataFrame({\"timestamp\": timestamps, \"value\": counts})\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_dataframe()\n del client\n\n # THEN\n assert expected.equals(result)\n\ndef test_elements_count_group_by_key():\n \"\"\"\n Tests counting elements within a bounding box and grouping them by keys\n :return:\n \"\"\"\n\n #GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01/2011-01-01/P1Y\"\n groupByKeys = [\"building\"]\n\n timestamps = [\"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\", \"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\"]\n counts = [482.0, 628.0, 53.0, 256.0]\n keys = [\"remainder\", \"remainder\", \"building\", \"building\"]\n expected = pd.DataFrame({\"key\": keys, \"timestamp\": timestamps, \"value\": counts})\n expected.set_index([\"key\", \"timestamp\"], inplace=True)\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time)\n results = response.as_dataframe()\n\n # THEN\n assert expected.equals(results)\n\ndef test_elemets_count_ratio():\n \"\"\"\n Tests count ratio\n :return:\n \"\"\"\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01\"\n keys = [\"building\"]\n keys2 = [\"addr:city\"]\n values = [\"\"]\n values2 = [\"\"]\n\n expected = 365.0\n\n client = ohsome.OhsomeClient()\n response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2,\n values=values, values2=values2)\n #results = response.as_dataframe()\n\n# Cache is disabled\n\"\"\"\ndef test_use_cache_dir():\n # GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01/2018-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n cache_dir = \"./tmp\"\n\n timestamps = [\"2010-01-01T00:00:00Z\", \"2011-01-01T00:00:00Z\"]\n counts = [53.0, 256.0]\n expected = pd.DataFrame({\"timestamp\": timestamps, \"value\": counts})\n\n # WHEN\n client = ohsome.OhsomeClient(cache_dir=cache_dir)\n assert os.path.exists(cache_dir)\n\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_dataframe()\n #del client\n\"\"\"\n\n@raises(AssertionError)\ndef test_elements_count_exception():\n \"\"\"\n Tests whether a TypeError is raised if the result cannot be converted to a geodataframe object\n :return:\n \"\"\"\n # GIVEN\n bboxes = 
\"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01/2011-01-01/P1Y\"\n keys = [\"building\"]\n values = [\"\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)\n response.as_geodataframe()\n\ndef test_elements_geometry():\n \"\"\"\n Tests whether the result of an elements/geometry query can be converted to a geodataframe\n :return:\n \"\"\"\n # GIVEN\n bboxes = \"8.67066,49.41423,8.68177,49.4204\"\n time = \"2010-01-01\"\n keys = [\"landuse\"]\n values = [\"grass\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)\n result = response.as_geodataframe()\n del client\n\n # THEN\n assert len(result.geometry) == 9\n\ndef test_to_file_assert_filetype():\n \"\"\"\n Asserts whether an error is thrown if the output file is not json or geojson\n :return:\n \"\"\"\n output_file = \"./out.shp\"\n\ndef test_format_coordinates():\n \"\"\"\n Asserts that coordinates of a MultiPolygon are concerted correctly\n :return:\n \"\"\"\n # GIVEN\n bpolys = geojson.FeatureCollection([{\"type\": \"Feature\",\n \"geometry\": {\"coordinates\": [[[[13,51], [13,51.1], [13.1,51.1], [13.1,51], [13,51]],\n [[13,51], [14,51.1], [14.1,51.1], [14.1,51], [14,51]]]],\n \"type\": \"MultiPolygon\"}}])\n time = \"2018-01-01\"\n keys = [\"landuse\"]\n values = [\"grass\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values)\n result = response.as_geodataframe()\n del client\n\n # THEN\n assert len(result.geometry) == 74\n\ndef test_format_geodataframe():\n\n # GIVEN\n bpolys = geojson.FeatureCollection([{\"type\": \"Feature\",\n \"properties\": {\"id\": 0},\n \"geometry\": {\"coordinates\": [\n [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],\n [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],\n \"type\": \"MultiPolygon\"}}])\n\n bpolys_df = gpd.GeoDataFrame().from_features(bpolys)\n time = \"2018-01-01\"\n keys = [\"amenity\"]\n values = [\"\"]\n format = \"geojson\"\n properties = [\"tags\", \"metadata\"]\n\n # WHEN\n client = ohsome.OhsomeClient()\n response = client.elements.count.groupBy.boundary.post(bpolys=bpolys_df, time=time, keys=keys, values=values,\n format=format, properties=properties)\n result = response.as_geodataframe()\n del client\n\n # THEN\n assert result[\"value\"][0] == 538\n\ndef test_parallel_user():\n\n # GIVEN\n bpolys = geojson.FeatureCollection([{\"type\": \"Feature\",\n \"properties\": {\"id\": 0},\n \"geometry\": {\"coordinates\": [\n [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],\n [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],\n \"type\": \"MultiPolygon\"}},\n {\"type\": \"Feature\",\n \"properties\": {\"id\": 1},\n \"geometry\": {\"coordinates\": [\n [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],\n [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],\n \"type\": \"MultiPolygon\"}}\n ])\n\n bpolys_df = gpd.GeoDataFrame().from_features(bpolys)\n timeperiod = \"2017-01-01,2018-01-01\"\n keys = [\"amenity\"]\n values = [\"\"]\n format = \"json\"\n properties = [\"metadata\"]\n\n # WHEN\n client = ohsome.OhsomeClientParallel(chunksize=1)\n response = client.users.count.groupBy.boundary.post(bpolys=bpolys_df, time=timeperiod, keys=keys, values=values,\n format=format, properties=properties)\n result = response.as_dataframe()\n del 
client\n\n # THEN\n assert result[\"value\"][0] == 33.\n"
] |
[
[
"pandas.DataFrame"
]
] |
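A tiny sketch (illustrative only; the result frame is built by hand here rather than fetched from the ohsome API) of the expected-frame pattern these tests use with the single listed call, `pandas.DataFrame`: construct the expected table and compare with `.equals`, as in `test_elements_count`.

```python
import pandas as pd

expected = pd.DataFrame({
    "timestamp": ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"],
    "value": [53.0, 256.0],
})
# in the real test this frame would come from response.as_dataframe()
result = pd.DataFrame({
    "timestamp": ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"],
    "value": [53.0, 256.0],
})
assert expected.equals(result)
```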
zhaoxin94/Dassl.pytorch
|
[
"c0690f3669c561f2ed7410c22fc65eaef30dfd22",
"c0690f3669c561f2ed7410c22fc65eaef30dfd22"
] |
[
"dassl/engine/trainer.py",
"dassl/modeling/backbone/T2T_CA/transformer_block_ca.py"
] |
[
"import time\nimport numpy as np\nimport os.path as osp\nimport datetime\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nimport nni\n\nfrom dassl.data import DataManager\nfrom dassl.optim import build_optimizer, build_lr_scheduler\nfrom dassl.utils import (\n MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,\n save_checkpoint, resume_from_checkpoint, load_pretrained_weights\n)\nfrom dassl.modeling import build_head, build_backbone\nfrom dassl.evaluation import build_evaluator\n\n\nclass SimpleNet(nn.Module):\n \"\"\"A simple neural network composed of a CNN backbone\n and optionally a head such as mlp for classification.\n \"\"\"\n\n def __init__(self, cfg, model_cfg, num_classes, **kwargs):\n super().__init__()\n self.backbone = build_backbone(\n model_cfg.BACKBONE.NAME,\n verbose=cfg.VERBOSE,\n pretrained=model_cfg.BACKBONE.PRETRAINED,\n **kwargs\n )\n fdim = self.backbone.out_features\n print(\"------------------------fdim:\", fdim)\n\n self.head = None\n if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:\n self.head = build_head(\n model_cfg.HEAD.NAME,\n verbose=cfg.VERBOSE,\n in_features=fdim,\n hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,\n activation=model_cfg.HEAD.ACTIVATION,\n bn=model_cfg.HEAD.BN,\n dropout=model_cfg.HEAD.DROPOUT,\n **kwargs\n )\n fdim = self.head.out_features\n\n self.classifier = None\n if num_classes > 0:\n self.classifier = nn.Linear(fdim, num_classes)\n\n self._fdim = fdim\n\n @property\n def fdim(self):\n return self._fdim\n\n def forward(self, x, return_feature=False):\n f = self.backbone(x)\n if self.head is not None:\n f = self.head(f)\n\n if self.classifier is None:\n return f\n\n y = self.classifier(f)\n\n if return_feature:\n return y, f\n\n return y\n\n\nclass TrainerBase:\n \"\"\"Base class for iterative trainer.\"\"\"\n\n def __init__(self):\n self._models = OrderedDict()\n self._optims = OrderedDict()\n self._scheds = OrderedDict()\n self._writer = None\n\n def register_model(self, name='model', model=None, optim=None, sched=None):\n if self.__dict__.get('_models') is None:\n raise AttributeError(\n 'Cannot assign model before super().__init__() call'\n )\n\n if self.__dict__.get('_optims') is None:\n raise AttributeError(\n 'Cannot assign optim before super().__init__() call'\n )\n\n if self.__dict__.get('_scheds') is None:\n raise AttributeError(\n 'Cannot assign sched before super().__init__() call'\n )\n\n assert name not in self._models, 'Found duplicate model names'\n\n self._models[name] = model\n self._optims[name] = optim\n self._scheds[name] = sched\n\n def get_model_names(self, names=None):\n names_real = list(self._models.keys())\n if names is not None:\n names = tolist_if_not(names)\n for name in names:\n assert name in names_real\n return names\n else:\n return names_real\n\n def save_model(self, epoch, directory, is_best=False, model_name=''):\n names = self.get_model_names()\n\n for name in names:\n model_dict = self._models[name].state_dict()\n\n optim_dict = None\n if self._optims[name] is not None:\n optim_dict = self._optims[name].state_dict()\n\n sched_dict = None\n if self._scheds[name] is not None:\n sched_dict = self._scheds[name].state_dict()\n\n save_checkpoint(\n {\n 'state_dict': model_dict,\n 'epoch': epoch + 1,\n 'optimizer': optim_dict,\n 'scheduler': sched_dict\n },\n osp.join(directory, name),\n is_best=is_best,\n model_name=model_name\n )\n\n def resume_model_if_exist(self, directory):\n names = 
self.get_model_names()\n file_missing = False\n\n for name in names:\n path = osp.join(directory, name)\n if not osp.exists(path):\n file_missing = True\n break\n\n if file_missing:\n print('No checkpoint found, train from scratch')\n return 0\n\n print(\n 'Found checkpoint in \"{}\". Will resume training'.format(directory)\n )\n\n for name in names:\n path = osp.join(directory, name)\n start_epoch = resume_from_checkpoint(\n path, self._models[name], self._optims[name],\n self._scheds[name]\n )\n\n return start_epoch\n\n def load_model(self, directory, epoch=None):\n if not directory:\n print(\n 'Note that load_model() is skipped as no pretrained model is given'\n )\n return\n\n names = self.get_model_names()\n\n # By default, the best model is loaded\n model_file = 'model-best.pth.tar'\n\n if epoch is not None:\n model_file = 'model.pth.tar-' + str(epoch)\n\n for name in names:\n model_path = osp.join(directory, name, model_file)\n\n if not osp.exists(model_path):\n raise FileNotFoundError(\n 'Model not found at \"{}\"'.format(model_path)\n )\n\n checkpoint = load_checkpoint(model_path)\n state_dict = checkpoint['state_dict']\n epoch = checkpoint['epoch']\n\n print(\n 'Loading weights to {} '\n 'from \"{}\" (epoch = {})'.format(name, model_path, epoch)\n )\n self._models[name].load_state_dict(state_dict)\n\n def set_model_mode(self, mode='train', names=None):\n names = self.get_model_names(names)\n\n for name in names:\n if mode == 'train':\n self._models[name].train()\n else:\n self._models[name].eval()\n\n def update_lr(self, names=None):\n names = self.get_model_names(names)\n\n for name in names:\n if self._scheds[name] is not None:\n self._scheds[name].step()\n\n def detect_anomaly(self, loss):\n if not torch.isfinite(loss).all():\n raise FloatingPointError('Loss is infinite or NaN!')\n\n def init_writer(self, log_dir):\n if self.__dict__.get('_writer') is None or self._writer is None:\n print(\n 'Initializing summary writer for tensorboard '\n 'with log_dir={}'.format(log_dir)\n )\n self._writer = SummaryWriter(log_dir=log_dir)\n\n def close_writer(self):\n if self._writer is not None:\n self._writer.close()\n\n def write_scalar(self, tag, scalar_value, global_step=None):\n if self._writer is None:\n # Do nothing if writer is not initialized\n # Note that writer is only used when training is needed\n pass\n else:\n self._writer.add_scalar(tag, scalar_value, global_step)\n\n def train(self, start_epoch, max_epoch):\n \"\"\"Generic training loops.\"\"\"\n self.start_epoch = start_epoch\n self.max_epoch = max_epoch\n\n self.before_train()\n for self.epoch in range(self.start_epoch, self.max_epoch):\n self.before_epoch()\n self.run_epoch()\n self.after_epoch()\n self.after_train()\n\n def before_train(self):\n pass\n\n def after_train(self):\n pass\n\n def before_epoch(self):\n pass\n\n def after_epoch(self):\n pass\n\n def run_epoch(self):\n raise NotImplementedError\n\n def test(self):\n raise NotImplementedError\n\n def parse_batch_train(self, batch):\n raise NotImplementedError\n\n def parse_batch_test(self, batch):\n raise NotImplementedError\n\n def forward_backward(self, batch):\n raise NotImplementedError\n\n def model_inference(self, input):\n raise NotImplementedError\n\n def model_zero_grad(self, names=None):\n names = self.get_model_names(names)\n for name in names:\n if self._optims[name] is not None:\n self._optims[name].zero_grad()\n\n def model_backward(self, loss):\n self.detect_anomaly(loss)\n if not self.use_amp:\n loss.backward()\n else:\n 
self.scaler.scale(loss).backward()\n\n def model_update(self, names=None):\n names = self.get_model_names(names)\n for name in names:\n if self._optims[name] is not None:\n if not self.use_amp:\n self._optims[name].step()\n else:\n self.scaler.step(self._optims[name])\n\n def model_backward_and_update(self, loss, names=None):\n self.model_zero_grad(names)\n self.model_backward(loss)\n self.model_update(names)\n if self.use_amp:\n self.scaler.update()\n\n\nclass SimpleTrainer(TrainerBase):\n \"\"\"A simple trainer class implementing generic functions.\"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.check_cfg(cfg)\n\n if torch.cuda.is_available() and cfg.USE_CUDA:\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n\n # use amp to accelerate training\n self.use_amp = cfg.TRAIN.USE_AMP\n if self.use_amp:\n self.scaler = torch.cuda.amp.GradScaler()\n\n # Save as attributes some frequently used variables\n self.start_epoch = self.epoch = 0\n self.max_epoch = cfg.OPTIM.MAX_EPOCH\n self.output_dir = cfg.OUTPUT_DIR\n\n self.cfg = cfg\n self.build_data_loader()\n self.build_model()\n self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)\n\n # zhaoxin modify\n self.best_val_acc = -np.inf\n self.best_test_acc = -np.inf\n self.best_val_test_acc = 0\n self.best_val_epoch = 0\n self.best_test_epoch = 0\n\n def check_cfg(self, cfg):\n \"\"\"Check whether some variables are set correctly for\n the trainer (optional).\n\n For example, a trainer might require a particular sampler\n for training such as 'RandomDomainSampler', so it is good\n to do the checking:\n\n assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'\n \"\"\"\n pass\n\n def build_data_loader(self):\n \"\"\"Create essential data-related attributes.\n\n What must be done in the re-implementation\n of this method:\n 1) initialize data manager\n 2) assign as attributes the data loaders\n 3) assign as attribute the number of classes\n \"\"\"\n self.dm = DataManager(self.cfg)\n self.train_loader_x = self.dm.train_loader_x\n self.train_loader_u = self.dm.train_loader_u\n self.val_loader = self.dm.val_loader\n self.test_loader = self.dm.test_loader\n self.num_classes = self.dm.num_classes\n\n def build_model(self):\n \"\"\"Build and register model.\n\n The default builds a classification model along with its\n optimizer and scheduler.\n\n Custom trainers can re-implement this method if necessary.\n \"\"\"\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)\n # for name, module in self.model.named_children():\n # print(name)\n if cfg.MODEL.INIT_WEIGHTS:\n load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n def train(self):\n super().train(self.start_epoch, self.max_epoch)\n\n def before_train(self):\n # directory = self.cfg.OUTPUT_DIR\n if self.cfg.RESUME:\n directory = self.cfg.RESUME\n self.start_epoch = self.resume_model_if_exist(directory)\n\n # Initialize summary writer\n self.init_writer(self.output_dir)\n\n # Remember the starting time (for computing the elapsed time)\n self.time_start = time.time()\n\n def after_train(self):\n print('Finished training')\n\n do_test = not self.cfg.TEST.NO_TEST\n if do_test and not self.cfg.NNI:\n if 
self.cfg.TEST.FINAL_MODEL == 'best_val':\n print('Deploy the model with the best val performance')\n self.load_model(self.output_dir)\n # zhaoxin modify\n if self.cfg.TEST.PER_CLASS_RESULT:\n self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)\n perclass_path = osp.join(self.output_dir, 'perclass_result.txt')\n with open(perclass_path, 'w') as f:\n for acc in per_class_accs:\n f.write(\"{:6f}\\n\".format(acc))\n else:\n self.best_val_test_acc = self.test()\n\n # zhaoxin add\n if self.cfg.TEST.FINAL_MODEL == 'best_val':\n print(\n 'best_val_acc: {}\\nbest_val_epoch: {}\\nbest_val_test_acc: {}'.\n format(\n self.best_val_acc, self.best_val_epoch,\n self.best_val_test_acc\n )\n )\n if self.cfg.TEST.TEST_EVERY_EPOCH:\n print(\n 'best_test_acc: {}\\nbest_test_epoch: {}'.format(\n self.best_test_acc, self.best_test_epoch\n )\n )\n\n result_path = osp.join(self.output_dir, 'result.txt')\n with open(result_path, 'w') as f:\n f.write(\"{:6f}\\n\".format(self.best_val_test_acc))\n\n if self.cfg.NNI:\n nni.report_final_result(self.best_val_acc)\n\n # Show elapsed time\n elapsed = round(time.time() - self.time_start)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print('Elapsed: {}'.format(elapsed))\n\n # Close writer\n self.close_writer()\n\n def after_epoch(self):\n last_epoch = (self.epoch + 1) == self.max_epoch\n do_test = not self.cfg.TEST.NO_TEST\n meet_checkpoint_freq = (\n self.epoch + 1\n ) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False\n\n # zhaoxin modify\n if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':\n curr_val_acc = self.test(split='val')\n\n # nni: report intermediate result\n if self.cfg.NNI:\n nni.report_intermediate_result(curr_val_acc)\n\n is_best = curr_val_acc > self.best_val_acc\n if is_best:\n self.best_val_acc = curr_val_acc\n self.best_val_epoch = self.epoch + 1\n self.save_model(\n self.epoch,\n self.output_dir,\n model_name='model-best.pth.tar'\n )\n\n if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:\n curr_test_acc = self.test(split='test')\n if curr_test_acc > self.best_test_acc:\n self.best_test_acc = curr_test_acc\n self.best_test_epoch = self.epoch + 1\n\n # if self.cfg.TEST.FINAL_MODEL == 'best_val':\n # if is_best:\n # self.best_val_test_acc = curr_test_acc\n\n if meet_checkpoint_freq or last_epoch:\n self.save_model(self.epoch, self.output_dir)\n\n @torch.no_grad()\n def test(self, split=None, return_per_class_results=False):\n \"\"\"A generic testing pipeline.\"\"\"\n self.set_model_mode('eval')\n self.evaluator.reset()\n\n if split is None:\n split = self.cfg.TEST.SPLIT\n\n if split == 'val' and self.val_loader is not None:\n data_loader = self.val_loader\n print('Do evaluation on {} set'.format(split))\n else:\n data_loader = self.test_loader\n print('Do evaluation on test set')\n\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n\n for k, v in results.items():\n if k == 'perclass_accuracies':\n continue\n tag = '{}/{}'.format(split, k)\n self.write_scalar(tag, v, self.epoch)\n if not return_per_class_results:\n return list(results.values())[0]\n else:\n return results['accuracy'], results['perclass_accuracies']\n\n def model_inference(self, input):\n return self.model(input)\n\n def parse_batch_test(self, batch):\n input = batch['img']\n label = batch['label']\n\n input = input.to(self.device)\n label = 
label.to(self.device)\n\n return input, label\n\n def get_current_lr(self, names=None):\n names = self.get_model_names(names)\n name = names[0]\n return self._optims[name].param_groups[0]['lr']\n\n\nclass TrainerXU(SimpleTrainer):\n \"\"\"A base trainer using both labeled and unlabeled data.\n\n In the context of domain adaptation, labeled and unlabeled data\n come from source and target domains respectively.\n\n When it comes to semi-supervised learning, all data comes from the\n same domain.\n \"\"\"\n\n def run_epoch(self):\n self.set_model_mode('train')\n losses = MetricMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n # Decide to iterate over labeled or unlabeled dataset\n len_train_loader_x = len(self.train_loader_x)\n len_train_loader_u = len(self.train_loader_u)\n if self.cfg.TRAIN.COUNT_ITER == 'train_x':\n self.num_batches = len_train_loader_x\n elif self.cfg.TRAIN.COUNT_ITER == 'train_u':\n self.num_batches = len_train_loader_u\n elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':\n self.num_batches = min(len_train_loader_x, len_train_loader_u)\n else:\n raise ValueError\n\n train_loader_x_iter = iter(self.train_loader_x)\n train_loader_u_iter = iter(self.train_loader_u)\n\n end = time.time()\n for self.batch_idx in range(self.num_batches):\n try:\n batch_x = next(train_loader_x_iter)\n except StopIteration:\n train_loader_x_iter = iter(self.train_loader_x)\n batch_x = next(train_loader_x_iter)\n\n try:\n batch_u = next(train_loader_u_iter)\n except StopIteration:\n train_loader_u_iter = iter(self.train_loader_u)\n batch_u = next(train_loader_u_iter)\n\n data_time.update(time.time() - end)\n loss_summary = self.forward_backward(batch_x, batch_u)\n batch_time.update(time.time() - end)\n losses.update(loss_summary)\n\n if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:\n nb_this_epoch = self.num_batches - (self.batch_idx + 1)\n nb_future_epochs = (\n self.max_epoch - (self.epoch + 1)\n ) * self.num_batches\n eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n print(\n 'epoch [{0}/{1}][{2}/{3}]\\t'\n 'time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'eta {eta}\\t'\n '{losses}\\t'\n 'lr {lr}'.format(\n self.epoch + 1,\n self.max_epoch,\n self.batch_idx + 1,\n self.num_batches,\n batch_time=batch_time,\n data_time=data_time,\n eta=eta,\n losses=losses,\n lr=self.get_current_lr()\n )\n )\n\n n_iter = self.epoch * self.num_batches + self.batch_idx\n for name, meter in losses.meters.items():\n self.write_scalar('train/' + name, meter.avg, n_iter)\n self.write_scalar('train/lr', self.get_current_lr(), n_iter)\n\n end = time.time()\n\n def parse_batch_train(self, batch_x, batch_u):\n input_x = batch_x['img']\n label_x = batch_x['label']\n input_u = batch_u['img']\n\n input_x = input_x.to(self.device)\n label_x = label_x.to(self.device)\n input_u = input_u.to(self.device)\n\n return input_x, label_x, input_u\n\n\nclass TrainerX(SimpleTrainer):\n \"\"\"A base trainer using labeled data only.\"\"\"\n\n def run_epoch(self):\n self.set_model_mode('train')\n losses = MetricMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n self.num_batches = len(self.train_loader_x)\n\n end = time.time()\n for self.batch_idx, batch in enumerate(self.train_loader_x):\n data_time.update(time.time() - end)\n loss_summary = self.forward_backward(batch)\n batch_time.update(time.time() - end)\n losses.update(loss_summary)\n\n if (self.batch_idx 
+ 1) % self.cfg.TRAIN.PRINT_FREQ == 0:\n nb_this_epoch = self.num_batches - (self.batch_idx + 1)\n nb_future_epochs = (\n self.max_epoch - (self.epoch + 1)\n ) * self.num_batches\n eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n print(\n 'epoch [{0}/{1}][{2}/{3}]\\t'\n 'time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'eta {eta}\\t'\n '{losses}\\t'\n 'lr {lr}'.format(\n self.epoch + 1,\n self.max_epoch,\n self.batch_idx + 1,\n self.num_batches,\n batch_time=batch_time,\n data_time=data_time,\n eta=eta,\n losses=losses,\n lr=self.get_current_lr()\n )\n )\n\n n_iter = self.epoch * self.num_batches + self.batch_idx\n for name, meter in losses.meters.items():\n self.write_scalar('train/' + name, meter.avg, n_iter)\n self.write_scalar('train/lr', self.get_current_lr(), n_iter)\n\n end = time.time()\n\n def parse_batch_train(self, batch):\n input = batch['img']\n label = batch['label']\n domain = batch['domain']\n\n input = input.to(self.device)\n label = label.to(self.device)\n domain = domain.to(self.device)\n\n return input, label, domain\n",
"# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.\n#\n# This source code is licensed under the Clear BSD License\n# LICENSE file in the root directory of this file\n# All rights reserved.\n\"\"\"\nBorrow from timm(https://github.com/rwightman/pytorch-image-models)\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom timm.models.layers import DropPath\n\n\nclass Mlp(nn.Module):\n\n def __init__(\n self,\n in_features,\n hidden_features=None,\n out_features=None,\n act_layer=nn.GELU,\n drop=0.\n ):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Attention(nn.Module):\n\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.,\n proj_drop=0.,\n cross_attn=False\n ):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n\n self.scale = qk_scale or head_dim**-0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.cross_attn = cross_attn\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x\n ).reshape(B, N, 3, self.num_heads,\n C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n if self.cross_attn:\n q = torch.cat((q[B // 2:], q[:B // 2]))\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Block(nn.Module):\n\n def __init__(\n self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop=0.,\n attn_drop=0.,\n drop_path=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n layer_depth=0,\n cross_attn=False\n ):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=drop,\n cross_attn=cross_attn\n )\n self.drop_path = DropPath(drop_path\n ) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(\n in_features=dim,\n hidden_features=mlp_hidden_dim,\n act_layer=act_layer,\n drop=drop\n )\n if cross_attn:\n print(\n 'insert cross attention layer in the depth {}'.\n format(layer_depth)\n )\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\ndef get_sinusoid_encoding(n_position, d_hid):\n ''' Sinusoid position encoding table '''\n\n def get_position_angle_vec(position):\n return [\n position / np.power(10000, 2 * (hid_j//2) / d_hid)\n for hid_j in range(d_hid)\n ]\n\n sinusoid_table = np.array(\n [get_position_angle_vec(pos_i) for pos_i in range(n_position)]\n )\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\n\n return torch.FloatTensor(sinusoid_table).unsqueeze(0)\n"
] |
[
[
"torch.nn.Linear",
"torch.device",
"torch.no_grad",
"torch.isfinite",
"torch.cuda.is_available",
"torch.cuda.amp.GradScaler",
"torch.utils.tensorboard.SummaryWriter"
],
[
"torch.nn.Linear",
"numpy.sin",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.FloatTensor",
"numpy.power",
"numpy.cos"
]
] |
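Aside on the record above: the second file (the timm-derived transformer blocks) builds its positional embedding with get_sinusoid_encoding, i.e. angle[pos, j] = pos / 10000^(2*(j//2)/d_hid), with sin on even dimensions and cos on odd ones. A vectorised sketch of the same table, assuming nothing beyond the formula shown in the record (the helper name sinusoid_table is ours):

import numpy as np
import torch

def sinusoid_table(n_position, d_hid):
    # angle[pos, j] = pos / 10000 ** (2 * (j // 2) / d_hid), as in the record
    positions = np.arange(n_position)[:, None]          # (n_position, 1)
    dim_idx = np.arange(d_hid)[None, :]                  # (1, d_hid)
    angles = positions / np.power(10000, 2 * (dim_idx // 2) / d_hid)
    table = np.zeros((n_position, d_hid))
    table[:, 0::2] = np.sin(angles[:, 0::2])             # even dims -> sin
    table[:, 1::2] = np.cos(angles[:, 1::2])             # odd dims  -> cos
    return torch.FloatTensor(table).unsqueeze(0)         # (1, n_position, d_hid)

print(sinusoid_table(4, 6).shape)  # torch.Size([1, 4, 6])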
bhevencious/BioNEV
|
[
"3ec46c503fb147a8fb1b017d90b0f4ba2317f8f7"
] |
[
"src/bionev/evaluation.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nfrom bionev.utils import *\n\n\ndef LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):\n random.seed(seed)\n\n train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)\n\n # create a auxiliary graph to ensure that testing negative edges will not used in training\n G_aux = copy.deepcopy(original_graph)\n G_aux.add_edges_from(train_neg_edges)\n test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)\n\n # construct X_train, y_train, X_test, y_test\n X_train = []\n y_train = []\n for edge in train_graph.edges():\n node_u_emb = embedding_look_up[edge[0]]\n node_v_emb = embedding_look_up[edge[1]]\n feature_vector = np.append(node_u_emb, node_v_emb)\n X_train.append(feature_vector)\n y_train.append(1)\n for edge in train_neg_edges:\n node_u_emb = embedding_look_up[edge[0]]\n node_v_emb = embedding_look_up[edge[1]]\n feature_vector = np.append(node_u_emb, node_v_emb)\n X_train.append(feature_vector)\n y_train.append(0)\n\n X_test = []\n y_test = []\n for edge in test_pos_edges:\n node_u_emb = embedding_look_up[edge[0]]\n node_v_emb = embedding_look_up[edge[1]]\n feature_vector = np.append(node_u_emb, node_v_emb)\n X_test.append(feature_vector)\n y_test.append(1)\n for edge in test_neg_edges:\n node_u_emb = embedding_look_up[edge[0]]\n node_v_emb = embedding_look_up[edge[1]]\n feature_vector = np.append(node_u_emb, node_v_emb)\n X_test.append(feature_vector)\n y_test.append(0)\n\n # shuffle for training and testing\n c = list(zip(X_train, y_train))\n random.shuffle(c)\n X_train, y_train = zip(*c)\n\n c = list(zip(X_test, y_test))\n random.shuffle(c)\n X_test, y_test = zip(*c)\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n\n clf1 = LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs')\n clf1.fit(X_train, y_train)\n y_pred_proba = clf1.predict_proba(X_test)[:, 1]\n y_pred = clf1.predict(X_test)\n auc_roc = roc_auc_score(y_test, y_pred_proba)\n avg_pr = average_precision_score(y_test, y_pred_proba)\n precision = precision_score(y_test, y_pred, average='binary')\n recall = recall_score(y_test, y_pred, average='binary')\n accuracy = accuracy_score(y_test, y_pred)\n f1 = f1_score(y_test, y_pred)\n mcc = matthews_corrcoef(y_test, y_pred)\n top_1, top_3 = predHits(y_test, y_pred, clf1.predict(X_test), clf1.predict(X_test))\n print('#' * 35 + ' Link Prediction Performance ' + '#' * 35)\n print(f'AUC-ROC: {auc_roc:.3f}, AVG-PR: {avg_pr:.3f}, Precision: {precision:.3f}, Recall: {recall:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}, MCC: {mcc:.3f}, Top_1: {top_1:.3f}, Top_3: {top_3:.3f}')\n print('#' * 100)\n return auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3\n\n\ndef NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):\n\n X_train, y_train, X_test, y_test = split_train_test_classify(embedding_look_up, node_list, labels,\n testing_ratio=testing_ratio,seed=seed)\n binarizer = MultiLabelBinarizer(sparse_output=True)\n y_all = np.append(y_train, y_test)\n binarizer.fit(y_all)\n y_train = binarizer.transform(y_train).todense()\n y_test = 
binarizer.transform(y_test).todense()\n model = OneVsRestClassifier(LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs'))\n model.fit(X_train, y_train)\n y_pred_prob = model.predict_proba(X_test)\n\n ## small trick : we assume that we know how many label to predict\n y_pred = get_y_pred(y_test, y_pred_prob)\n\n accuracy = accuracy_score(y_test, y_pred)\n micro_f1 = f1_score(y_test, y_pred, average=\"micro\")\n macro_f1 = f1_score(y_test, y_pred, average=\"macro\")\n\n print('#' * 9 + ' Node Classification Performance ' + '#' * 9)\n print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')\n print('#' * 50)\n return accuracy, micro_f1, macro_f1\n\n\ndef predHits(truth, pred1, pred2, pred3):\n hits_1 = 0\n hits_3 = 0\n pred1 = np.rint(pred1).astype(np.int32)\n pred2 = np.rint(pred2).astype(np.int32)\n pred3 = np.rint(pred3).astype(np.int32)\n \n for i in range(len(truth)):\n if truth[i] == pred1[i]:\n hits_1 = hits_1 + 1\n if (truth[i] == pred1[i]) or (truth[i] == pred2[i]) or (truth[i] == pred3[i]):\n hits_3 = hits_3 + 1\n top_1 = hits_1/len(truth)\n top_3 = hits_3/len(truth)\n \n return top_1, top_3\n \n"
] |
[
[
"sklearn.metrics.matthews_corrcoef",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.average_precision_score",
"sklearn.preprocessing.MultiLabelBinarizer",
"sklearn.metrics.precision_score",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.recall_score"
]
] |
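The LinkPrediction routine in the record above turns an edge (u, v) into the concatenation of the two node embeddings, labels positive edges 1 and sampled negative edges 0, and scores a logistic regression with ROC-AUC. A minimal sketch of that feature construction, using made-up toy embeddings and edges:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
embedding = {n: rng.random(8) for n in range(6)}          # toy node embeddings
pos_edges = [(0, 1), (1, 2), (2, 3)]                      # hypothetical positive edges
neg_edges = [(0, 4), (3, 5), (1, 5)]                      # hypothetical negative edges

# edge feature = concatenation of the two endpoint embeddings
X = np.array([np.append(embedding[u], embedding[v]) for u, v in pos_edges + neg_edges])
y = np.array([1] * len(pos_edges) + [0] * len(neg_edges))

clf = LogisticRegression(max_iter=1000, solver="lbfgs").fit(X, y)
print("AUC-ROC:", roc_auc_score(y, clf.predict_proba(X)[:, 1]))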
LongfeiProjects/exotica
|
[
"206b296edf9bf3b653ca3984b1449151ca17d374"
] |
[
"exotica_python/src/pyexotica/publish_trajectory.py"
] |
[
"from __future__ import print_function\nfrom time import sleep\nimport matplotlib.pyplot as plt\nimport signal\n\n\ndef sigIntHandler(signal, frame):\n raise KeyboardInterrupt\n\n\ndef publishPose(q, problem, t=0.0):\n problem.getScene().Update(q, t)\n problem.getScene().getSolver().publishFrames()\n\n\ndef publishTrajectory(traj, T, problem):\n if len(traj) == 0:\n print(\"Trajectory has zero elements\")\n raise\n signal.signal(signal.SIGINT, sigIntHandler)\n print('Playing back trajectory '+str(T)+'s')\n dt = float(T)/float(len(traj))\n t = 0\n while True:\n try:\n publishPose(traj[t], problem, float(t)*dt)\n sleep(dt)\n t = (t+1) % len(traj)\n except KeyboardInterrupt:\n return False\n return True\n\n\ndef publishTimeIndexedTrajectory(traj, Ts, problem, once=False):\n if len(traj) == 0:\n print(\"Trajectory has zero elements\")\n raise\n signal.signal(signal.SIGINT, sigIntHandler)\n print('Playing back trajectory '+str(len(Ts)) +\n ' states in '+str(Ts[len(Ts)-1]))\n idx = 0\n\n while True:\n try:\n for i in range(1, len(Ts)-1):\n publishPose(traj[i], problem, Ts[i])\n sleep(Ts[i]-Ts[i-1])\n if once:\n break\n except KeyboardInterrupt:\n return False\n return True\n\n\ndef plot(solution):\n print('Plotting the solution')\n plt.plot(solution, '.-')\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
00wuweimin/jubilant-dollop
|
[
"cc91caf8ee7aba5824abe25cbb3870299b369f91"
] |
[
"pointer_network.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ntrain_data = np.load(\"E:\\\\quant_research\\\\train the rank of ten points\\\\RNN_point\\\\data\\\\train_data_10num.npy\")\r\ntrain_aim = np.load(\"E:\\\\quant_research\\\\train the rank of ten points\\\\RNN_point\\\\data\\\\train_label_10num.npy\")\r\ntrain_data = train_data.reshape(train_data.shape[0],10,1)\r\ntrain_data = train_data.swapaxes(0, 1)\r\ntrain_data = torch.from_numpy(train_data).type(torch.FloatTensor)\r\ntrain_aim = torch.from_numpy(train_aim).type(torch.FloatTensor)\r\n\r\ntest_data = np.load(\"E:\\\\quant_research\\\\train the rank of ten points\\\\RNN_point\\\\data\\\\test_data_10num.npy\")\r\ntest_aim = np.load(\"E:\\\\quant_research\\\\train the rank of ten points\\\\RNN_point\\\\data\\\\test_label_10num.npy\")\r\ntest_data = test_data.reshape(test_data.shape[0],10,1)\r\ntest_data = test_data.swapaxes(0, 1)\r\ntest_data = torch.from_numpy(test_data).type(torch.FloatTensor)\r\ntest_aim = torch.from_numpy(test_aim).type(torch.FloatTensor)\r\n\r\nclass Encoder(nn.Module):\r\n\r\n \r\n def __init__(self, input_size, hidden_size, batch_size, bidirectional=True):\r\n super(Encoder, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.input_size = input_size\r\n self.batch_size = batch_size\r\n self.bidirectional = bidirectional\r\n\r\n self.lstm = nn.LSTM(input_size, hidden_size, batch_first=False, bidirectional=bidirectional)\r\n\r\n def forward(self, inputs, hidden):\r\n output, hidden = self.lstm(inputs, hidden) \r\n return output, hidden\r\n\r\n def init_hidden(self):\r\n return (torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size),\r\n torch.zeros(1 + int(self.bidirectional), self.batch_size, self.hidden_size)) #(num_layers * num_directions, batch, hidden_size)\r\n\r\n\r\nclass AttentionDecoder(nn.Module):\r\n\r\n def __init__(self, hidden_size, output_size, batch_size, vocab_size,seq_len):\r\n super(AttentionDecoder, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.output_size = output_size\r\n self.batch_size = batch_size\r\n self.seq_len = seq_len\r\n self.vocab_size = vocab_size\r\n\r\n self.attn = nn.Linear(hidden_size + output_size + vocab_size, 1) \r\n self.lstm = nn.LSTM(hidden_size + vocab_size, output_size) \r\n self.final = nn.Linear(output_size, vocab_size)\r\n\r\n def init_hidden(self):\r\n return (torch.zeros(1, self.batch_size, self.output_size),\r\n torch.zeros(1, self.batch_size, self.output_size))\r\n\r\n \r\n def forward(self, decoder_hidden, encoder_outputs, input):\r\n seq = 0\r\n weights= []\r\n i = 0\r\n output = torch.zeros(self.batch_size, self.vocab_size)\r\n for i in range(len(encoder_outputs)):\r\n weights.append(self.attn(torch.cat((decoder_hidden[0][:].squeeze(0),encoder_outputs[i],output), dim=1)))\r\n\r\n normalized_weight = F.softmax(torch.cat(weights, 1), 1)\r\n normalized_weights = normalized_weight\r\n\r\n\r\n attn_applied = torch.bmm(normalized_weight.unsqueeze(1),\r\n encoder_outputs.transpose(0,1)) \r\n input_lstm = torch.cat((attn_applied.transpose(0,1)[0], output),\r\n dim=1) # if we are using embedding, use embedding of input here instead\r\n\r\n output_, hidden = self.lstm(input_lstm.unsqueeze(0), decoder_hidden)\r\n\r\n output = self.final(output_[0]) #output 为(vocab_size, output_size)\r\n #output = self.final2(output)\r\n\r\n # hidden0 = hidden[0].transpose(0, 
1).reshape(batch_size, 1, -1).transpose(0, 1)\r\n # hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)\r\n # decoder_hidden = (hidden0, hidden1)\r\n # decoder_hiddens = decoder_hidden\r\n\r\n\r\n out = F.softmax(output,1)\r\n\r\n return out\r\n\r\n\r\n\r\nseq_len = 10\r\ninput_size = 1\r\nhidden_size = 2\r\nbatch_size = train_data.shape[1]\r\nbidirectional = True\r\noutput_size = hidden_size * (1 + bidirectional)\r\nvocal_size = 10\r\n\r\ninput = []\r\nfor i in range(10):\r\n m = np.ones((10000,10))*i\r\n input.append(m)\r\ninput = np.array(input)\r\ninput = torch.from_numpy(input).type(torch.FloatTensor)\r\n\r\nclass pointer_atten(nn.Module):\r\n def __init__(self):\r\n super(pointer_atten, self).__init__()\r\n self.layer1 = Encoder(input_size = input_size,\r\n hidden_size = hidden_size,\r\n batch_size = batch_size,\r\n bidirectional=True)\r\n self.layer2 = AttentionDecoder(\r\n hidden_size = hidden_size * (1 + bidirectional),\r\n output_size = output_size,\r\n batch_size = batch_size,\r\n vocab_size = vocal_size,\r\n seq_len = 1\r\n )\r\n\r\n def forward(self,x):\r\n output, hidden = self.layer1.forward(x, self.layer1.init_hidden())\r\n hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)\r\n hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)\r\n decoder_hidden = (hidden0, hidden1)\r\n encoder_outputs = output\r\n last_output = self.layer2.forward(decoder_hidden, output, input)\r\n\r\n return last_output\r\n\r\nNet = pointer_atten()\r\n\r\n\r\nlearning_rate = 0.05\r\nLoss = nn.MSELoss(reduction='mean')\r\noptimizer = torch.optim.Adam(Net.parameters(), lr=learning_rate)\r\n\r\n###########################################\r\n# train\r\n###########################################\r\nloss_list = []\r\nTrue_list = []\r\nnum_epochs = 10000\r\nepoch = 10000\r\nbatch = train_aim.detach().numpy().size\r\n\r\nNet.load_state_dict(torch.load('E:\\\\quant_research\\\\train the rank of ten points\\\\RNN_point\\\\net_10num\\\\net720.pkl'))\r\n\r\nfor epoch in range(1000):\r\n train_data = Variable(train_data,requires_grad=True)\r\n train_aim = Variable(train_aim,requires_grad=True)\r\n\r\n # Forward pass\r\n outputs = Net(train_data)\r\n loss = Loss(outputs, train_aim)\r\n loss_list.append(loss)\r\n\r\n # Backward and optimize\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if (epoch) % 10 == 0:\r\n print ('Epoch [{}/{}], Loss: {:.4f}'\r\n .format(epoch+1,num_epochs,loss.item()))\r\n\r\n is_not = outputs.detach().numpy() - train_aim.detach().numpy()\r\n is_not = np.where(is_not < -0.1, 10, is_not)\r\n is_not = np.where(is_not < 0.1, 1, 0)\r\n T_pre = np.nansum(is_not)\r\n True_rate = T_pre / batch\r\n True_list.append(True_rate)\r\n print('accuracy of prediction in training data:', True_rate)\r\n\r\n if epoch % 10 ==0:\r\n torch.save(Net.state_dict(), 'E:\\\\quant_research\\\\train the rank of ten points\\\\\\RNN_point\\\\net_10num\\\\net{}.pkl'.format(epoch))\r\n\r\nloss_array = np.array(loss_list)\r\ntrue_array = np.array(True_list)\r\nnp.save('E:\\\\quant_research\\\\train the rank of ten points\\\\\\RNN_point\\\\loss',loss_array)\r\nnp.save('E:\\\\quant_research\\\\train the rank of ten points\\\\\\RNN_point\\\\true',true_array)\r\n\r\n\r\n\r\nloss_array = np.load('E:\\\\quant_research\\\\train the rank of ten points\\\\\\RNN_point\\\\loss.npy',allow_pickle=True)\r\ntrue_array = np.load('E:\\\\quant_research\\\\train the rank of ten points\\\\\\RNN_point\\\\true.npy')\r\n\r\n\r\n\r\noutputs = 
Net(train_data)\r\nloss = Loss(outputs, train_aim)\r\nlabel = np.argmax(outputs.detach().numpy(),axis = 1)\r\nlabel_aim = np.argmax(train_aim.detach().numpy(),axis = 1)\r\nTrue_rate = np.sum(label == label_aim) / 10000\r\nprint('loss in testing data:%.5f,accuracy of prediction in testing data:%.5f'%(loss,True_rate))\r\n\r\n\r\noutputs = Net(test_data)\r\nloss = Loss(outputs, test_aim)\r\nlabel = np.argmax(outputs.detach().numpy(),axis = 1)\r\nlabel_aim = np.argmax(test_aim.detach().numpy(),axis = 1)\r\nTrue_rate = np.sum(label == label_aim) / 10000\r\nprint('loss in training data:%.5f,accuracy of prediction in training data:%.5f'%(loss,True_rate))\r\n\r\n"
] |
[
[
"torch.nn.Linear",
"torch.zeros",
"numpy.array",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.MSELoss",
"torch.autograd.Variable",
"numpy.sum",
"numpy.nansum",
"numpy.ones",
"numpy.load",
"numpy.save",
"torch.from_numpy",
"numpy.where",
"torch.load",
"torch.nn.functional.softmax"
]
] |
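The AttentionDecoder in the record above scores each encoder position with a concat-then-Linear(..., 1) layer, softmaxes the scores over positions, and takes a weighted sum of the encoder outputs via bmm. A small self-contained sketch of that single attention step, with toy dimensions chosen here rather than the ones used in the record:

import torch
import torch.nn as nn
import torch.nn.functional as F

seq_len, batch, enc_dim, dec_dim, vocab = 10, 4, 4, 4, 10
encoder_outputs = torch.randn(seq_len, batch, enc_dim)    # (seq, batch, enc_dim)
decoder_h = torch.randn(batch, dec_dim)                   # current decoder hidden state
prev_output = torch.zeros(batch, vocab)                   # previous step's output

attn = nn.Linear(dec_dim + enc_dim + vocab, 1)
# one scalar score per encoder position, from the concatenated inputs
scores = torch.cat(
    [attn(torch.cat((decoder_h, encoder_outputs[i], prev_output), dim=1))
     for i in range(seq_len)], dim=1)                     # (batch, seq)
weights = F.softmax(scores, dim=1)
# weighted sum of encoder outputs -> context vector
context = torch.bmm(weights.unsqueeze(1),
                    encoder_outputs.transpose(0, 1)).squeeze(1)   # (batch, enc_dim)
print(weights.shape, context.shape)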
NimaPng/tsid
|
[
"23bbc6bace4f4623c2189535e71ba63bedbc4368"
] |
[
"exercizes/ur5_conf.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 18 09:47:07 2019\n\n@author: student\n\"\"\"\n\nimport numpy as np\nimport os\n\nnp.set_printoptions(precision=3, linewidth=200, suppress=True)\nLINE_WIDTH = 60\n\nN_SIMULATION = 4000 # number of time steps simulated\ndt = 0.002 # controller time step\nq0 = np.array([ 0. , -1.0, 0.7, 0. , 0. , 0. ]) # initial configuration\n\n# REFERENCE SINUSOIDAL TRAJECTORY\namp = np.array([0*0.02, 0.1, 0.10]) # amplitude\nphi = np.array([0.0, 0.5*np.pi, 0.0]) # phase\ntwo_pi_f = 1.4*2*np.pi*np.array([1.0, 0.5, 0.5]) # frequency (time 2 PI)\noffset = np.array([0.0, 0.0, 0.0])\n\nw_ee = 1.0 # weight of end-effector task\nw_posture = 1e-3 # weight of joint posture task\nw_torque_bounds = 1.0 # weight of the torque bounds\nw_joint_bounds = 1.0\n\nkp_ee = 5.0 # proportional gain of end-effector constraint\nkp_posture = 1.0 # proportional gain of joint posture task\n\ntau_max_scaling = 0.4 # scaling factor of torque bounds\nv_max_scaling = 0.4\n\nee_frame_name = \"ee_fixed_joint\" # end-effector frame name\nee_task_mask = np.array([1., 1, 1, 0, 0, 0])\n\nPRINT_N = 500 # print every PRINT_N time steps\nDISPLAY_N = 20 # update robot configuration in viwewer every DISPLAY_N time steps\nCAMERA_TRANSFORM = [2.582354784011841, 1.620774507522583, 1.0674564838409424, 0.2770655155181885, 0.5401807427406311, 0.6969326734542847, 0.3817386031150818]\nSPHERE_RADIUS = 0.03\nREF_SPHERE_RADIUS = 0.03\nEE_SPHERE_COLOR = (1, 0.5, 0, 0.5)\nEE_REF_SPHERE_COLOR = (1, 0, 0, 0.5)\n\nfrom example_robot_data.robots_loader import getModelPath\nurdf = \"/ur_description/urdf/ur5_robot.urdf\"\npath = getModelPath(urdf)\nurdf = path+urdf\n"
] |
[
[
"numpy.set_printoptions",
"numpy.array"
]
] |
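ur5_conf.py above only defines the constants of the reference sinusoidal trajectory (amp, phi, two_pi_f, offset) plus dt and N_SIMULATION. One common way such constants are evaluated is pos(t) = offset + amp*sin(two_pi_f*t + phi); that formula is an assumption here, since the exercise script that consumes this config is not part of the record:

import numpy as np

amp      = np.array([0.0, 0.1, 0.10])
phi      = np.array([0.0, 0.5 * np.pi, 0.0])
two_pi_f = 1.4 * 2 * np.pi * np.array([1.0, 0.5, 0.5])
offset   = np.array([0.0, 0.0, 0.0])
dt, N_SIMULATION = 0.002, 4000

t = np.arange(N_SIMULATION) * dt
# assumed evaluation of the reference trajectory and its derivatives
pos = offset + amp * np.sin(np.outer(t, two_pi_f) + phi)         # (N, 3) positions
vel = amp * two_pi_f * np.cos(np.outer(t, two_pi_f) + phi)       # velocities
acc = -amp * two_pi_f**2 * np.sin(np.outer(t, two_pi_f) + phi)   # accelerations
print(pos.shape, vel.shape, acc.shape)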
keflavich/pyregion
|
[
"1ed46731eedffcb52910b0574b2a4e7a8cc99a7d"
] |
[
"pyregion/mpl_helper.py"
] |
[
"import copy\nimport numpy as np\nfrom math import cos, sin, pi, atan2\nimport warnings\nimport matplotlib.patches as mpatches\nfrom matplotlib.path import Path\nfrom matplotlib.lines import Line2D\nfrom matplotlib.transforms import Affine2D, Bbox, IdentityTransform\nfrom matplotlib.text import Annotation\n\n\ndef rotated_polygon(xy, ox, oy, angle):\n # angle in degree\n theta = angle / 180. * pi\n\n st = sin(theta)\n ct = cos(theta)\n\n xy = np.asarray(xy, dtype=\"d\")\n x, y = xy[:, 0], xy[:, 1]\n x1 = x - ox\n y1 = y - oy\n\n x2 = ct * x1 + -st * y1\n y2 = st * x1 + ct * y1\n\n xp = x2 + ox\n yp = y2 + oy\n\n return np.hstack((xp.reshape((-1, 1)), yp.reshape((-1, 1))))\n\n # sss3 = [s1[0] for s1 in sss2 if isinstance(s1[0], parser_ds9.Shape)]\n\n\n_point_type_dict = dict(circle=\"o\",\n box=\"s\",\n diamond=\"D\",\n x=\"x\",\n cross=\"+\",\n arrow=\"^\",\n boxcircle=\"*\")\n\n_ds9_to_mpl_colormap = dict(green=\"lime\",\n )\n\n\ndef properties_func_default(shape, saved_attrs):\n attr_list = copy.copy(shape.attr[0])\n attr_dict = copy.copy(shape.attr[1])\n\n attr_list.extend(saved_attrs[0])\n attr_dict.update(saved_attrs[1])\n\n color = attr_dict.get(\"color\", None)\n color = _ds9_to_mpl_colormap.get(color, color)\n\n if shape.name == \"text\":\n kwargs = dict(color=color,\n rotation=attr_dict.get(\"textangle\", 0),\n )\n font = attr_dict.get(\"font\")\n if font:\n a = font.split()\n if len(a) >= 3:\n fontsize = float(a[1])\n kwargs[\"fontsize\"] = fontsize\n elif shape.name == \"point\":\n point_attrs = attr_dict.get(\"point\", \"boxcircle\").split()\n if len(point_attrs) == 1:\n point_type = point_attrs[0]\n point_size = 11\n elif len(point_attrs) > 1:\n point_type = point_attrs[0]\n point_size = int(point_attrs[1])\n\n marker = _point_type_dict.get(point_type, \"o\")\n kwargs = dict(markeredgecolor=color,\n markerfacecolor=\"none\",\n marker=marker,\n markeredgewidth=int(attr_dict.get(\"width\", 1)),\n markersize=point_size\n )\n elif shape.name in [\"line\", \"vector\"]:\n fontsize = 10 # default font size\n\n font = attr_dict.get(\"font\")\n if font:\n a = font.split()\n if len(a) >= 3:\n fontsize = float(a[1])\n\n kwargs = dict(color=color,\n linewidth=int(attr_dict.get(\"width\", 1)),\n mutation_scale=fontsize,\n )\n if int(attr_dict.get(\"dash\", \"0\")):\n kwargs[\"linestyle\"] = \"dashed\"\n\n else:\n kwargs = dict(edgecolor=color,\n linewidth=int(attr_dict.get(\"width\", 1)),\n facecolor=\"none\"\n )\n\n if \"background\" in attr_list:\n kwargs[\"linestyle\"] = \"dashed\"\n\n if int(attr_dict.get(\"dash\", \"0\")):\n kwargs[\"linestyle\"] = \"dashed\"\n if shape.exclude:\n kwargs[\"hatch\"] = \"/\"\n\n return kwargs\n\n\ndef _get_text(txt, x, y, dx, dy, ha=\"center\", va=\"center\", **kwargs):\n if \"color\" in kwargs:\n textcolor = kwargs[\"color\"]\n del kwargs[\"color\"]\n elif \"markeredgecolor\" in kwargs:\n textcolor = kwargs[\"markeredgecolor\"]\n else:\n import matplotlib as mpl\n textcolor = mpl.rcParams['text.color']\n ann = Annotation(txt, (x, y), xytext=(dx, dy),\n xycoords='data',\n textcoords=\"offset points\",\n color=textcolor,\n ha=ha, va=va,\n **kwargs)\n ann.set_transform(IdentityTransform())\n\n return ann\n\n\ndef as_mpl_artists(shape_list,\n properties_func=None,\n text_offset=5.0, origin=1):\n \"\"\"\n Converts a region list to a list of patches and a list of artists.\n\n\n Optional Keywords:\n [ text_offset ] - If there is text associated with the regions, add\n some vertical offset (in pixels) to the text so that it doesn't overlap\n with the 
regions.\n\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the python convetion\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists have coordinate shifted by\n (1, 1). If you do not want this shift, set origin=0.\n \"\"\"\n\n patch_list = []\n artist_list = []\n\n if properties_func is None:\n properties_func = properties_func_default\n\n # properties for continued(? multiline?) regions\n saved_attrs = None\n\n for shape in shape_list:\n\n patches = []\n\n if saved_attrs is None:\n _attrs = [], {}\n else:\n _attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])\n\n kwargs = properties_func(shape, _attrs)\n\n if shape.name == \"composite\":\n saved_attrs = shape.attr\n continue\n\n if saved_attrs is None and shape.continued:\n saved_attrs = shape.attr\n # elif (shape.name in shape.attr[1]):\n # if (shape.attr[1][shape.name] != \"ignore\"):\n # saved_attrs = shape.attr\n\n if not shape.continued:\n saved_attrs = None\n\n # text associated with the shape\n txt = shape.attr[1].get(\"text\")\n\n if shape.name == \"polygon\":\n xy = np.array(shape.coord_list)\n xy.shape = -1, 2\n\n # -1 for change origin to 0,0\n patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]\n\n elif shape.name == \"rotbox\" or shape.name == \"box\":\n xc, yc, w, h, rot = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n _box = np.array([[-w / 2., -h / 2.],\n [-w / 2., h / 2.],\n [w / 2., h / 2.],\n [w / 2., -h / 2.]])\n box = _box + [xc, yc]\n rotbox = rotated_polygon(box, xc, yc, rot)\n patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]\n\n elif shape.name == \"ellipse\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n angle = shape.coord_list[-1]\n\n maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]\n\n patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,\n angle=angle, **kwargs)\n for maj, min in zip(maj_list, min_list)]\n\n elif shape.name == \"annulus\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n r_list = shape.coord_list[2:]\n\n patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]\n\n elif shape.name == \"circle\":\n xc, yc, major = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]\n\n elif shape.name == \"panda\":\n xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,\n theta1=a1, theta2=a2, **kwargs)\n for rr in np.linspace(r1, r2, rn + 1)]\n\n for aa in np.linspace(a1, a2, an + 1):\n xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc\n yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc\n p = Path(np.transpose([xx, yy]))\n patches.append(mpatches.PathPatch(p, **kwargs))\n\n elif shape.name == \"pie\":\n xc, yc, r1, r2, a1, a2 = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,\n theta1=a1, theta2=a2, **kwargs)\n for rr in [r1, r2]]\n\n for aa in [a1, a2]:\n xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc\n yy = np.array([r1, r2]) * np.sin(aa / 180. 
* np.pi) + yc\n p = Path(np.transpose([xx, yy]))\n patches.append(mpatches.PathPatch(p, **kwargs))\n\n elif shape.name == \"epanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n # mpl takes angle a1, a2 as angle as in circle before\n # transformation to ellipse.\n\n x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12\n x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12\n\n a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.\n\n patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,\n angle=angle, theta1=a1, theta2=a2,\n **kwargs)\n for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),\n np.linspace(r12, r22, rn + 1))]\n\n for aa in np.linspace(a1, a2, an + 1):\n xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)\n yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)\n p = Path(np.transpose([xx, yy]))\n tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)\n p2 = tr.transform_path(p)\n patches.append(mpatches.PathPatch(p2, **kwargs))\n\n elif shape.name == \"text\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n if txt:\n _t = _get_text(txt, xc, yc, 0, 0, **kwargs)\n artist_list.append(_t)\n\n elif shape.name == \"point\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n artist_list.append(Line2D([xc], [yc],\n **kwargs))\n\n if txt:\n textshape = copy.copy(shape)\n textshape.name = \"text\"\n textkwargs = properties_func(textshape, _attrs)\n _t = _get_text(txt, xc, yc, 0, text_offset,\n va=\"bottom\",\n **textkwargs)\n artist_list.append(_t)\n\n elif shape.name in [\"line\", \"vector\"]:\n if shape.name == \"line\":\n x1, y1, x2, y2 = shape.coord_list[:4]\n # -1 for change origin to 0,0\n x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin\n\n a1, a2 = shape.attr[1].get(\"line\", \"0 0\").strip().split()[:2]\n\n arrowstyle = \"-\"\n if int(a1):\n arrowstyle = \"<\" + arrowstyle\n if int(a2):\n arrowstyle = arrowstyle + \">\"\n\n else: # shape.name == \"vector\"\n x1, y1, l, a = shape.coord_list[:4]\n # -1 for change origin to 0,0\n x1, y1 = x1 - origin, y1 - origin\n x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)\n v1 = int(shape.attr[1].get(\"vector\", \"0\").strip())\n\n if v1:\n arrowstyle = \"->\"\n else:\n arrowstyle = \"-\"\n\n patches = [mpatches.FancyArrowPatch(posA=(x1, y1),\n posB=(x2, y2),\n arrowstyle=arrowstyle,\n arrow_transmuter=None,\n connectionstyle=\"arc3\",\n patchA=None, patchB=None,\n shrinkA=0, shrinkB=0,\n connector=None,\n **kwargs)]\n\n else:\n warnings.warn(\"'as_mpl_artists' does not know how to convert {0} \"\n \"to mpl artist\".format(shape.name))\n\n patch_list.extend(patches)\n\n if txt and patches:\n # the text associated with a shape uses different\n # matplotlib keywords than the shape itself for, e.g.,\n # color\n textshape = copy.copy(shape)\n textshape.name = \"text\"\n textkwargs = properties_func(textshape, _attrs)\n\n # calculate the text position\n _bb = [p.get_window_extent() for p in patches]\n\n # this is to work around backward-incompatible change made\n # in matplotlib 1.2. This change is later reverted so only\n # some versions are affected. 
With affected version of\n # matplotlib, get_window_extent method calls get_transform\n # method which sets the _transformSet to True, which is\n # not desired.\n for p in patches:\n p._transformSet = False\n\n _bbox = Bbox.union(_bb)\n x0, y0, x1, y1 = _bbox.extents\n xc = .5 * (x0 + x1)\n\n _t = _get_text(txt, xc, y1, 0, text_offset,\n va=\"bottom\",\n **textkwargs)\n artist_list.append(_t)\n\n return patch_list, artist_list\n"
] |
[
[
"matplotlib.lines.Line2D",
"numpy.array",
"numpy.sin",
"numpy.asarray",
"matplotlib.patches.PathPatch",
"matplotlib.patches.Polygon",
"matplotlib.transforms.Bbox.union",
"matplotlib.transforms.IdentityTransform",
"matplotlib.patches.Ellipse",
"matplotlib.text.Annotation",
"numpy.transpose",
"matplotlib.patches.FancyArrowPatch",
"numpy.cos",
"numpy.linspace",
"matplotlib.transforms.Affine2D",
"matplotlib.patches.Arc"
]
] |
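rotated_polygon() in mpl_helper.py above rotates polygon vertices about a pivot (ox, oy) by an angle in degrees: translate to the pivot, apply the 2-D rotation matrix, translate back. A tiny standalone check of that math; the square and the 90-degree angle are toy inputs chosen here:

import numpy as np
from math import sin, cos, pi

def rotated_polygon(xy, ox, oy, angle_deg):
    theta = angle_deg / 180.0 * pi
    st, ct = sin(theta), cos(theta)
    xy = np.asarray(xy, dtype="d")
    x1, y1 = xy[:, 0] - ox, xy[:, 1] - oy        # shift pivot to the origin
    x2 = ct * x1 - st * y1                        # rotate
    y2 = st * x1 + ct * y1
    return np.column_stack((x2 + ox, y2 + oy))    # shift back

square = [(1, 0), (2, 0), (2, 1), (1, 1)]
print(rotated_polygon(square, 1, 0, 90))   # rotates the square about (1, 0)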
aurobindoc/feast
|
[
"72f155882c95f21573b31a613edf066bdb55f630"
] |
[
"sdk/python/feast/on_demand_feature_view.py"
] |
[
"import copy\nimport functools\nimport warnings\nfrom types import MethodType\nfrom typing import Dict, List, Optional, Type, Union\n\nimport dill\nimport pandas as pd\n\nfrom feast.base_feature_view import BaseFeatureView\nfrom feast.data_source import RequestSource\nfrom feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError\nfrom feast.feature import Feature\nfrom feast.feature_view import FeatureView\nfrom feast.feature_view_projection import FeatureViewProjection\nfrom feast.field import Field, from_value_type\nfrom feast.protos.feast.core.OnDemandFeatureView_pb2 import (\n OnDemandFeatureView as OnDemandFeatureViewProto,\n)\nfrom feast.protos.feast.core.OnDemandFeatureView_pb2 import (\n OnDemandFeatureViewMeta,\n OnDemandFeatureViewSpec,\n OnDemandSource,\n)\nfrom feast.protos.feast.core.OnDemandFeatureView_pb2 import (\n UserDefinedFunction as UserDefinedFunctionProto,\n)\nfrom feast.type_map import (\n feast_value_type_to_pandas_type,\n python_type_to_feast_value_type,\n)\nfrom feast.usage import log_exceptions\nfrom feast.value_type import ValueType\n\nwarnings.simplefilter(\"once\", DeprecationWarning)\n\n\nclass OnDemandFeatureView(BaseFeatureView):\n \"\"\"\n [Experimental] An OnDemandFeatureView defines a logical group of features that are\n generated by applying a transformation on a set of input sources, such as feature\n views and request data sources.\n\n Attributes:\n name: The unique name of the on demand feature view.\n features: The list of features in the output of the on demand feature view.\n source_feature_view_projections: A map from input source names to actual input\n sources with type FeatureViewProjection.\n source_request_sources: A map from input source names to the actual input\n sources with type RequestSource.\n udf: The user defined transformation function, which must take pandas dataframes\n as inputs.\n description: A human-readable description.\n tags: A dictionary of key-value pairs to store arbitrary metadata.\n owner: The owner of the on demand feature view, typically the email of the primary\n maintainer.\n \"\"\"\n\n # TODO(adchia): remove inputs from proto and declaration\n name: str\n features: List[Field]\n source_feature_view_projections: Dict[str, FeatureViewProjection]\n source_request_sources: Dict[str, RequestSource]\n udf: MethodType\n description: str\n tags: Dict[str, str]\n owner: str\n\n @log_exceptions\n def __init__(\n self,\n *args,\n name: Optional[str] = None,\n features: Optional[List[Feature]] = None,\n sources: Optional[\n Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]\n ] = None,\n udf: Optional[MethodType] = None,\n inputs: Optional[\n Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]\n ] = None,\n schema: Optional[List[Field]] = None,\n description: str = \"\",\n tags: Optional[Dict[str, str]] = None,\n owner: str = \"\",\n ):\n \"\"\"\n Creates an OnDemandFeatureView object.\n\n Args:\n name: The unique name of the on demand feature view.\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n udf (optional): The user defined transformation function, which must take pandas\n dataframes as inputs.\n inputs (optional): A map from input source 
names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n \"\"\"\n positional_attributes = [\"name\", \"features\", \"inputs\", \"udf\"]\n\n _name = name\n\n _schema = schema or []\n if len(_schema) == 0 and features is not None:\n _schema = [Field.from_feature(feature) for feature in features]\n if features is not None:\n warnings.warn(\n (\n \"The `features` parameter is being deprecated in favor of the `schema` parameter. \"\n \"Please switch from using `features` to `schema`. This will also requiring switching \"\n \"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not \"\n \"support the `features` parameter.\"\n ),\n DeprecationWarning,\n )\n\n _sources = sources or inputs\n if inputs and sources:\n raise ValueError(\"At most one of `sources` or `inputs` can be specified.\")\n elif inputs:\n warnings.warn(\n (\n \"The `inputs` parameter is being deprecated. Please use `sources` instead. \"\n \"Feast 0.21 and onwards will not support the `inputs` parameter.\"\n ),\n DeprecationWarning,\n )\n\n _udf = udf\n\n if args:\n warnings.warn(\n (\n \"On demand feature view parameters should be specified as keyword arguments \"\n \"instead of positional arguments. Feast 0.23 and onwards will not support \"\n \"positional arguments in on demand feature view definitions.\"\n ),\n DeprecationWarning,\n )\n if len(args) > len(positional_attributes):\n raise ValueError(\n f\"Only {', '.join(positional_attributes)} are allowed as positional args \"\n f\"when defining feature views, for backwards compatibility.\"\n )\n if len(args) >= 1:\n _name = args[0]\n if len(args) >= 2:\n _schema = args[1]\n # Convert Features to Fields.\n if len(_schema) > 0 and isinstance(_schema[0], Feature):\n _schema = [Field.from_feature(feature) for feature in _schema]\n warnings.warn(\n (\n \"The `features` parameter is being deprecated in favor of the `schema` parameter. \"\n \"Please switch from using `features` to `schema`. This will also requiring switching \"\n \"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not \"\n \"support the `features` parameter.\"\n ),\n DeprecationWarning,\n )\n if len(args) >= 3:\n _sources = args[2]\n warnings.warn(\n (\n \"The `inputs` parameter is being deprecated. Please use `sources` instead. 
\"\n \"Feast 0.21 and onwards will not support the `inputs` parameter.\"\n ),\n DeprecationWarning,\n )\n if len(args) >= 4:\n _udf = args[3]\n\n if not _name:\n raise ValueError(\n \"The name of the on demand feature view must be specified.\"\n )\n\n if not _sources:\n raise ValueError(\"The `sources` parameter must be specified.\")\n\n super().__init__(\n name=_name,\n features=_schema,\n description=description,\n tags=tags,\n owner=owner,\n )\n\n assert _sources is not None\n self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}\n self.source_request_sources: Dict[str, RequestSource] = {}\n for source_name, odfv_source in _sources.items():\n if isinstance(odfv_source, RequestSource):\n self.source_request_sources[source_name] = odfv_source\n elif isinstance(odfv_source, FeatureViewProjection):\n self.source_feature_view_projections[source_name] = odfv_source\n else:\n self.source_feature_view_projections[\n source_name\n ] = odfv_source.projection\n\n if _udf is None:\n raise ValueError(\"The `udf` parameter must be specified.\")\n assert _udf\n self.udf = _udf\n\n @property\n def proto_class(self) -> Type[OnDemandFeatureViewProto]:\n return OnDemandFeatureViewProto\n\n def __copy__(self):\n fv = OnDemandFeatureView(\n name=self.name,\n schema=self.features,\n sources=dict(\n **self.source_feature_view_projections, **self.source_request_sources,\n ),\n udf=self.udf,\n description=self.description,\n tags=self.tags,\n owner=self.owner,\n )\n fv.projection = copy.copy(self.projection)\n return fv\n\n def __eq__(self, other):\n if not super().__eq__(other):\n return False\n\n if (\n not self.source_feature_view_projections\n == other.source_feature_view_projections\n or not self.source_request_sources == other.source_request_sources\n or not self.udf.__code__.co_code == other.udf.__code__.co_code\n ):\n return False\n\n return True\n\n def __hash__(self):\n return super().__hash__()\n\n def to_proto(self) -> OnDemandFeatureViewProto:\n \"\"\"\n Converts an on demand feature view object to its protobuf representation.\n\n Returns:\n A OnDemandFeatureViewProto protobuf.\n \"\"\"\n meta = OnDemandFeatureViewMeta()\n if self.created_timestamp:\n meta.created_timestamp.FromDatetime(self.created_timestamp)\n if self.last_updated_timestamp:\n meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)\n sources = {}\n for source_name, fv_projection in self.source_feature_view_projections.items():\n sources[source_name] = OnDemandSource(\n feature_view_projection=fv_projection.to_proto()\n )\n for (source_name, request_sources,) in self.source_request_sources.items():\n sources[source_name] = OnDemandSource(\n request_data_source=request_sources.to_proto()\n )\n\n spec = OnDemandFeatureViewSpec(\n name=self.name,\n features=[feature.to_proto() for feature in self.features],\n sources=sources,\n user_defined_function=UserDefinedFunctionProto(\n name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),\n ),\n description=self.description,\n tags=self.tags,\n owner=self.owner,\n )\n\n return OnDemandFeatureViewProto(spec=spec, meta=meta)\n\n @classmethod\n def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):\n \"\"\"\n Creates an on demand feature view from a protobuf representation.\n\n Args:\n on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.\n\n Returns:\n A OnDemandFeatureView object based on the on-demand feature view protobuf.\n \"\"\"\n sources = {}\n for (\n source_name,\n on_demand_source,\n ) 
in on_demand_feature_view_proto.spec.sources.items():\n if on_demand_source.WhichOneof(\"source\") == \"feature_view\":\n sources[source_name] = FeatureView.from_proto(\n on_demand_source.feature_view\n ).projection\n elif on_demand_source.WhichOneof(\"source\") == \"feature_view_projection\":\n sources[source_name] = FeatureViewProjection.from_proto(\n on_demand_source.feature_view_projection\n )\n else:\n sources[source_name] = RequestSource.from_proto(\n on_demand_source.request_data_source\n )\n on_demand_feature_view_obj = cls(\n name=on_demand_feature_view_proto.spec.name,\n schema=[\n Field(\n name=feature.name,\n dtype=from_value_type(ValueType(feature.value_type)),\n )\n for feature in on_demand_feature_view_proto.spec.features\n ],\n sources=sources,\n udf=dill.loads(\n on_demand_feature_view_proto.spec.user_defined_function.body\n ),\n description=on_demand_feature_view_proto.spec.description,\n tags=dict(on_demand_feature_view_proto.spec.tags),\n owner=on_demand_feature_view_proto.spec.owner,\n )\n\n # FeatureViewProjections are not saved in the OnDemandFeatureView proto.\n # Create the default projection.\n on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(\n on_demand_feature_view_obj\n )\n\n if on_demand_feature_view_proto.meta.HasField(\"created_timestamp\"):\n on_demand_feature_view_obj.created_timestamp = (\n on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()\n )\n if on_demand_feature_view_proto.meta.HasField(\"last_updated_timestamp\"):\n on_demand_feature_view_obj.last_updated_timestamp = (\n on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()\n )\n\n return on_demand_feature_view_obj\n\n def get_request_data_schema(self) -> Dict[str, ValueType]:\n schema: Dict[str, ValueType] = {}\n for request_source in self.source_request_sources.values():\n if isinstance(request_source.schema, List):\n new_schema = {}\n for field in request_source.schema:\n new_schema[field.name] = field.dtype.to_value_type()\n schema.update(new_schema)\n elif isinstance(request_source.schema, Dict):\n schema.update(request_source.schema)\n else:\n raise Exception(\n f\"Request source schema is not correct type: ${str(type(request_source.schema))}\"\n )\n return schema\n\n def get_transformed_features_df(\n self, df_with_features: pd.DataFrame, full_feature_names: bool = False,\n ) -> pd.DataFrame:\n # Apply on demand transformations\n columns_to_cleanup = []\n for source_fv_projection in self.source_feature_view_projections.values():\n for feature in source_fv_projection.features:\n full_feature_ref = f\"{source_fv_projection.name}__{feature.name}\"\n if full_feature_ref in df_with_features.keys():\n # Make sure the partial feature name is always present\n df_with_features[feature.name] = df_with_features[full_feature_ref]\n columns_to_cleanup.append(feature.name)\n elif feature.name in df_with_features.keys():\n # Make sure the full feature name is always present\n df_with_features[full_feature_ref] = df_with_features[feature.name]\n columns_to_cleanup.append(full_feature_ref)\n\n # Compute transformed values and apply to each result row\n df_with_transformed_features = self.udf.__call__(df_with_features)\n\n # Work out whether the correct columns names are used.\n rename_columns: Dict[str, str] = {}\n for feature in self.features:\n short_name = feature.name\n long_name = f\"{self.projection.name_to_use()}__{feature.name}\"\n if (\n short_name in df_with_transformed_features.columns\n and full_feature_names\n ):\n 
rename_columns[short_name] = long_name\n elif not full_feature_names:\n # Long name must be in dataframe.\n rename_columns[long_name] = short_name\n\n # Cleanup extra columns used for transformation\n df_with_features.drop(columns=columns_to_cleanup, inplace=True)\n return df_with_transformed_features.rename(columns=rename_columns)\n\n def infer_features(self):\n \"\"\"\n Infers the set of features associated to this feature view from the input source.\n\n Raises:\n RegistryInferenceFailure: The set of features could not be inferred.\n \"\"\"\n df = pd.DataFrame()\n for feature_view_projection in self.source_feature_view_projections.values():\n for feature in feature_view_projection.features:\n dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())\n df[f\"{feature_view_projection.name}__{feature.name}\"] = pd.Series(\n dtype=dtype\n )\n df[f\"{feature.name}\"] = pd.Series(dtype=dtype)\n for request_data in self.source_request_sources.values():\n for field in request_data.schema:\n dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())\n df[f\"{field.name}\"] = pd.Series(dtype=dtype)\n output_df: pd.DataFrame = self.udf.__call__(df)\n inferred_features = []\n for f, dt in zip(output_df.columns, output_df.dtypes):\n inferred_features.append(\n Field(\n name=f,\n dtype=from_value_type(\n python_type_to_feast_value_type(f, type_name=str(dt))\n ),\n )\n )\n\n if self.features:\n missing_features = []\n for specified_features in self.features:\n if specified_features not in inferred_features:\n missing_features.append(specified_features)\n if missing_features:\n raise SpecifiedFeaturesNotPresentError(\n [f.name for f in missing_features], self.name\n )\n else:\n self.features = inferred_features\n\n if not self.features:\n raise RegistryInferenceFailure(\n \"OnDemandFeatureView\",\n f\"Could not infer Features for the feature view '{self.name}'.\",\n )\n\n @staticmethod\n def get_requested_odfvs(feature_refs, project, registry):\n all_on_demand_feature_views = registry.list_on_demand_feature_views(\n project, allow_cache=True\n )\n requested_on_demand_feature_views: List[OnDemandFeatureView] = []\n for odfv in all_on_demand_feature_views:\n for feature in odfv.features:\n if f\"{odfv.name}:{feature.name}\" in feature_refs:\n requested_on_demand_feature_views.append(odfv)\n break\n return requested_on_demand_feature_views\n\n\n# TODO(felixwang9817): Force this decorator to accept kwargs and switch from\n# `features` to `schema`.\ndef on_demand_feature_view(\n *args,\n features: Optional[List[Feature]] = None,\n sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,\n inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,\n schema: Optional[List[Field]] = None,\n description: str = \"\",\n tags: Optional[Dict[str, str]] = None,\n owner: str = \"\",\n):\n \"\"\"\n Creates an OnDemandFeatureView object with the given user function as udf.\n\n Args:\n features (deprecated): The list of features in the output of the on demand\n feature view, after the transformation has been applied.\n sources (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer to them by name.\n inputs (optional): A map from input source names to the actual input sources,\n which may be feature views, feature view projections, or request data sources.\n These sources serve as inputs to the udf, which will refer 
to them by name.\n schema (optional): The list of features in the output of the on demand feature\n view, after the transformation has been applied.\n description (optional): A human-readable description.\n tags (optional): A dictionary of key-value pairs to store arbitrary metadata.\n owner (optional): The owner of the on demand feature view, typically the email\n of the primary maintainer.\n \"\"\"\n positional_attributes = [\"features\", \"inputs\"]\n\n _schema = schema or []\n if len(_schema) == 0 and features is not None:\n _schema = [Field.from_feature(feature) for feature in features]\n if features is not None:\n warnings.warn(\n (\n \"The `features` parameter is being deprecated in favor of the `schema` parameter. \"\n \"Please switch from using `features` to `schema`. This will also requiring switching \"\n \"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not \"\n \"support the `features` parameter.\"\n ),\n DeprecationWarning,\n )\n\n _sources = sources or inputs\n if inputs and sources:\n raise ValueError(\"At most one of `sources` or `inputs` can be specified.\")\n elif inputs:\n warnings.warn(\n (\n \"The `inputs` parameter is being deprecated. Please use `sources` instead. \"\n \"Feast 0.21 and onwards will not support the `inputs` parameter.\"\n ),\n DeprecationWarning,\n )\n\n if args:\n warnings.warn(\n (\n \"On demand feature view parameters should be specified as keyword arguments \"\n \"instead of positional arguments. Feast 0.23 and onwards will not support \"\n \"positional arguments in on demand feature view definitions.\"\n ),\n DeprecationWarning,\n )\n if len(args) > len(positional_attributes):\n raise ValueError(\n f\"Only {', '.join(positional_attributes)} are allowed as positional args \"\n f\"when defining feature views, for backwards compatibility.\"\n )\n if len(args) >= 1:\n _schema = args[0]\n # Convert Features to Fields.\n if len(_schema) > 0 and isinstance(_schema[0], Feature):\n _schema = [Field.from_feature(feature) for feature in _schema]\n warnings.warn(\n (\n \"The `features` parameter is being deprecated in favor of the `schema` parameter. \"\n \"Please switch from using `features` to `schema`. This will also requiring switching \"\n \"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not \"\n \"support the `features` parameter.\"\n ),\n DeprecationWarning,\n )\n if len(args) >= 2:\n _sources = args[1]\n warnings.warn(\n (\n \"The `inputs` parameter is being deprecated. Please use `sources` instead. \"\n \"Feast 0.21 and onwards will not support the `inputs` parameter.\"\n ),\n DeprecationWarning,\n )\n\n if not _sources:\n raise ValueError(\"The `sources` parameter must be specified.\")\n\n def decorator(user_function):\n on_demand_feature_view_obj = OnDemandFeatureView(\n name=user_function.__name__,\n sources=_sources,\n schema=_schema,\n udf=user_function,\n description=description,\n tags=tags,\n owner=owner,\n )\n functools.update_wrapper(\n wrapper=on_demand_feature_view_obj, wrapped=user_function\n )\n return on_demand_feature_view_obj\n\n return decorator\n"
] |
[
[
"pandas.DataFrame",
"pandas.Series"
]
] |
lianapanatau/BERT-for-RRC-ABSA
|
[
"d31d81d5f9dce594a23f256199988fc2a11ce016",
"d31d81d5f9dce594a23f256199988fc2a11ce016"
] |
[
"pytorch-pretrained-bert/src/gen_pt_squad.py",
"transformers/analab/plot/neuron.py"
] |
[
"# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport logging\nimport argparse\nimport random\nimport json\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\n\nimport squad_data_utils as data_utils\nimport modelconfig\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef gen(args):\n\n tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model] )\n\n train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, \"train.json\"), is_training=True)\n \n train_features = data_utils.convert_examples_to_features(\n train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num orig examples = %d\", len(train_examples))\n logger.info(\" Num split examples = %d\", len(train_features))\n\n input_ids_np = np.array([f.input_ids for f in train_features], dtype=np.int16)\n segment_ids_np = np.array([f.segment_ids for f in train_features], dtype=np.int16)\n input_mask_np = np.array([f.input_mask for f in train_features], dtype=np.int16)\n start_positions_np = np.array([f.start_position for f in train_features], dtype=np.int16)\n end_positions_np = np.array([f.end_position for f in train_features], dtype=np.int16)\n\n np.savez_compressed(os.path.join(args.output_dir, \"data.npz\"), \n input_ids=input_ids_np, \n segment_ids = segment_ids_np, \n input_mask = input_mask_np, \n start_positions = start_positions_np, \n end_positions = end_positions_np)\n \n #>>>>> validation\n valid_examples=data_utils.read_squad_examples(os.path.join(args.input_dir,\"dev.json\"), is_training=True)\n\n valid_features = data_utils.convert_examples_to_features(\n valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)\n \n logger.info(\" Num orig examples = %d\", len(valid_examples))\n logger.info(\" Num split examples = %d\", len(valid_features))\n\n valid_input_ids_np = np.array([f.input_ids for f in valid_features], dtype=np.int16)\n valid_segment_ids_np = np.array([f.segment_ids for f in valid_features], dtype=np.int16)\n valid_input_mask_np = np.array([f.input_mask for f in valid_features], dtype=np.int16)\n valid_start_positions_np = np.array([f.start_position for f in valid_features], dtype=np.int16)\n valid_end_positions_np = np.array([f.end_position for f in valid_features], dtype=np.int16)\n \n np.savez_compressed(os.path.join(args.output_dir, \"dev.npz\"), \n input_ids=valid_input_ids_np, \n 
segment_ids = valid_segment_ids_np, \n input_mask = valid_input_mask_np, \n start_positions = valid_start_positions_np, \n end_positions = valid_end_positions_np)\n #<<<<< end of validation declaration\n\ndef main(): \n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--bert-model\", default='bert-base', type=str)\n\n parser.add_argument(\"--input_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=320,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n \n parser.add_argument('--seed',\n type=int,\n default=0,\n help=\"random seed for initialization\")\n \n parser.add_argument('--doc_stride',\n type=int,\n default=128)\n \n parser.add_argument('--max_query_length',\n type=int,\n default=30)\n \n parser.add_argument('--max_answer_length',\n type=int,\n default=30)\n \n \n args = parser.parse_args()\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n os.makedirs(args.output_dir, exist_ok=True)\n gen(args)\n \nif __name__==\"__main__\":\n main()",
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport shutil\nimport matplotlib\nimport random\n\nfrom torch.optim.optimizer import Optimizer, required\nfrom torch.distributions import Categorical\n\nfrom matplotlib import pyplot as plt\n\nfrom .plot import Plot\n\n\nclass NeuronPlot(Plot):\n def __init__(self, fn, width=25, height=15):\n super().__init__(fn, width, height)\n self.width = width\n self.height = height\n\n def plot(self, log_reg):\n plt.tick_params(axis='both', which='major', labelsize=20)\n plt.plot(range(len(log_reg.coef_[0])), log_reg.coef_.T)\n plt.xlabel('Neuron Index', size=20)\n plt.ylabel('Neuron Weight', size=20)\n"
] |
[
[
"numpy.random.seed",
"numpy.array",
"torch.manual_seed"
],
[
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
anukaal/opytimizer
|
[
"5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9"
] |
[
"opytimizer/optimizers/social/qsa.py"
] |
[
"\"\"\"Queuing Search Algorithm.\n\"\"\"\n\nimport copy\n\nimport numpy as np\n\nimport opytimizer.math.random as r\nimport opytimizer.utils.constant as c\nimport opytimizer.utils.logging as l\nfrom opytimizer.core import Optimizer\n\nlogger = l.get_logger(__name__)\n\n\nclass QSA(Optimizer):\n \"\"\"A QSA class, inherited from Optimizer.\n\n This is the designed class to define QSA-related\n variables and methods.\n\n References:\n J. Zhang et al. Queuing search algorithm: A novel metaheuristic algorithm\n for solving engineering optimization problems.\n Applied Mathematical Modelling (2018).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: Optimizer -> QSA.')\n\n # Overrides its parent class with the receiving params\n super(QSA, self).__init__()\n\n # Builds the class\n self.build(params)\n\n logger.info('Class overrided.')\n\n def _calculate_queue(self, n_agents, t_1, t_2, t_3):\n \"\"\"Calculates the number of agents that belongs to each queue.\n\n Args:\n n_agents (int): Number of agents.\n t_1 (float): Fitness value of first agent in the population.\n t_2 (float): Fitness value of second agent in the population.\n t_3 (float): Fitness value of third agent in the population.\n\n Returns:\n The number of agents in first, second and third queues.\n\n \"\"\"\n\n # Checks if potential service time is bigger than `epsilon`\n if t_1 > c.EPSILON:\n # Calculates the proportion of agents in first, second and third queues\n n_1 = (1 / t_1) / ((1 / t_1) + (1 / t_2) + (1 / t_3))\n n_2 = (1 / t_2) / ((1 / t_1) + (1 / t_2) + (1 / t_3))\n n_3 = (1 / t_3) / ((1 / t_1) + (1 / t_2) + (1 / t_3))\n\n # If the potential service time is smaller than `epsilon`\n else:\n # Each queue will have 1/3 ratio\n n_1 = 1 / 3\n n_2 = 1 / 3\n n_3 = 1 / 3\n\n # Calculates the number of agents that belongs to each queue\n q_1 = int(n_1 * n_agents)\n q_2 = int(n_2 * n_agents)\n q_3 = int(n_3 * n_agents)\n\n return q_1, q_2, q_3\n\n def _business_one(self, agents, function, beta):\n \"\"\"Performs the first business phase.\n\n Args:\n agents (list): List of agents.\n function (Function): A Function object that will be used as the objective function.\n beta (float): Range of fluctuation.\n\n \"\"\"\n\n # Sorts agents\n agents.sort(key=lambda x: x.fit)\n\n # Copies temporary agents to represent `A_1`, `A_2` and `A_3`\n A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])\n\n # Calculates the number of agents in each queue\n q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)\n\n # Represents the update patterns by eq. 4 and eq. 
5\n case = None\n\n # Iterates through all agents\n for i, agent in enumerate(agents):\n # Creates another temporary agent\n a = copy.deepcopy(agent)\n\n # If index is smaller than the number of agents in first queue\n if i < q_1:\n # If it is the first agent in first queue\n if i == 0:\n # Defines the case as one\n case = 1\n\n # `A` will receive a copy from `A_1`\n A = copy.deepcopy(A_1)\n\n # If index is between first and second queues\n elif q_1 <= i < q_1 + q_2:\n # If index is the first agent in second queue\n if i == q_1:\n # Defines the case as one\n case = 1\n\n # `A` will receive a copy from `A_2`\n A = copy.deepcopy(A_2)\n\n # If index is between second and third queues\n else:\n # If index is the first agent in third queue\n if i == q_1 + q_2:\n # Defines the case as one\n case = 1\n\n # `A` will receive a copy from `A_3`\n A = copy.deepcopy(A_3)\n\n # Generates a uniform random number\n alpha = r.generate_uniform_random_number(-1, 1)\n\n # Generates an Erlang distribution\n E = r.generate_gamma_random_number(1, 0.5, (agent.n_variables, agent.n_dimensions))\n\n # If case is defined as one\n if case == 1:\n # Generates an Erlang number\n e = r.generate_gamma_random_number(1, 0.5, 1)\n\n # Calculates the fluctuation (eq. 6)\n F_1 = beta * alpha * (E * np.fabs(A.position - a.position)) + \\\n e * (A.position - a.position)\n\n # Updates the temporary agent's position (eq. 4)\n a.position = A.position + F_1\n\n # Evaluates the agent\n a.fit = function(a.position)\n\n # If new fitness is better than current agent's fitness\n if a.fit < agent.fit:\n # Replaces the current agent's position and fitness\n agent.position = copy.deepcopy(a.position)\n agent.fit = copy.deepcopy(a.fit)\n\n # Defines the case as one\n case = 1\n\n # If new fitness is worse than current agent's fitness\n else:\n # Defines the case as two\n case = 2\n\n # If case is defined as two\n else:\n # Calculates the fluctuation (eq. 7)\n F_2 = beta * alpha * (E * np.fabs(A.position - a.position))\n\n # Updates the temporary agent's position (eq. 
5)\n a.position += F_2\n\n # Evaluates the agent\n a.fit = function(a.position)\n\n # If new fitness is better than current agent's fitness\n if a.fit < agent.fit:\n # Replaces the current agent's position and fitness\n agent.position = copy.deepcopy(a.position)\n agent.fit = copy.deepcopy(a.fit)\n\n # Defines the case as two\n case = 2\n\n # If new fitness is worse than current agent's fitness\n else:\n # Defines the case as one\n case = 1\n\n def _business_two(self, agents, function):\n \"\"\"Performs the second business phase.\n\n Args:\n agents (list): List of agents.\n function (Function): A Function object that will be used as the objective function.\n\n \"\"\"\n\n # Sorts agents\n agents.sort(key=lambda x: x.fit)\n\n # Copies temporary agents to represent `A_1`, `A_2` and `A_3`\n A_1, A_2, A_3 = copy.deepcopy(agents[0]), copy.deepcopy(agents[1]), copy.deepcopy(agents[2])\n\n # Calculates the number of agents in each queue\n q_1, q_2, _ = self._calculate_queue(len(agents), A_1.fit, A_2.fit, A_3.fit)\n\n # Calculates the probability of handling the business\n pr = [i / len(agents) for i in range(1, len(agents) + 1)]\n\n # Calculates the confusion degree\n cv = A_1.fit / (A_2.fit + A_3.fit + c.EPSILON)\n\n # Iterates through all agents\n for i, agent in enumerate(agents):\n # Creates another temporary agent\n a = copy.deepcopy(agent)\n\n # If index is smaller than the number of agents in first queue\n if i < q_1:\n # `A` will receive a copy from `A_1`\n A = copy.deepcopy(A_1)\n\n # If index is between first and second queues\n elif q_1 <= i < q_1 + q_2:\n # `A` will receive a copy from `A_2`\n A = copy.deepcopy(A_2)\n\n # If index is between second and third queues\n else:\n # `A` will receive a copy from `A_3`\n A = copy.deepcopy(A_3)\n\n # Generates a uniform random number\n r1 = r.generate_uniform_random_number()\n\n # If random number is smaller than probability of handling the business\n if r1 < pr[i]:\n # Randomly selects two individuals\n A_1, A_2 = np.random.choice(agents, 2, replace=False)\n\n # Generates another uniform random number\n r2 = r.generate_uniform_random_number()\n\n # Generates an Erlang number\n e = r.generate_gamma_random_number(1, 0.5, 1)\n\n # If random number is smaller than confusion degree\n if r2 < cv:\n # Calculates the fluctuation (eq. 14)\n F_1 = e * (A_1.position - A_2.position)\n\n # Update agent's position (eq. 12)\n a.position += F_1\n\n # If random number is bigger than confusion degree\n else:\n # Calculates the fluctuation (eq. 15)\n F_2 = e * (A.position - A_1.position)\n\n # Update agent's position (eq. 
13)\n a.position += F_2\n\n # Evaluates the agent\n a.fit = function(a.position)\n\n # If the new fitness is better than the current agent's fitness\n if a.fit < agent.fit:\n # Replaces the current agent's position and fitness\n agent.position = copy.deepcopy(a.position)\n agent.fit = copy.deepcopy(a.fit)\n\n def _business_three(self, agents, function):\n \"\"\"Performs the third business phase.\n\n Args:\n agents (list): List of agents.\n function (Function): A Function object that will be used as the objective function.\n\n \"\"\"\n\n # Sorts agents\n agents.sort(key=lambda x: x.fit)\n\n # Calculates the probability of handling the business\n pr = [i / len(agents) for i in range(1, len(agents) + 1)]\n\n # Iterates through all agents\n for i, agent in enumerate(agents):\n # Creates another temporary agent\n a = copy.deepcopy(agent)\n\n # Iterates through all decision variables\n for j in range(agent.n_variables):\n # Generates a uniform random number\n r1 = r.generate_uniform_random_number()\n\n # If random number is smaller than probability of handling the business\n if r1 < pr[i]:\n # Randomly selects two individuals\n A_1, A_2 = np.random.choice(agents, 2, replace=False)\n\n # Generates an Erlang number\n e = r.generate_gamma_random_number(1, 0.5, 1)\n\n # Updates temporary agent's position (eq. 17)\n a.position[j] = A_1.position[j] + e * (A_2.position[j] - a.position[j])\n\n # Evaluates the agent\n a.fit = function(a.position)\n\n # If the new fitness is better than the current agent's fitness\n if a.fit < agent.fit:\n # Replaces the current agent's position and fitness\n agent.position = copy.deepcopy(a.position)\n agent.fit = copy.deepcopy(a.fit)\n\n def update(self, space, function, iteration, n_iterations):\n \"\"\"Wraps Queue Search Algorithm over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n function (Function): A Function object that will be used as the objective function.\n iteration (int): Current iteration.\n n_iterations (int): Maximum number of iterations.\n\n \"\"\"\n\n # Calculates the range of fluctuation.\n beta = np.exp(np.log(1 / (iteration + c.EPSILON)) * np.sqrt(iteration / n_iterations))\n\n # Performs the first business phase\n self._business_one(space.agents, function, beta)\n\n # Performs the second business phase\n self._business_two(space.agents, function)\n\n # Performs the third business phase\n self._business_three(space.agents, function)\n"
] |
[
[
"numpy.fabs",
"numpy.sqrt",
"numpy.random.choice",
"numpy.log"
]
] |
movermeyer/pandas_datareaders_unofficial
|
[
"458dcf473d070cd7686d53d4a9b479cbe0ab9218"
] |
[
"draft/truefx/truefx_tick.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests_cache\nimport datetime\nimport pandas as pd\nfrom datetime import timedelta\n\nimport pandas as pd\nfrom pandas.io.common import ZipFile\nfrom pandas.compat import BytesIO, StringIO, PY2\n\ndef main():\n expire_after = timedelta(days=1)\n if PY2:\n filename = 'cache_py2' \n else:\n filename = 'cache'\n session = requests_cache.CachedSession(cache_name=filename, expire_after=expire_after)\n\n dt = pd.to_datetime(\"2014-01-01\")\n symbol = \"AUD/USD\"\n symbol = symbol.replace(\"/\", \"\").upper()\n year = dt.year\n month = dt.month\n month_name = datetime.datetime(year=1970, month=month, day=1).strftime('%B').upper()\n #url = \"http://www.truefx.com/dev/data/2014/JANUARY-2014/AUDUSD-2014-01.zip\"\n url = \"http://www.truefx.com/dev/data/{year:04d}/{month_name}-{year:04d}/{symbol}-{year:04d}-{month:02d}.zip\".format(year=year, month=month, symbol=symbol, month_name=month_name)\n response = session.get(url)\n zip_data = BytesIO(response.content)\n filename = \"{symbol}-{year:04d}-{month:02d}.csv\".format(year=year, month=month, symbol=symbol)\n\n with ZipFile(zip_data, 'r') as zf:\n #filename = zf.namelist()[0]\n zfile = zf.open(filename)\n #print(zfile)\n #(symb, dt, ask, bid) = zfile.read().split(',') \n #print(zfile.__dict__)\n data = zfile.readlines()\n #df = pd.read_csv(zfile._fileobj) # ToFix: can't make it work correctly\n\n #return\n df = pd.DataFrame(data)\n #df = df[:100] # just for test\n df[0] = df[0].str.decode('utf8')\n df[0] = df[0].str.replace('\\n', '')\n df[0] = df[0].map(lambda s: s.split(','))\n df['Symbol'] = df[0].map(lambda t: t[0])\n df['Date'] = df[0].map(lambda t: pd.to_datetime(t[1]))\n df['Bid'] = df[0].map(lambda t: t[2]).astype(float)\n df['Ask'] = df[0].map(lambda t: t[3]).astype(float)\n del df[0]\n df = df.set_index('Date')\n print(df)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.compat.BytesIO",
"pandas.io.common.ZipFile"
]
] |
arthw/colorization
|
[
"e7f85ec307c9d27a16a87276beaaf2dee5492292"
] |
[
"interactive-deep-colorization/ui/gui_gamut.py"
] |
[
"import cv2\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom data import lab_gamut\nimport numpy as np\n\n\nclass GUIGamut(QWidget):\n def __init__(self, gamut_size=110):\n QWidget.__init__(self)\n self.gamut_size = gamut_size\n self.win_size = gamut_size * 2 # divided by 4\n self.setFixedSize(self.win_size, self.win_size)\n self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)\n self.reset()\n\n def set_gamut(self, l_in=50):\n self.l_in = l_in\n self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)\n self.update()\n\n def set_ab(self, color):\n self.color = color\n self.lab = lab_gamut.rgb2lab_1d(self.color)\n x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])\n self.pos = QPointF(x, y)\n self.update()\n\n def is_valid_point(self, pos):\n if pos is None:\n return False\n else:\n x = pos.x()\n y = pos.y()\n if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:\n return self.mask[y, x]\n else:\n return False\n\n def update_ui(self, pos):\n self.pos = pos\n a, b = self.ab_grid.xy2ab(pos.x(), pos.y())\n # get color we need L\n L = self.l_in\n lab = np.array([L, a, b])\n color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')\n self.emit(SIGNAL('update_color'), color)\n self.update()\n\n def paintEvent(self, event):\n painter = QPainter()\n painter.begin(self)\n painter.setRenderHint(QPainter.Antialiasing)\n painter.fillRect(event.rect(), Qt.white)\n if self.ab_map is not None:\n ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))\n qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)\n painter.drawImage(0, 0, qImg)\n\n painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))\n painter.drawLine(self.win_size/2, 0, self.win_size/2, self.win_size)\n painter.drawLine(0, self.win_size/2, self.win_size, self.win_size/2)\n if self.pos is not None:\n painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))\n w = 5\n x = self.pos.x()\n y = self.pos.y()\n painter.drawLine(x - w, y, x + w, y)\n painter.drawLine(x, y - w, x, y + w)\n painter.end()\n\n def mousePressEvent(self, event):\n pos = event.pos()\n\n if event.button() == Qt.LeftButton and self.is_valid_point(pos): # click the point\n self.update_ui(pos)\n self.mouseClicked = True\n\n def mouseMoveEvent(self, event):\n pos = event.pos()\n if self.is_valid_point(pos):\n if self.mouseClicked:\n self.update_ui(pos)\n\n def mouseReleaseEvent(self, event):\n self.mouseClicked = False\n\n def sizeHint(self):\n return QSize(self.win_size, self.win_size)\n\n def reset(self):\n self.ab_map = None\n self.mask = None\n self.color = None\n self.lab = None\n self.pos = None\n self.mouseClicked = False\n self.update()\n"
] |
[
[
"numpy.array"
]
] |
kmh4321/datasets
|
[
"286d7a8a5eb3e073f18f8fee4f774bafc23fb445"
] |
[
"tensorflow_datasets/audio/nsynth.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"NSynth Dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n_DESCRIPTION = \"\"\"\\\nThe NSynth Dataset is an audio dataset containing ~300k musical notes, each\nwith a unique pitch, timbre, and envelope. Each note is annotated with three\nadditional pieces of information based on a combination of human evaluation\nand heuristic algorithms: Source, Family, and Qualities.\n\"\"\"\n\n_FULL_DESCRIPTION = \"\"\"\\\nFull NSynth Dataset is split into train, valid, and test sets, with no\ninstruments overlapping between the train set and the valid/test sets.\n\"\"\"\n\n\n_GANSYNTH_DESCRIPTION = \"\"\"\\\nNSynth Dataset limited to acoustic instruments in the MIDI pitch interval\n[24, 84]. Uses alternate splits that have overlap in instruments (but not exact\nnotes) between the train set and valid/test sets. This variant was originally \nintroduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710).\n\"\"\"\n\n_F0_AND_LOUDNESS_ADDENDUM = \"\"\"\\\nThis version additionally contains estimates for F0 using CREPE\n(Kim et al., 2018) and A-weighted perceptual loudness. 
Both signals are provided\nat a frame rate of 250Hz.\n\"\"\"\n\n# From http://proceedings.mlr.press/v70/engel17a.html\n_CITATION = \"\"\"\\\n@InProceedings{pmlr-v70-engel17a,\n title = \t {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},\n author = \t {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},\n booktitle = \t {Proceedings of the 34th International Conference on Machine Learning},\n pages = \t {1068--1077},\n year = \t {2017},\n editor = \t {Doina Precup and Yee Whye Teh},\n volume = \t {70},\n series = \t {Proceedings of Machine Learning Research},\n address = \t {International Convention Centre, Sydney, Australia},\n month = \t {06--11 Aug},\n publisher = \t {PMLR},\n pdf = \t {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},\n url = \t {http://proceedings.mlr.press/v70/engel17a.html},\n}\n\"\"\"\n\n_NUM_SECS = 4\n_AUDIO_RATE = 16000 # 16 kHz\n_F0_AND_LOUDNESS_RATE = 250 # 250 Hz\n\n_INSTRUMENT_FAMILIES = [\n \"bass\", \"brass\", \"flute\", \"guitar\", \"keyboard\", \"mallet\", \"organ\", \"reed\",\n \"string\", \"synth_lead\", \"vocal\"]\n_INSTRUMENT_SOURCES = [\"acoustic\", \"electronic\", \"synthetic\"]\n_QUALITIES = [\n \"bright\",\n \"dark\",\n \"distortion\",\n \"fast_decay\",\n \"long_release\",\n \"multiphonic\",\n \"nonlinear_env\",\n \"percussive\",\n \"reverb\",\n \"tempo-synced\"]\n\n_BASE_DOWNLOAD_PATH = \"http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-\"\n\n_SPLITS = [\"train\", \"valid\", \"test\"]\n_SPLIT_SHARDS = {\n \"train\": 512,\n \"valid\": 32,\n \"test\": 8,\n}\n\n\nclass NsynthConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for NSynth Dataset.\"\"\"\n\n def __init__(self,\n gansynth_subset=False,\n estimate_f0_and_loudness=False,\n **kwargs):\n \"\"\"Constructs a NsynthConfig.\n\n Args:\n gansynth_subset: bool, whether to use the subset of the dataset introduced\n in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses\n acoustic-only instrument sources and limits the pitches to the interval\n [24, 84]. The train and test splits are also modified so that\n instruments (but not specific notes) overlap between them. 
See\n https://arxiv.org/abs/1902.08710 for more details.\n estimate_f0_and_loudness: bool, whether to estimate fundamental frequency\n (F0) and loudness for the audio (at 250 Hz) and add them to the set of\n features.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n name_parts = []\n if gansynth_subset:\n name_parts.append(\"gansynth_subset\")\n else:\n name_parts.append(\"full\")\n if estimate_f0_and_loudness:\n name_parts.append(\"f0_and_loudness\")\n super(NsynthConfig, self).__init__(\n name=\".\".join(name_parts),\n version=tfds.core.Version(\n \"1.1.0\", experiments={tfds.core.Experiment.S3: False}),\n **kwargs)\n self.gansynth_subset = gansynth_subset\n self.estimate_f0_and_loudness = estimate_f0_and_loudness\n\n\nclass Nsynth(tfds.core.BeamBasedBuilder):\n \"\"\"A large-scale and high-quality dataset of annotated musical notes.\"\"\"\n BUILDER_CONFIGS = [\n NsynthConfig(description=_FULL_DESCRIPTION),\n NsynthConfig(\n gansynth_subset=True,\n description=_GANSYNTH_DESCRIPTION),\n NsynthConfig(\n gansynth_subset=True,\n estimate_f0_and_loudness=True,\n description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM),\n ]\n\n def _info(self):\n features = {\n \"id\":\n tf.string,\n \"audio\":\n tfds.features.Tensor(\n shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32),\n \"pitch\":\n tfds.features.ClassLabel(num_classes=128),\n \"velocity\":\n tfds.features.ClassLabel(num_classes=128),\n \"instrument\": {\n # We read the list of labels in _split_generators.\n \"label\": tfds.features.ClassLabel(num_classes=1006),\n \"family\": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES),\n \"source\": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES),\n },\n \"qualities\": {quality: tf.bool for quality in _QUALITIES},\n }\n if self.builder_config.estimate_f0_and_loudness:\n f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,)\n features[\"f0\"] = {\n \"hz\":\n tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),\n \"midi\":\n tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),\n \"confidence\":\n tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)\n }\n features[\"loudness\"] = {\n \"db\":\n tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)\n }\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(features),\n homepage=\"https://g.co/magenta/nsynth-dataset\",\n citation=_CITATION,\n metadata=tfds.core.BeamMetadataDict(),\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns splits.\"\"\"\n\n dl_urls = {}\n dl_urls[\"examples\"] = {\n split: _BASE_DOWNLOAD_PATH + \"%s.tfrecord.tar\" % split\n for split in _SPLITS\n }\n dl_urls[\"instrument_labels\"] = (\n _BASE_DOWNLOAD_PATH + \"instrument_labels.txt\")\n if self.builder_config.gansynth_subset:\n dl_urls[\"gansynth_splits\"] = (\n _BASE_DOWNLOAD_PATH + \"gansynth_splits.csv\")\n dl_paths = dl_manager.download_and_extract(dl_urls)\n\n with tf.io.gfile.GFile(dl_paths[\"instrument_labels\"]) as f:\n instrument_labels = f.read().strip().splitlines()\n self.info.features[\"instrument\"][\"label\"].names = instrument_labels\n\n split_ids = {s: set() for s in _SPLITS}\n split_dirs = {s: [dl_paths[\"examples\"][s]] for s in _SPLITS}\n if self.builder_config.gansynth_subset:\n # Generator needs to see all original splits for each new split.\n split_dirs = {s: dl_paths[\"examples\"].values() for s in _SPLITS}\n with tf.io.gfile.GFile(dl_paths[\"gansynth_splits\"]) as f:\n reader = csv.DictReader(f)\n for row in reader:\n 
split_ids[row[\"split\"]].add(row[\"id\"])\n\n return [\n tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension\n name=split,\n num_shards=_SPLIT_SHARDS[split],\n gen_kwargs={\n \"tfrecord_dirs\": split_dirs[split],\n \"ids\": split_ids[split],\n \"split\": split,\n })\n for split in _SPLITS\n ]\n\n def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):\n \"\"\"Build PCollection of examples for split.\"\"\"\n beam = tfds.core.lazy_imports.apache_beam\n\n def _emit_base_example(ex):\n \"\"\"Maps an input example to a TFDS example.\"\"\"\n beam.metrics.Metrics.counter(split, \"base-examples\").inc()\n features = ex.features.feature\n return {\n \"id\": features[\"note_str\"].bytes_list.value[0],\n \"audio\":\n np.array(features[\"audio\"].float_list.value, dtype=np.float32),\n \"pitch\":\n features[\"pitch\"].int64_list.value[0],\n \"velocity\":\n features[\"velocity\"].int64_list.value[0],\n \"instrument\": {\n \"label\":\n tf.compat.as_text(\n features[\"instrument_str\"].bytes_list.value[0]),\n \"family\":\n tf.compat.as_text(\n features[\"instrument_family_str\"].bytes_list.value[0]),\n \"source\":\n tf.compat.as_text(\n features[\"instrument_source_str\"].bytes_list.value[0])\n },\n \"qualities\": {\n q: features[\"qualities\"].int64_list.value[i]\n for (i, q) in enumerate(_QUALITIES)\n }\n }\n\n def _in_split(ex, split_ids):\n if not split_ids or tf.compat.as_text(ex[\"id\"]) in split_ids:\n beam.metrics.Metrics.counter(split, \"in-split\").inc()\n return True\n return False\n\n def _estimate_f0(ex):\n \"\"\"Estimate the fundamental frequency using CREPE and add to example.\"\"\"\n ex = ex.copy()\n beam.metrics.Metrics.counter(split, \"estimate-f0\").inc()\n _, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict(\n ex[\"audio\"],\n sr=_AUDIO_RATE,\n viterbi=True,\n step_size=1000 / _F0_AND_LOUDNESS_RATE,\n verbose=0)\n f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)\n # Set -infs introduced by hz_to_midi to 0.\n f0_midi[f0_midi == -np.inf] = 0\n # Set nans to 0 in confidence.\n f0_confidence = np.nan_to_num(f0_confidence)\n ex[\"f0\"] = {\n \"hz\": f0_hz.astype(np.float32),\n \"midi\": f0_midi.astype(np.float32),\n \"confidence\": f0_confidence.astype(np.float32),\n }\n return ex\n\n def _compute_loudness(ex):\n \"\"\"Compute loudness and add to example.\"\"\"\n ex = ex.copy()\n beam.metrics.Metrics.counter(split, \"compute-loudness\").inc()\n librosa = tfds.core.lazy_imports.librosa\n n_fft = 2048\n amin = 1e-15\n top_db = 200.0\n stft = librosa.stft(\n ex[\"audio\"],\n n_fft=n_fft,\n hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE))\n loudness_db = librosa.perceptual_weighting(\n np.abs(stft)**2,\n librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft),\n amin=amin,\n top_db=top_db)\n # Average across freq in linear scale.\n mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)\n mean_loudness_db = librosa.amplitude_to_db(\n mean_loudness_amp,\n amin=amin,\n top_db=top_db)\n ex[\"loudness\"] = {\"db\": mean_loudness_db.astype(np.float32)}\n return ex\n\n examples = (\n pipeline\n | beam.Create([os.path.join(dir_, \"*\") for dir_ in tfrecord_dirs])\n | beam.io.tfrecordio.ReadAllFromTFRecord(\n coder=beam.coders.ProtoCoder(tf.train.Example))\n | beam.Map(_emit_base_example)\n | beam.Filter(_in_split, split_ids=ids))\n if self.builder_config.estimate_f0_and_loudness:\n examples = (\n examples\n | beam.Reshuffle()\n | beam.Map(_estimate_f0)\n | beam.Map(_compute_loudness))\n if split == tfds.Split.TRAIN:\n # Output 
mean and variance of loudness for TRAIN split.\n loudness = examples | beam.Map(lambda x: np.mean(x[\"loudness\"][\"db\"]))\n loudness_mean = (\n loudness\n | \"loudness_mean\" >> beam.combiners.Mean.Globally())\n loudness_variance = (\n loudness\n | beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2,\n ld_mean=beam.pvalue.AsSingleton(loudness_mean))\n | \"loudness_variance\" >> beam.combiners.Mean.Globally())\n self.info.metadata[\"loudness_db_mean\"] = loudness_mean\n self.info.metadata[\"loudness_db_variance\"] = loudness_variance\n\n return examples\n"
] |
[
[
"numpy.array",
"tensorflow.io.gfile.GFile",
"numpy.nan_to_num",
"tensorflow.compat.as_text",
"numpy.mean",
"numpy.abs"
]
] |
RafaelPedruzzi/IA-2019-2
|
[
"7d99a8f02ec826403bd48c6eba574d802e558c36"
] |
[
"trab2/probOneR.py"
] |
[
"## -------------------------------------------------------- ##\n# Trab 2 IA 2019-2\n#\n# Rafael Belmock Pedruzzi\n#\n# probOneR.py: implementation of the probabilistic OneR classifier.\n#\n# Python version: 3.7.4\n## -------------------------------------------------------- ##\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import euclidean_distances\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom sklearn.metrics.cluster import contingency_matrix\nfrom sklearn.metrics import confusion_matrix\nfrom itertools import product, zip_longest, accumulate\nfrom random import random\n\nclass Prob_OneR(BaseEstimator, ClassifierMixin):\n\n def fit(self, X, y):\n # check that x and y have correct shape\n X, y = check_X_y(X,y)\n # store the classes seen during fit\n self.classes_ = unique_labels(y)\n\n self.y_ = y\n\n kbd = KBinsDiscretizer(n_bins = len(np.unique(y)), encode='ordinal')\n X = kbd.fit_transform(X)\n self.X_ = X\n self.kbd_ = kbd\n\n cm_list = []\n hits = []\n for i in X.T:\n cm = contingency_matrix(i, y)\n cm_list.append(cm)\n hits.append(sum(max(k) for k in cm))\n\n rule = np.argmax(hits) # chosen rule\n self.r_ = rule\n\n rule_cm = cm_list[rule]\n class_selector = []\n for i, c in enumerate(rule_cm):\n cSum = sum(c)\n probRatio = [ (i/cSum) for i in c]\n # Building the \"partitions\" of the roulette:\n probRatio = list(accumulate(probRatio))\n class_selector.append(probRatio)\n self.class_selector = class_selector\n\n # Return the classifier\n return self\n\n def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n X = self.kbd_.transform(X)\n\n y = []\n for i in X[:,self.r_]:\n probRatio = self.class_selector[int(i)]\n # Selecting a random element:\n selector = random()\n for i in range(len(probRatio)):\n if selector <= probRatio[i]:\n y.append(self.classes_[i])\n break\n return y\n\n\n# from sklearn import datasets\n# from sklearn.model_selection import train_test_split, cross_val_score\n# from sklearn.metrics import f1_score\n\n# nn= Prob_OneR()\n# iris = datasets.load_iris()\n# x_train,x_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size = 0.4, random_state = 0)\n# nn.fit(x_train, y_train)\n# y_pred = nn.predict(x_test)\n# print(y_test)\n# print(y_pred)\n# score = cross_val_score(nn, x_train, y_train, cv = 5)\n# print(score)\n"
] |
[
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_X_y",
"sklearn.utils.multiclass.unique_labels",
"sklearn.utils.validation.check_array",
"sklearn.metrics.cluster.contingency_matrix",
"numpy.argmax",
"numpy.unique"
]
] |
Jiawei-Yang/TumorCP
|
[
"6053c75642fcbc0fb0424320ab3d758f24883b0e",
"6053c75642fcbc0fb0424320ab3d758f24883b0e"
] |
[
"nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py",
"nnunet/experiment_planning/change_batch_size.py"
] |
[
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer\nfrom nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2\nfrom nnunet.utilities.nd_softmax import softmax_helper\n\n\nclass nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):\n def initialize_network(self):\n if self.threeD:\n cfg = get_default_network_config(3, None, norm_type=\"in\")\n\n else:\n cfg = get_default_network_config(1, None, norm_type=\"in\")\n\n stage_plans = self.plans['plans_per_stage'][self.stage]\n conv_kernel_sizes = stage_plans['conv_kernel_sizes']\n blocks_per_stage_encoder = stage_plans['num_blocks_encoder']\n blocks_per_stage_decoder = stage_plans['num_blocks_decoder']\n pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']\n\n self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,\n pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,\n blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))\n\n if torch.cuda.is_available():\n self.network.cuda()\n self.network.inference_apply_nonlin = softmax_helper\n\n def setup_DA_params(self):\n \"\"\"\n net_num_pool_op_kernel_sizes is different in resunet\n \"\"\"\n super().setup_DA_params()\n self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(\n np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,\n save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n ds = self.network.decoder.deep_supervision\n self.network.decoder.deep_supervision = False\n ret = nnUNetTrainer.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,\n step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,\n overwrite=overwrite, validation_folder_name=validation_folder_name,\n debug=debug, all_in_gpu=all_in_gpu,\n segmentation_export_kwargs=segmentation_export_kwargs,\n run_postprocessing_on_folds=run_postprocessing_on_folds)\n self.network.decoder.deep_supervision = ds\n return ret\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, 
pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:\n ds = self.network.decoder.deep_supervision\n self.network.decoder.deep_supervision = False\n ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size,\n use_gaussian=use_gaussian,\n pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs,\n all_in_gpu=all_in_gpu,\n verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.decoder.deep_supervision = ds\n return ret\n\n def run_training(self):\n self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we\n # want at the start of the training\n ds = self.network.decoder.deep_supervision\n self.network.decoder.deep_supervision = True\n ret = nnUNetTrainer.run_training(self)\n self.network.decoder.deep_supervision = ds\n return ret\n\n\nnnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet\nnnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet\nnnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet\nnnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet\n",
"from batchgenerators.utilities.file_and_folder_operations import *\nimport numpy as np\n\nif __name__ == '__main__':\n # input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'\n # output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'\n # a = load_pickle(input_file)\n # a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))\n # save_pickle(a, output_file)\n \n input_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'\n output_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'\n a = load_pickle(input_file)\n print(a['plans_per_stage'])\n # a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))\n a['plans_per_stage'][0]['patch_size'] = np.array([128, 128, 128])\n a['plans_per_stage'][1]['patch_size'] = np.array([128, 128, 128])\n a['plans_per_stage'][0]['num_pool_per_axis'] = np.array([5, 5, 5])\n a['plans_per_stage'][1]['num_pool_per_axis'] = np.array([5, 5, 5])\n a['plans_per_stage'][0]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]\n a['plans_per_stage'][1]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]\n a['plans_per_stage'][0]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]\n a['plans_per_stage'][1]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]\n save_pickle(a, output_file)"
] |
[
[
"torch.cuda.is_available",
"numpy.vstack"
],
[
"numpy.array"
]
] |
priumoraes/tpu
|
[
"c7fbe70f00956e802c23c9e831d7482613968fa7",
"c7fbe70f00956e802c23c9e831d7482613968fa7"
] |
[
"models/official/amoeba_net/amoeba_net.py",
"models/experimental/mnasnet/mnasnet_main.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=line-too-long\nr\"\"\"TensorFlow AmoebaNet Example.\n\nGCP Run Example\npython amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \\\n--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \\\n--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \\\n--tpu=huangyp-tpu-0\n\"\"\"\n# pylint: enable=line-too-long\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport io\nimport itertools\nimport math\nimport os\nfrom absl import app\nfrom absl import flags\nimport absl.logging as _logging # pylint: disable=unused-import\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nimport amoeba_net_model as model_lib\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_log_pb2\n\n# Cloud TPU Cluster Resolvers\nflags.DEFINE_string(\n 'tpu', default=None,\n help='The Cloud TPU to use for training. This should be either the name '\n 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')\n\nflags.DEFINE_string(\n 'gcp_project', default=None,\n help='Project name for the Cloud TPU-enabled project. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\nflags.DEFINE_string(\n 'tpu_zone', default=None,\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\n# General Parameters\nflags.DEFINE_integer(\n 'num_shards', 8,\n 'Number of shards (TPU cores).')\n\nflags.DEFINE_integer(\n 'distributed_group_size', 1,\n help='Size of the distributed batch norm. group.'\n 'Default is normalization over local examples only.'\n 'When set to a value greater than 1, it will enable'\n 'a distribtued batch norm. 
To enable a global batch norm.'\n 'set distributed_group_size to FLAGS.num_shards')\n\nflags.DEFINE_bool(\n 'use_tpu', True,\n 'Use TPUs rather than CPU or GPU.')\n\nflags.DEFINE_string(\n 'data_dir', '',\n 'Directory where input data is stored')\n\nflags.DEFINE_string(\n 'model_dir', None,\n 'Directory where model output is stored')\n\nflags.DEFINE_string(\n 'export_dir', None,\n 'The directory where the exported SavedModel will be stored.')\n\nflags.DEFINE_bool(\n 'export_to_tpu', False,\n help='Whether to export additional metagraph with \"serve, tpu\" tags'\n ' in addition to \"serve\" only metagraph.')\n\nflags.DEFINE_integer(\n 'iterations_per_loop', 500,\n 'Number of iterations per TPU training loop.')\n\nflags.DEFINE_integer(\n 'train_batch_size', 256,\n 'Global (not per-shard) batch size for training')\n\nflags.DEFINE_integer(\n 'eval_batch_size', 256,\n 'Global (not per-shard) batch size for evaluation')\n\nflags.DEFINE_float(\n 'num_epochs', 48.,\n 'Number of steps use for training.')\n\nflags.DEFINE_float(\n 'num_epochs_per_eval', 1.,\n 'Number of training epochs to run between evaluations.')\n\nflags.DEFINE_string(\n 'mode', 'train_and_eval',\n 'Mode to run: train, eval, train_and_eval, or predict')\n\nflags.DEFINE_integer(\n 'save_checkpoints_steps', None,\n 'Interval (in steps) at which the model data '\n 'should be checkpointed. Set to 0 to disable.')\n\nflags.DEFINE_bool(\n 'enable_hostcall', True,\n 'Skip the host_call which is executed every training step. This is'\n ' generally used for generating training summaries (train loss,'\n ' learning rate, etc...). When --enable_hostcall=True, there could'\n ' be a performance drop if host_call function is slow and cannot'\n ' keep up with the TPU-side computation.')\n\n# Model specific parameters\nflags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')\nflags.DEFINE_float(\n 'aux_scaling', 0.4, 'Scaling factor of aux_head')\nflags.DEFINE_float(\n 'batch_norm_decay', 0.9, 'Batch norm decay.')\nflags.DEFINE_float(\n 'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')\nflags.DEFINE_float(\n 'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')\nflags.DEFINE_float(\n 'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')\nflags.DEFINE_string(\n 'drop_connect_version', None, 'Drop connect version.')\nflags.DEFINE_string(\n 'cell_name', 'amoeba_net_d', 'Which network to run.')\nflags.DEFINE_integer(\n 'num_cells', 12, 'Total number of cells.')\nflags.DEFINE_integer(\n 'reduction_size', 256, 'Default cell reduction size.')\nflags.DEFINE_integer(\n 'stem_reduction_size', 32, 'Stem filter size.')\nflags.DEFINE_float(\n 'weight_decay', 4e-05, 'Weight decay for slim model.')\nflags.DEFINE_integer(\n 'num_label_classes', 1001, 'The number of classes that images fit into.')\n\n# Training hyper-parameters\nflags.DEFINE_float(\n 'lr', 0.64, 'Learning rate.')\nflags.DEFINE_string(\n 'optimizer', 'rmsprop',\n 'Optimizer (one of sgd, rmsprop, momentum)')\nflags.DEFINE_float(\n 'moving_average_decay', 0.9999,\n 'moving average decay rate')\nflags.DEFINE_float(\n 'lr_decay_value', 0.9,\n 'Exponential decay rate used in learning rate adjustment')\nflags.DEFINE_integer(\n 'lr_num_epochs_per_decay', 1,\n 'Exponential decay epochs used in learning rate adjustment')\nflags.DEFINE_string(\n 'lr_decay_method', 'exponential',\n 'Method of decay: exponential, cosine, constant, stepwise')\nflags.DEFINE_float(\n 'lr_warmup_epochs', 3.0,\n 'Learning rate increased from zero linearly to lr for the first '\n 
'lr_warmup_epochs.')\nflags.DEFINE_float('gradient_clipping_by_global_norm', 0,\n 'gradient_clipping_by_global_norm')\n\nflags.DEFINE_integer(\n 'image_size', 299, 'Size of image, assuming image height and width.')\n\nflags.DEFINE_integer(\n 'num_train_images', 1281167, 'The number of images in the training set.')\nflags.DEFINE_integer(\n 'num_eval_images', 50000, 'The number of images in the evaluation set.')\n\nflags.DEFINE_bool(\n 'use_bp16', True, 'If True, use bfloat16 for activations')\n\nflags.DEFINE_integer(\n 'eval_timeout', 60*60*24,\n 'Maximum seconds between checkpoints before evaluation terminates.')\n\n# Inference configuration.\nflags.DEFINE_bool(\n 'inference_with_all_cores', True, 'Whether to round-robin'\n 'among all cores visible to the host for TPU inference.')\nflags.DEFINE_bool(\n 'add_warmup_requests', True,\n 'Whether to add warmup requests into the export saved model dir,'\n 'especially for TPU inference.')\nflags.DEFINE_string('model_name', 'amoeba_net',\n 'Serving model name used for the model server.')\nflags.DEFINE_multi_integer(\n 'inference_batch_sizes', [8],\n 'Known inference batch sizes used to warm up for each core.')\n\nFLAGS = flags.FLAGS\n\n\ndef build_run_config():\n \"\"\"Return RunConfig for TPU estimator.\"\"\"\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu,\n zone=FLAGS.tpu_zone,\n project=FLAGS.gcp_project)\n\n eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size\n iterations_per_loop = (eval_steps if FLAGS.mode == 'eval'\n else FLAGS.iterations_per_loop)\n save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=FLAGS.model_dir,\n save_checkpoints_steps=save_checkpoints_steps,\n keep_checkpoint_max=None,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=FLAGS.num_shards,\n per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n ))\n return run_config\n\n\ndef build_image_serving_input_receiver_fn(shape,\n dtype=tf.float32):\n \"\"\"Returns a input_receiver_fn for raw images during serving.\"\"\"\n\n def _preprocess_image(encoded_image):\n \"\"\"Preprocess a single raw image.\"\"\"\n image = tf.image.decode_image(encoded_image, channels=shape[-1])\n image.set_shape(shape)\n return tf.cast(image, dtype)\n\n def serving_input_receiver_fn():\n image_bytes_list = tf.placeholder(\n shape=[None],\n dtype=tf.string,\n )\n images = tf.map_fn(\n _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)\n return tf.estimator.export.TensorServingInputReceiver(\n features=images, receiver_tensors=image_bytes_list)\n\n return serving_input_receiver_fn\n\n\ndef _encode_image(image_array, fmt='PNG'):\n \"\"\"encodes an (numpy) image array to string.\n\n Args:\n image_array: (numpy) image array\n fmt: image format to use\n\n Returns:\n encoded image string\n \"\"\"\n pil_image = Image.fromarray(image_array)\n image_io = io.BytesIO()\n pil_image.save(image_io, format=fmt)\n return image_io.getvalue()\n\n\ndef write_warmup_requests(savedmodel_dir,\n model_name,\n image_size,\n batch_sizes=None,\n num_requests=8):\n \"\"\"Writes warmup requests for inference into a tfrecord file.\n\n Args:\n savedmodel_dir: string, the file to the exported model folder.\n model_name: string, a model name used inside the model server.\n image_size: int, size of image, assuming image height and width.\n batch_sizes: list, a list of batch sizes to create different 
input requests.\n num_requests: int, number of requests per batch size.\n\n Raises:\n ValueError: if batch_sizes is not a valid integer list.\n \"\"\"\n if not isinstance(batch_sizes, list) or not batch_sizes:\n raise ValueError('batch sizes should be a valid non-empty list.')\n extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')\n tf.gfile.MkDir(extra_assets_dir)\n with tf.python_io.TFRecordWriter(\n os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:\n for batch_size in batch_sizes:\n for _ in range(num_requests):\n request = predict_pb2.PredictRequest()\n image = np.uint8(np.random.rand(image_size, image_size, 3) * 255)\n request.inputs['input'].CopyFrom(\n tf.make_tensor_proto(\n [_encode_image(image)] * batch_size, shape=[batch_size]))\n request.model_spec.name = model_name\n request.model_spec.signature_name = 'serving_default'\n log = prediction_log_pb2.PredictionLog(\n predict_log=prediction_log_pb2.PredictLog(request=request))\n writer.write(log.SerializeToString())\n\n\n# TODO(ereal): simplify this.\ndef override_with_flags(hparams):\n \"\"\"Overrides parameters with flag values.\"\"\"\n override_flag_names = [\n 'aux_scaling',\n 'train_batch_size',\n 'batch_norm_decay',\n 'batch_norm_epsilon',\n 'dense_dropout_keep_prob',\n 'drop_connect_keep_prob',\n 'drop_connect_version',\n 'eval_batch_size',\n 'gradient_clipping_by_global_norm',\n 'lr',\n 'lr_decay_method',\n 'lr_decay_value',\n 'lr_num_epochs_per_decay',\n 'moving_average_decay',\n 'image_size',\n 'num_cells',\n 'reduction_size',\n 'stem_reduction_size',\n 'num_epochs',\n 'num_epochs_per_eval',\n 'optimizer',\n 'enable_hostcall',\n 'use_aux_head',\n 'use_bp16',\n 'use_tpu',\n 'lr_warmup_epochs',\n 'weight_decay',\n 'num_shards',\n 'distributed_group_size',\n 'num_train_images',\n 'num_eval_images',\n 'num_label_classes',\n ]\n for flag_name in override_flag_names:\n flag_value = getattr(FLAGS, flag_name, 'INVALID')\n if flag_value == 'INVALID':\n tf.logging.fatal('Unknown flag %s.' % str(flag_name))\n if flag_value is not None:\n _set_or_add_hparam(hparams, flag_name, flag_value)\n\n\ndef build_hparams():\n \"\"\"Build tf.Hparams for training Amoeba Net.\"\"\"\n hparams = model_lib.build_hparams(FLAGS.cell_name)\n override_with_flags(hparams)\n return hparams\n\n\ndef _terminate_eval():\n tf.logging.info('Timeout passed with no new checkpoints ... 
terminating eval')\n return True\n\n\ndef _get_next_checkpoint():\n return tf.contrib.training.checkpoints_iterator(\n FLAGS.model_dir,\n timeout=FLAGS.eval_timeout,\n timeout_fn=_terminate_eval)\n\n\ndef _set_or_add_hparam(hparams, name, value):\n if getattr(hparams, name, None) is None:\n hparams.add_hparam(name, value)\n else:\n hparams.set_hparam(name, value)\n\n\ndef _load_global_step_from_checkpoint_dir(checkpoint_dir):\n try:\n checkpoint_reader = tf.train.NewCheckpointReader(\n tf.train.latest_checkpoint(checkpoint_dir))\n return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)\n except: # pylint: disable=bare-except\n return 0\n\n\ndef main(_):\n mode = FLAGS.mode\n data_dir = FLAGS.data_dir\n model_dir = FLAGS.model_dir\n hparams = build_hparams()\n\n estimator_parmas = {}\n\n train_steps_per_epoch = int(\n math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))\n eval_steps = hparams.num_eval_images // hparams.eval_batch_size\n eval_batch_size = (None if mode == 'train' else\n hparams.eval_batch_size)\n\n model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)\n\n if hparams.use_tpu:\n run_config = build_run_config()\n image_classifier = tf.contrib.tpu.TPUEstimator(\n model_fn=model.model_fn,\n use_tpu=True,\n config=run_config,\n params=estimator_parmas,\n predict_batch_size=eval_batch_size,\n train_batch_size=hparams.train_batch_size,\n eval_batch_size=eval_batch_size,\n export_to_tpu=FLAGS.export_to_tpu,\n experimental_exported_model_uses_all_cores=FLAGS\n .inference_with_all_cores)\n else:\n save_checkpoints_steps = (FLAGS.save_checkpoints_steps or\n FLAGS.iterations_per_loop)\n run_config = tf.estimator.RunConfig(\n model_dir=FLAGS.model_dir,\n save_checkpoints_steps=save_checkpoints_steps)\n image_classifier = tf.estimator.Estimator(\n model_fn=model.model_fn,\n config=run_config,\n params=estimator_parmas)\n\n # Input pipelines are slightly different (with regards to shuffling and\n # preprocessing) between training and evaluation.\n imagenet_train = model_lib.InputPipeline(\n is_training=True, data_dir=data_dir, hparams=hparams)\n imagenet_eval = model_lib.InputPipeline(\n is_training=False, data_dir=data_dir, hparams=hparams)\n\n if hparams.moving_average_decay < 1:\n eval_hooks = [model_lib.LoadEMAHook(model_dir,\n hparams.moving_average_decay)]\n else:\n eval_hooks = []\n\n if mode == 'eval':\n for checkpoint in _get_next_checkpoint():\n tf.logging.info('Starting to evaluate.')\n try:\n eval_results = image_classifier.evaluate(\n input_fn=imagenet_eval.input_fn,\n steps=eval_steps,\n hooks=eval_hooks,\n checkpoint_path=checkpoint)\n tf.logging.info('Evaluation results: %s' % eval_results)\n except tf.errors.NotFoundError:\n # skip checkpoint if it gets deleted prior to evaluation\n tf.logging.info('Checkpoint %s no longer exists ... skipping')\n elif mode == 'train_and_eval':\n current_step = _load_global_step_from_checkpoint_dir(model_dir)\n tf.logging.info('Starting training at step=%d.' 
% current_step)\n train_steps_per_eval = int(\n hparams.num_epochs_per_eval * train_steps_per_epoch)\n # Final Evaluation if training is finished.\n if current_step >= hparams.num_epochs * train_steps_per_epoch:\n eval_results = image_classifier.evaluate(\n input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)\n tf.logging.info('Evaluation results: %s' % eval_results)\n while current_step < hparams.num_epochs * train_steps_per_epoch:\n image_classifier.train(\n input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)\n current_step += train_steps_per_eval\n tf.logging.info('Starting evaluation at step=%d.' % current_step)\n eval_results = image_classifier.evaluate(\n input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)\n tf.logging.info('Evaluation results: %s' % eval_results)\n elif mode == 'predict':\n for checkpoint in _get_next_checkpoint():\n tf.logging.info('Starting prediction ...')\n time_hook = model_lib.SessionTimingHook()\n eval_hooks.append(time_hook)\n result_iter = image_classifier.predict(\n input_fn=imagenet_eval.input_fn,\n hooks=eval_hooks,\n checkpoint_path=checkpoint,\n yield_single_examples=False)\n results = list(itertools.islice(result_iter, eval_steps))\n tf.logging.info('Inference speed = {} images per second.'.format(\n time_hook.compute_speed(len(results) * eval_batch_size)))\n elif mode == 'train':\n current_step = _load_global_step_from_checkpoint_dir(model_dir)\n total_step = int(hparams.num_epochs * train_steps_per_epoch)\n if current_step < total_step:\n tf.logging.info('Starting training ...')\n image_classifier.train(\n input_fn=imagenet_train.input_fn,\n steps=total_step-current_step)\n else:\n tf.logging.info('Mode not found.')\n\n if FLAGS.export_dir is not None:\n tf.logging.info('Starting exporting saved model ...')\n serving_shape = [hparams.image_size, hparams.image_size, 3]\n export_path = image_classifier.export_saved_model(\n export_dir_base=FLAGS.export_dir,\n serving_input_receiver_fn=build_image_serving_input_receiver_fn(\n serving_shape),\n as_text=True)\n if FLAGS.add_warmup_requests:\n write_warmup_requests(\n export_path,\n FLAGS.model_name,\n hparams.image_size,\n batch_sizes=FLAGS.inference_batch_sizes)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train a MnasNet on ImageNet on TPU.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport tensorflow as tf\n\nimport imagenet_input\nimport mnasnet_models\nimport mnasnet_utils\nfrom tensorflow.contrib.tpu.python.tpu import async_checkpoint\nfrom tensorflow.contrib.training.python.training import evaluation\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.estimator import estimator\nfrom tensorflow.python.keras import backend as K\n\n\nFLAGS = flags.FLAGS\n\nFAKE_DATA_DIR = 'gs://cloud-tpu-test-datasets/fake_imagenet'\n\nflags.DEFINE_bool(\n 'use_tpu', default=True,\n help=('Use TPU to execute the model for training and evaluation. If'\n ' --use_tpu=false, will use whatever devices are available to'\n ' TensorFlow by default (e.g. CPU and GPU)'))\n\n# Cloud TPU Cluster Resolvers\nflags.DEFINE_string(\n 'tpu', default=None,\n help='The Cloud TPU to use for training. This should be either the name '\n 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')\n\nflags.DEFINE_string(\n 'gcp_project', default=None,\n help='Project name for the Cloud TPU-enabled project. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\nflags.DEFINE_string(\n 'tpu_zone', default=None,\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\n# Model specific flags\nflags.DEFINE_string(\n 'data_dir', default=FAKE_DATA_DIR,\n help=('The directory where the ImageNet input data is stored. Please see'\n ' the README.md for the expected data format.'))\n\nflags.DEFINE_string(\n 'model_dir', default=None,\n help=('The directory where the model and training/evaluation summaries are'\n ' stored.'))\n\nflags.DEFINE_string(\n 'model_name',\n default='mnasnet-a1',\n help=(\n 'The model name to select models among existing MnasNet configurations.'\n ))\n\nflags.DEFINE_string(\n 'mode', default='train_and_eval',\n help='One of {\"train_and_eval\", \"train\", \"eval\"}.')\n\nflags.DEFINE_integer(\n 'train_steps', default=437898,\n help=('The number of steps to use for training. Default is 437898 steps'\n ' which is approximately 350 epochs at batch size 1024. 
This flag'\n ' should be adjusted according to the --train_batch_size flag.'))\n\nflags.DEFINE_integer(\n 'input_image_size', default=224, help='Input image size.')\n\nflags.DEFINE_integer(\n 'train_batch_size', default=1024, help='Batch size for training.')\n\nflags.DEFINE_integer(\n 'eval_batch_size', default=1024, help='Batch size for evaluation.')\n\nflags.DEFINE_integer(\n 'num_train_images', default=1281167, help='Size of training data set.')\n\nflags.DEFINE_integer(\n 'num_eval_images', default=50000, help='Size of evaluation data set.')\n\nflags.DEFINE_integer(\n 'steps_per_eval', default=6255,\n help=('Controls how often evaluation is performed. Since evaluation is'\n ' fairly expensive, it is advised to evaluate as infrequently as'\n ' possible (i.e. up to --train_steps, which evaluates the model only'\n ' after finishing the entire training regime).'))\n\nflags.DEFINE_integer(\n 'eval_timeout',\n default=None,\n help='Maximum seconds between checkpoints before evaluation terminates.')\n\nflags.DEFINE_bool(\n 'skip_host_call', default=False,\n help=('Skip the host_call which is executed every training step. This is'\n ' generally used for generating training summaries (train loss,'\n ' learning rate, etc...). When --skip_host_call=false, there could'\n ' be a performance drop if host_call function is slow and cannot'\n ' keep up with the TPU-side computation.'))\n\nflags.DEFINE_integer(\n 'iterations_per_loop', default=1251,\n help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'\n ' If the number of iterations in the loop would exceed the number of'\n ' train steps, the loop will exit before reaching'\n ' --iterations_per_loop. The larger this value is, the higher the'\n ' utilization on the TPU.'))\n\nflags.DEFINE_integer(\n 'num_parallel_calls', default=64,\n help=('Number of parallel threads in CPU for the input pipeline'))\n\nflags.DEFINE_string(\n 'bigtable_project', None,\n 'The Cloud Bigtable project. If None, --gcp_project will be used.')\nflags.DEFINE_string(\n 'bigtable_instance', None,\n 'The Cloud Bigtable instance to load data from.')\nflags.DEFINE_string(\n 'bigtable_table', 'imagenet',\n 'The Cloud Bigtable table to load data from.')\nflags.DEFINE_string(\n 'bigtable_train_prefix', 'train_',\n 'The prefix identifying training rows.')\nflags.DEFINE_string(\n 'bigtable_eval_prefix', 'validation_',\n 'The prefix identifying evaluation rows.')\nflags.DEFINE_string(\n 'bigtable_column_family', 'tfexample',\n 'The column family storing TFExamples.')\nflags.DEFINE_string(\n 'bigtable_column_qualifier', 'example',\n 'The column name storing TFExamples.')\n\nflags.DEFINE_string(\n 'data_format', default='channels_last',\n help=('A flag to override the data format used in the model. The value'\n ' is either channels_first or channels_last. To run the network on'\n ' CPU or TPU, channels_last should be used. 
For GPU, channels_first'\n ' will improve performance.'))\nflags.DEFINE_integer(\n 'num_label_classes', default=1000, help='Number of classes, at least 2')\nflags.DEFINE_float(\n 'batch_norm_momentum',\n default=None,\n help=('Batch normalization layer momentum of moving average to override.'))\nflags.DEFINE_float(\n 'batch_norm_epsilon',\n default=None,\n help=('Batch normalization layer epsilon to override..'))\n\nflags.DEFINE_bool(\n 'transpose_input', default=True,\n help='Use TPU double transpose optimization')\n\nflags.DEFINE_string(\n 'export_dir',\n default=None,\n help=('The directory where the exported SavedModel will be stored.'))\nflags.DEFINE_bool(\n 'export_to_tpu', default=False,\n help=('Whether to export additional metagraph with \"serve, tpu\" tags'\n ' in addition to \"serve\" only metagraph.'))\nflags.DEFINE_bool(\n 'post_quantize', default=True, help=('Enable post quantization.'))\n\nflags.DEFINE_float(\n 'base_learning_rate',\n default=0.016,\n help=('Base learning rate when train batch size is 256.'))\n\nflags.DEFINE_float(\n 'momentum', default=0.9,\n help=('Momentum parameter used in the MomentumOptimizer.'))\n\nflags.DEFINE_float(\n 'moving_average_decay', default=0.9999,\n help=('Moving average decay rate.'))\n\nflags.DEFINE_float(\n 'weight_decay', default=1e-5,\n help=('Weight decay coefficiant for l2 regularization.'))\n\nflags.DEFINE_float(\n 'label_smoothing', default=0.1,\n help=('Label smoothing parameter used in the softmax_cross_entropy'))\n\nflags.DEFINE_float(\n 'dropout_rate', default=0.2,\n help=('Dropout rate for the final output layer.'))\n\n\nflags.DEFINE_integer('log_step_count_steps', 64, 'The number of steps at '\n 'which the global step information is logged.')\n\nflags.DEFINE_bool(\n 'use_cache', default=True, help=('Enable cache for training input.'))\n\nflags.DEFINE_float(\n 'depth_multiplier', default=None, help=('Depth multiplier per layer.'))\n\nflags.DEFINE_float(\n 'depth_divisor', default=None, help=('Depth divisor (default to 8).'))\n\nflags.DEFINE_float(\n 'min_depth', default=None, help=('Minimal depth (default to None).'))\n\nflags.DEFINE_bool(\n 'use_async_checkpointing', default=False, help=('Enable async checkpoint'))\n\n# Learning rate schedule\nLR_SCHEDULE = [ # (multiplier, epoch to start) tuples\n (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)\n]\n\n# The input tensor is in the range of [0, 255], we need to scale them to the\n# range of [0, 1]\nMEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\nSTDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n\n\ndef mnasnet_model_fn(features, labels, mode, params):\n \"\"\"The model_fn for MnasNet to be used with TPUEstimator.\n\n Args:\n features: `Tensor` of batched images.\n labels: `Tensor` of labels for the data samples\n mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`\n params: `dict` of parameters passed to the model from the TPUEstimator,\n `params['batch_size']` is always provided and should be used as the\n effective batch size.\n\n Returns:\n A `TPUEstimatorSpec` for the model\n \"\"\"\n if isinstance(features, dict):\n features = features['feature']\n\n # In most cases, the default data format NCHW instead of NHWC should be\n # used for a significant performance boost on GPU/TPU. 
NHWC should be used\n # only if the network needs to be run on CPU since the pooling operations\n # are only supported on NHWC.\n if FLAGS.data_format == 'channels_first':\n assert not FLAGS.transpose_input # channels_first only for GPU\n features = tf.transpose(features, [0, 3, 1, 2])\n\n if FLAGS.transpose_input and mode != tf.estimator.ModeKeys.PREDICT:\n features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC\n\n # Normalize the image to zero mean and unit variance.\n features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)\n features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n has_moving_average_decay = (FLAGS.moving_average_decay > 0)\n # This is essential, if using a keras-derived model.\n K.set_learning_phase(is_training)\n tf.logging.info('Using open-source implementation for MnasNet definition.')\n override_params = {}\n if FLAGS.batch_norm_momentum:\n override_params['batch_norm_momentum'] = FLAGS.batch_norm_momentum\n if FLAGS.batch_norm_epsilon:\n override_params['batch_norm_epsilon'] = FLAGS.batch_norm_epsilon\n if FLAGS.dropout_rate:\n override_params['dropout_rate'] = FLAGS.dropout_rate\n if FLAGS.data_format:\n override_params['data_format'] = FLAGS.data_format\n if FLAGS.num_label_classes:\n override_params['num_classes'] = FLAGS.num_label_classes\n if FLAGS.depth_multiplier:\n override_params['depth_multiplier'] = FLAGS.depth_multiplier\n if FLAGS.depth_divisor:\n override_params['depth_divisor'] = FLAGS.depth_divisor\n if FLAGS.min_depth:\n override_params['min_depth'] = FLAGS.min_depth\n\n logits, _ = mnasnet_models.build_mnasnet_model(\n features,\n model_name=FLAGS.model_name,\n training=is_training,\n override_params=override_params)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'classes': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs={\n 'classify': tf.estimator.export.PredictOutput(predictions)\n })\n\n # If necessary, in the model_fn, use params['batch_size'] instead the batch\n # size flags (--train_batch_size or --eval_batch_size).\n batch_size = params['batch_size'] # pylint: disable=unused-variable\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)\n cross_entropy = tf.losses.softmax_cross_entropy(\n logits=logits,\n onehot_labels=one_hot_labels,\n label_smoothing=FLAGS.label_smoothing)\n\n # Add weight decay to the loss for non-batch-normalization variables.\n loss = cross_entropy + FLAGS.weight_decay * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.trainable_variables()\n if 'batch_normalization' not in v.name])\n\n global_step = tf.train.get_global_step()\n if has_moving_average_decay:\n ema = tf.train.ExponentialMovingAverage(\n decay=FLAGS.moving_average_decay, num_updates=global_step)\n ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')\n for v in tf.global_variables():\n # We maintain mva for batch norm moving mean and variance as well.\n if 'moving_mean' in v.name or 'moving_variance' in v.name:\n ema_vars.append(v)\n ema_vars = list(set(ema_vars))\n\n host_call = None\n restore_vars_dict = None\n if is_training:\n # Compute the current epoch and associated learning rate from global_step.\n current_epoch = (\n tf.cast(global_step, tf.float32) / params['steps_per_epoch'])\n\n 
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)\n learning_rate = mnasnet_utils.build_learning_rate(scaled_lr, global_step,\n params['steps_per_epoch'])\n optimizer = mnasnet_utils.build_optimizer(learning_rate)\n if FLAGS.use_tpu:\n # When using TPU, wrap the optimizer with CrossShardOptimizer which\n # handles synchronization details between different TPU cores. To the\n # user, this should look like regular synchronous training.\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n # Batch normalization requires UPDATE_OPS to be added as a dependency to\n # the train operation.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step)\n\n if has_moving_average_decay:\n with tf.control_dependencies([train_op]):\n train_op = ema.apply(ema_vars)\n\n if not FLAGS.skip_host_call:\n def host_call_fn(gs, loss, lr, ce):\n \"\"\"Training host call. Creates scalar summaries for training metrics.\n\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the\n model to the `metric_fn`, provide as part of the `host_call`. See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `host_call`.\n\n Args:\n gs: `Tensor with shape `[batch]` for the global_step\n loss: `Tensor` with shape `[batch]` for the training loss.\n lr: `Tensor` with shape `[batch]` for the learning_rate.\n ce: `Tensor` with shape `[batch]` for the current_epoch.\n\n Returns:\n List of summary ops to run on the CPU host.\n \"\"\"\n gs = gs[0]\n # Host call fns are executed FLAGS.iterations_per_loop times after one\n # TPU loop is finished, setting max_queue value to the same as number of\n # iterations will make the summary writer only flush the data to storage\n # once per loop.\n with tf.contrib.summary.create_file_writer(\n FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar('loss', loss[0], step=gs)\n tf.contrib.summary.scalar('learning_rate', lr[0], step=gs)\n tf.contrib.summary.scalar('current_epoch', ce[0], step=gs)\n\n return tf.contrib.summary.all_summary_ops()\n\n # To log the loss, current learning rate, and epoch for Tensorboard, the\n # summary op needs to be run on the host CPU via host_call. host_call\n # expects [batch_size, ...] Tensors, thus reshape to introduce a batch\n # dimension. These Tensors are implicitly concatenated to\n # [params['batch_size']].\n gs_t = tf.reshape(global_step, [1])\n loss_t = tf.reshape(loss, [1])\n lr_t = tf.reshape(learning_rate, [1])\n ce_t = tf.reshape(current_epoch, [1])\n\n host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])\n\n else:\n train_op = None\n if has_moving_average_decay:\n # Load moving average variables for eval.\n restore_vars_dict = ema.variables_to_restore(ema_vars)\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(labels, logits):\n \"\"\"Evaluation metric function. Evaluates accuracy.\n\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the model\n to the `metric_fn`, provide as part of the `eval_metrics`. 
See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `eval_metrics`.\n\n Args:\n labels: `Tensor` with shape `[batch]`.\n logits: `Tensor` with shape `[batch, num_classes]`.\n\n Returns:\n A dict of the metrics to return from evaluation.\n \"\"\"\n predictions = tf.argmax(logits, axis=1)\n top_1_accuracy = tf.metrics.accuracy(labels, predictions)\n in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)\n top_5_accuracy = tf.metrics.mean(in_top_5)\n\n return {\n 'top_1_accuracy': top_1_accuracy,\n 'top_5_accuracy': top_5_accuracy,\n }\n\n eval_metrics = (metric_fn, [labels, logits])\n\n num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])\n tf.logging.info('number of trainable parameters: {}'.format(num_params))\n\n def _scaffold_fn():\n saver = tf.train.Saver(restore_vars_dict)\n return tf.train.Scaffold(saver=saver)\n\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n host_call=host_call,\n eval_metrics=eval_metrics,\n scaffold_fn=_scaffold_fn if has_moving_average_decay else None)\n\n\ndef _verify_non_empty_string(value, field_name):\n \"\"\"Ensures that a given proposed field value is a non-empty string.\n\n Args:\n value: proposed value for the field.\n field_name: string name of the field, e.g. `project`.\n\n Returns:\n The given value, provided that it passed the checks.\n\n Raises:\n ValueError: the value is not a string, or is a blank string.\n \"\"\"\n if not isinstance(value, str):\n raise ValueError(\n 'Bigtable parameter \"%s\" must be a string.' % field_name)\n if not value:\n raise ValueError(\n 'Bigtable parameter \"%s\" must be non-empty.' 
% field_name)\n return value\n\n\ndef _select_tables_from_flags():\n \"\"\"Construct training and evaluation Bigtable selections from flags.\n\n Returns:\n [training_selection, evaluation_selection]\n \"\"\"\n project = _verify_non_empty_string(\n FLAGS.bigtable_project or FLAGS.gcp_project,\n 'project')\n instance = _verify_non_empty_string(FLAGS.bigtable_instance, 'instance')\n table = _verify_non_empty_string(FLAGS.bigtable_table, 'table')\n train_prefix = _verify_non_empty_string(FLAGS.bigtable_train_prefix,\n 'train_prefix')\n eval_prefix = _verify_non_empty_string(FLAGS.bigtable_eval_prefix,\n 'eval_prefix')\n column_family = _verify_non_empty_string(FLAGS.bigtable_column_family,\n 'column_family')\n column_qualifier = _verify_non_empty_string(FLAGS.bigtable_column_qualifier,\n 'column_qualifier')\n return [\n imagenet_input.BigtableSelection(\n project=project,\n instance=instance,\n table=table,\n prefix=p,\n column_family=column_family,\n column_qualifier=column_qualifier)\n for p in (train_prefix, eval_prefix)\n ]\n\n\ndef export(est, export_dir, post_quantize=True):\n \"\"\"Export graph to SavedModel and TensorFlow Lite.\n\n Args:\n est: estimator instance.\n export_dir: string, exporting directory.\n post_quantize: boolean, whether to quantize model checkpoint after training.\n\n Raises:\n ValueError: the export directory path is not specified.\n \"\"\"\n if not export_dir:\n raise ValueError('The export directory path is not specified.')\n # The guide to serve a exported TensorFlow model is at:\n # https://www.tensorflow.org/serving/serving_basic\n def lite_image_serving_input_fn():\n \"\"\"serving input fn for raw images.\"\"\"\n input_shape = [1, FLAGS.input_image_size, FLAGS.input_image_size, 3]\n images = tf.placeholder(shape=input_shape, dtype=tf.float32)\n return tf.estimator.export.ServingInputReceiver(images, {'images': images})\n\n tf.logging.info('Starting to export model.')\n est.export_saved_model(\n export_dir_base=export_dir,\n serving_input_receiver_fn=lite_image_serving_input_fn)\n\n subfolder = sorted(tf.gfile.ListDirectory(export_dir), reverse=True)[0]\n tf.logging.info('Starting to export TFLite.')\n converter = tf.lite.TFLiteConverter.from_saved_model(\n os.path.join(export_dir, subfolder),\n input_arrays=['truediv'],\n output_arrays=['logits'])\n tflite_model = converter.convert()\n tflite_file = os.path.join(export_dir, FLAGS.model_name + '.tflite')\n tf.gfile.GFile(tflite_file, 'wb').write(tflite_model)\n\n if post_quantize:\n tf.logging.info('Starting to export quantized TFLite.')\n converter = tf.lite.TFLiteConverter.from_saved_model(\n os.path.join(export_dir, subfolder),\n input_arrays=['truediv'],\n output_arrays=['logits'])\n converter.post_training_quantize = True\n quant_tflite_model = converter.convert()\n quant_tflite_file = os.path.join(export_dir,\n FLAGS.model_name + '_postquant.tflite')\n tf.gfile.GFile(quant_tflite_file, 'wb').write(quant_tflite_model)\n\n\ndef main(unused_argv):\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu if (FLAGS.tpu or FLAGS.use_tpu) else '',\n zone=FLAGS.tpu_zone,\n project=FLAGS.gcp_project)\n\n if FLAGS.use_async_checkpointing:\n save_checkpoints_steps = None\n else:\n save_checkpoints_steps = max(100, FLAGS.iterations_per_loop)\n config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=FLAGS.model_dir,\n save_checkpoints_steps=save_checkpoints_steps,\n log_step_count_steps=FLAGS.log_step_count_steps,\n session_config=tf.ConfigProto(\n 
graph_options=tf.GraphOptions(\n rewrite_options=rewriter_config_pb2.RewriterConfig(\n disable_meta_optimizer=True))),\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig\n .PER_HOST_V2)) # pylint: disable=line-too-long\n # Initializes model parameters.\n params = dict(steps_per_epoch=FLAGS.num_train_images / FLAGS.train_batch_size)\n mnasnet_est = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=mnasnet_model_fn,\n config=config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n export_to_tpu=FLAGS.export_to_tpu,\n params=params)\n\n # Input pipelines are slightly different (with regards to shuffling and\n # preprocessing) between training and evaluation.\n if FLAGS.bigtable_instance:\n tf.logging.info('Using Bigtable dataset, table %s', FLAGS.bigtable_table)\n select_train, select_eval = _select_tables_from_flags()\n imagenet_train, imagenet_eval = [imagenet_input.ImageNetBigtableInput(\n is_training=is_training,\n use_bfloat16=False,\n transpose_input=FLAGS.transpose_input,\n selection=selection) for (is_training, selection) in\n [(True, select_train),\n (False, select_eval)]]\n else:\n if FLAGS.data_dir == FAKE_DATA_DIR:\n tf.logging.info('Using fake dataset.')\n else:\n tf.logging.info('Using dataset: %s', FLAGS.data_dir)\n imagenet_train, imagenet_eval = [\n imagenet_input.ImageNetInput(\n is_training=is_training,\n data_dir=FLAGS.data_dir,\n transpose_input=FLAGS.transpose_input,\n cache=FLAGS.use_cache and is_training,\n image_size=FLAGS.input_image_size,\n num_parallel_calls=FLAGS.num_parallel_calls,\n use_bfloat16=False) for is_training in [True, False]\n ]\n\n if FLAGS.mode == 'eval':\n eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size\n # Run evaluation when there's a new checkpoint\n for ckpt in evaluation.checkpoints_iterator(\n FLAGS.model_dir, timeout=FLAGS.eval_timeout):\n tf.logging.info('Starting to evaluate.')\n try:\n start_timestamp = time.time() # This time will include compilation time\n eval_results = mnasnet_est.evaluate(\n input_fn=imagenet_eval.input_fn,\n steps=eval_steps,\n checkpoint_path=ckpt)\n elapsed_time = int(time.time() - start_timestamp)\n tf.logging.info('Eval results: %s. Elapsed seconds: %d',\n eval_results, elapsed_time)\n\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n if current_step >= FLAGS.train_steps:\n tf.logging.info(\n 'Evaluation finished after training step %d', current_step)\n break\n\n except tf.errors.NotFoundError:\n # Since the coordinator is on a different job than the TPU worker,\n # sometimes the TPU worker does not finish initializing until long after\n # the CPU job tells it to start evaluating. In this case, the checkpoint\n # file could have been deleted already.\n tf.logging.info(\n 'Checkpoint %s no longer exists, skipping checkpoint', ckpt)\n\n if FLAGS.export_dir:\n export(mnasnet_est, FLAGS.export_dir, FLAGS.post_quantize)\n else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'\n current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long\n\n tf.logging.info(\n 'Training for %d steps (%.2f epochs in total). 
Current'\n ' step %d.', FLAGS.train_steps,\n FLAGS.train_steps / params['steps_per_epoch'], current_step)\n\n start_timestamp = time.time() # This time will include compilation time\n\n if FLAGS.mode == 'train':\n hooks = []\n if FLAGS.use_async_checkpointing:\n hooks.append(\n async_checkpoint.AsyncCheckpointSaverHook(\n checkpoint_dir=FLAGS.model_dir,\n save_steps=max(100, FLAGS.iterations_per_loop)))\n mnasnet_est.train(\n input_fn=imagenet_train.input_fn,\n max_steps=FLAGS.train_steps,\n hooks=hooks)\n\n else:\n assert FLAGS.mode == 'train_and_eval'\n while current_step < FLAGS.train_steps:\n # Train for up to steps_per_eval number of steps.\n # At the end of training, a checkpoint will be written to --model_dir.\n next_checkpoint = min(current_step + FLAGS.steps_per_eval,\n FLAGS.train_steps)\n mnasnet_est.train(\n input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)\n current_step = next_checkpoint\n\n tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',\n next_checkpoint, int(time.time() - start_timestamp))\n\n # Evaluate the model on the most recent model in --model_dir.\n # Since evaluation happens in batches of --eval_batch_size, some images\n # may be excluded modulo the batch size. As long as the batch size is\n # consistent, the evaluated images are also consistent.\n tf.logging.info('Starting to evaluate.')\n eval_results = mnasnet_est.evaluate(\n input_fn=imagenet_eval.input_fn,\n steps=FLAGS.num_eval_images // FLAGS.eval_batch_size)\n tf.logging.info('Eval results at step %d: %s',\n next_checkpoint, eval_results)\n\n elapsed_time = int(time.time() - start_timestamp)\n tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',\n FLAGS.train_steps, elapsed_time)\n if FLAGS.export_dir:\n export(mnasnet_est, FLAGS.export_dir, FLAGS.post_quantize)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n"
] |
[
[
"tensorflow.logging.set_verbosity",
"tensorflow.train.latest_checkpoint",
"numpy.random.rand",
"tensorflow.image.decode_image",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.logging.info",
"tensorflow.map_fn",
"tensorflow.estimator.Estimator",
"tensorflow.estimator.export.TensorServingInputReceiver",
"tensorflow.estimator.RunConfig",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.placeholder",
"tensorflow.gfile.MkDir",
"tensorflow.cast",
"tensorflow.contrib.training.checkpoints_iterator"
],
[
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.nn.in_top_k",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.reshape",
"tensorflow.contrib.training.python.training.evaluation.checkpoints_iterator",
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.trainable_variables",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.argmax",
"tensorflow.train.Saver",
"tensorflow.logging.info",
"tensorflow.transpose",
"tensorflow.global_variables",
"tensorflow.python.estimator.estimator._load_global_step_from_checkpoint_dir",
"tensorflow.constant",
"numpy.prod",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.get_collection",
"tensorflow.train.get_global_step",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.nn.l2_loss",
"tensorflow.gfile.GFile",
"tensorflow.contrib.summary.scalar",
"tensorflow.metrics.accuracy",
"tensorflow.placeholder",
"tensorflow.contrib.summary.create_file_writer",
"tensorflow.contrib.summary.all_summary_ops",
"tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig",
"tensorflow.python.keras.backend.set_learning_phase",
"tensorflow.train.Scaffold",
"tensorflow.contrib.summary.always_record_summaries",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.gfile.ListDirectory",
"tensorflow.metrics.mean"
]
] |
rohanshah13/cloud-emea-copy
|
[
"12acebc809080e5898ead86a412b17a5272759c2"
] |
[
"third_party/ridayesh_run_tag.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors,\n# The HuggingFace Inc. team, and The XTREME Benchmark Authors.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fine-tuning models for NER and POS tagging.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nfrom dataclasses import dataclass, field\nfrom typing import Optional\nimport json\n\nimport numpy as np\nimport scipy\nimport torch\nfrom seqeval.metrics import precision_score, recall_score, f1_score\nfrom tensorboardX import SummaryWriter\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom torch.utils.data import RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nfrom utils_tag import convert_examples_to_features\nfrom utils_tag import get_labels\nfrom utils_tag import read_examples_from_file\n# import lang2vec.lang2vec as l2v\nfrom scipy.spatial import distance\n\nfrom transformers import (\n AdamW,\n get_linear_schedule_with_warmup,\n WEIGHTS_NAME,\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n HfArgumentParser,\n MultiLingAdapterArguments,\n AdapterConfig,\n AdapterType,\n)\n#from xlm import XLMForTokenClassification\n\nDEFAULT_LANGUAGES = {\n 'mr': 'hi',\n 'bn': 'hi',\n 'ta': 'ta',\n 'fo': 'fo',\n 'no': 'da',\n 'da': 'da',\n 'be': 'be',\n 'uk': 'uk',\n 'bg': 'bg'\n}\nlogger = logging.getLogger(__name__)\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n logger.info(f'Seed = {args.seed}')\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):\n \"\"\"Train the model.\"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n print(f'Local Rank = {args.local_rank}')\n print(len(train_dataset))\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay},\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in 
no_decay)], \"weight_decay\": 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n logging.info([n for (n, p) in model.named_parameters() if p.requires_grad])\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (\n torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n best_score = 0.0\n best_checkpoint = None\n patience = 0\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Add here for reproductibility (even between python 2 and 3)\n\n cur_epoch = 0\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n cur_epoch += 1\n for step, batch in enumerate(epoch_iterator):\n batch = tuple(t.to(args.device) for t in batch if t is not None)\n inputs = {\"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3]}\n\n if args.model_type != \"distilbert\":\n # XLM and RoBERTa don\"t use segment_ids\n inputs[\"token_type_ids\"] = batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n\n if args.model_type == \"xlm\":\n inputs[\"langs\"] = batch[4]\n\n outputs = model(**inputs)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n # mean() to average on multi-gpu parallel training\n loss = loss.mean()\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n scheduler.step() # Update learning rate schedule\n optimizer.step()\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if args.local_rank == -1 and args.evaluate_during_training:\n # Only evaluate on single GPU otherwise metrics may not average well\n results, _ = 
evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n if args.save_only_best_checkpoint:\n result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)\n if result[\"f1\"] > best_score:\n logger.info(\"result['f1']={} > best_score={}\".format(result[\"f1\"], best_score))\n best_score = result[\"f1\"]\n # Save the best model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-best\")\n best_checkpoint = output_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n if args.do_save_adapters:\n model_to_save.save_all_adapters(output_dir)\n if args.do_save_adapter_fusions:\n model_to_save.save_all_adapter_fusions(output_dir)\n if args.do_save_full_model:\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving the best model checkpoint to %s\", output_dir)\n logger.info(\"Reset patience to 0\")\n patience = 0\n else:\n patience += 1\n logger.info(\"Hit patience={}\".format(patience))\n if args.eval_patience > 0 and patience > args.eval_patience:\n logger.info(\"early stop! 
patience={}\".format(patience))\n epoch_iterator.close()\n train_iterator.close()\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n return global_step, tr_loss / global_step\n else:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n if args.do_save_adapters:\n model_to_save.save_all_adapters(output_dir)\n if args.do_save_adapter_fusions:\n model_to_save.save_all_adapter_fusions(output_dir)\n if args.do_save_full_model:\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\ndef calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weights, step=10, lang=None):\n inputs = {\"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"return_sequence_out\": True,\n \"labels\": batch[3]}\n # logger.info(f'Language Adapters are {lang_adapter_names}')\n adapter_weights = [torch.FloatTensor([0.5 for _ in range(len(lang_adapter_names))]).to(args.device) for _ in range(13)]\n if args.lang_to_vec:\n logger.info(lang)\n logger.info(lang_adapter_names)\n adapter_weights = calc_l2v_weights(lang, lang_adapter_names, args.en_weight)\n logger.info(adapter_weights)\n for step_no in range(step):\n for w in adapter_weights: w.requires_grad = True\n if args.lang_to_vec and step_no == 0:\n normed_adapter_weights = adapter_weights\n else:\n normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]\n # logger.info(f'Initial Adapter Weights = {normed_adapter_weights}')\n model.set_active_adapters([lang_adapter_names, [task_name]])\n inputs[\"adapter_names\"] = [lang_adapter_names, [task_name]]\n\n inputs[\"adapter_weights\"] = normed_adapter_weights\n outputs = model(**inputs)\n\n loss, logits, orig_sequence_output = outputs[:3]\n kept_logits = outputs[-1]\n entropy = torch.nn.functional.softmax(kept_logits, dim=1)*torch.nn.functional.log_softmax(kept_logits, dim=1)\n entropy = -entropy.sum() / kept_logits.size(0)\n grads = torch.autograd.grad(entropy, adapter_weights)\n #print(adapter_weights)\n #print(grads)\n #print(grads)\n for i, w in enumerate(adapter_weights):\n adapter_weights[i] = adapter_weights[i].data - 10*grads[i].data\n\n\n normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]\n #print(normed_adapter_weights)\n # logger.info(f'Final Adapter Weights = {normed_adapter_weights}')\n return normed_adapter_weights\n\ndef jaccard_sim(vec1, vec2):\n intersection = 0\n union = 0\n for i in range(len(vec1)):\n if vec1[i] == '--' or vec2[i] == '--':\n continue\n if vec1[i] == 1 or vec2[i] == 1:\n union += 1\n if vec1[i] == 1 and vec2[i] == 1:\n intersection += 1\n return intersection/union\n\ndef get_sim(lang1, lang2):\n features = l2v.get_features(f'{DEFAULT_LANGUAGES[lang1]} {lang2}', 'learned')\n similarity = 1 - distance.cosine(features[DEFAULT_LANGUAGES[lang1]], features[lang2])\n return similarity\n\ndef get_syntax_sim(lang1, lang2):\n features = l2v.get_features(f'{lang1} {lang2}', 
\"syntax_wals|syntax_sswl|syntax_ethnologue\")\n similarity = jaccard_sim(features[lang1], features[lang2])\n return similarity\n\ndef calc_l2v_weights(args, lang, lang_adapter_names):\n adapter_weight = []\n for adapter_lang in lang_adapter_names:\n if args.en_weight is not None and adapter_lang == 'en':\n continue\n if args.lang_to_vec == 'learned':\n adapter_weight.append(get_sim(lang, adapter_lang))\n elif args.lang_to_vec == 'syntax':\n adapter_weight.append(get_syntax_sim(lang, adapter_lang))\n else:\n logger.info('INVALID FEATURE TYPE')\n exit()\n logger.info(adapter_weight)\n adapter_weight = torch.FloatTensor(adapter_weight)\n adapter_weight = torch.nn.functional.softmax(adapter_weight/args.temperature).tolist()\n if args.en_weight is not None:\n adapter_weight = [(1 - args.en_weight)*aw for aw in adapter_weight]\n en_index = lang_adapter_names.index('en')\n adapter_weight.insert(en_index, args.en_weight)\n return adapter_weight\n\ndef scaled_input(emb, batch_size=16, num_batch=1, baseline=None, start_i=None, end_i=None):\n # shape of emb: (num_head, seq_len, seq_len)\n if baseline is None:\n baseline = torch.zeros_like(emb) \n\n num_points = batch_size * num_batch\n scale = 1.0 / num_points\n if start_i is None:\n step = (emb.unsqueeze(0) - baseline.unsqueeze(0)) * scale\n res = torch.cat([torch.add(baseline.unsqueeze(0), step*i) for i in range(num_points)], dim=0)\n return res, step[0]\n else:\n step = (emb - baseline) * scale\n start_emb = torch.add(baseline, step*start_i)\n end_emb = torch.add(baseline, step*end_i)\n step_new = (end_emb.unsqueeze(0) - start_emb.unsqueeze(0)) * scale\n res = torch.cat([torch.add(start_emb.unsqueeze(0), step_new*i) for i in range(num_points)], dim=0)\n return res, step_new[0]\n\n#Changed the default of calc_weight_step to 0\ndef evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=\"\", lang=\"en\", lang2id=None, print_result=True, adapter_weight=None, lang_adapter_names=None, task_name=None, calc_weight_step=0):\n eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang, lang2id=lang2id)\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n if args.get_attr:\n eval_sampler = RandomSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n else:\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n # Eval!\n logger.info(\"***** Running evaluation %s in %s *****\" % (prefix, lang))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n counter = 0\n \n head_importances = None\n all_head_importances = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n counter += 1 \n logger.info(f'Batch number = {counter}')\n batch = tuple(t.to(args.device) for t in batch)\n if calc_weight_step > 0:\n adapter_weight = calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weight, calc_weight_step, lang=lang)\n if args.get_attr:\n inputs = {\"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3],\n \"adapter_weights\": adapter_weight}\n if 
args.model_type != \"distilbert\":\n # XLM and RoBERTa don\"t use segment_ids\n inputs[\"token_type_ids\"] = batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n if args.model_type == 'xlm':\n inputs[\"langs\"] = batch[4]\n inputs[\"output_attentions\"] = True\n outputs = model(**inputs)\n tmp_eval_loss, logits, attentions, kept_labels, kl_logits = outputs\n\n attr_all = []\n res_attr = []\n \n input_len = int(inputs[\"attention_mask\"][0].sum())\n example_head_importances = None\n #Remove the batch_size dim since batch_size=1\n logits = logits[0]\n for tar_layer in range(12):\n att = attentions[tar_layer][0]\n pred_labels = torch.argmax(logits, dim=-1)\n\n scale_att, step = scaled_input(att.data)\n scale_att.requires_grad_(True)\n\n attr_all = None\n prob_all = None\n for j_batch in range(1):\n one_batch_att = scale_att[j_batch*16:(j_batch+1)*16]\n _, grad = model(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'], labels=inputs['labels'], tar_layer=tar_layer, tmp_score=one_batch_att, pred_labels=pred_labels)\n grad = grad.sum(dim=0) \n attr_all = grad if attr_all is None else torch.add(attr_all, grad)\n # prob_all = tar_prob if prob_all is None else torch.cat([prob_all, tar_prob])\n \n attr_all = attr_all[:,0:input_len,0:input_len] * step[:,0:input_len,0:input_len]\n if example_head_importances is None:\n example_head_importances = torch.amax(attr_all, dim=(1,2)).unsqueeze(0)\n else:\n tmp = torch.amax(attr_all, dim=(1,2))\n tmp = tmp.unsqueeze(0)\n example_head_importances = torch.cat((example_head_importances, tmp), dim=0)\n # att = att[:,0:input_len,0:input_len]\n res_attr.append(attr_all.data)\n # logger.info(f'Example Head Importances = {example_head_importances}')\n all_head_importances = example_head_importances.unsqueeze(0) if all_head_importances is None else torch.cat((all_head_importances, example_head_importances.unsqueeze(0)), dim=0)\n head_importances = example_head_importances if head_importances is None else torch.add(head_importances, example_head_importances)\n if counter == 100:\n break\n continue\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3],\n \"adapter_weights\": adapter_weight}\n # logger.info(f'Labels = {batch[3]}')\n if args.model_type != \"distilbert\":\n # XLM and RoBERTa don\"t use segment_ids\n inputs[\"token_type_ids\"] = batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n if args.model_type == 'xlm':\n inputs[\"langs\"] = batch[4]\n\n\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if args.n_gpu > 1:\n # mean() to average on multi-gpu parallel evaluating\n tmp_eval_loss = tmp_eval_loss.mean()\n\n eval_loss += tmp_eval_loss.item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n if args.get_attr:\n head_importances = head_importances/counter\n logger.info(f'Head Importances = {head_importances}')\n torch.save(head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_importances_100.pt'))\n torch.save(all_head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_all_importances_100.pt'))\n return None, None\n\n if nb_eval_steps == 0:\n results = {k: 0 for k in [\"loss\", \"precision\", 
\"recall\", \"f1\"]}\n else:\n eval_loss = eval_loss / nb_eval_steps\n preds = np.argmax(preds, axis=2)\n\n label_map = {i: label for i, label in enumerate(labels)}\n\n out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n preds_list = [[] for _ in range(out_label_ids.shape[0])]\n\n for i in range(out_label_ids.shape[0]):\n for j in range(out_label_ids.shape[1]):\n if out_label_ids[i, j] != pad_token_label_id:\n out_label_list[i].append(label_map[out_label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n results = {\n \"loss\": eval_loss,\n \"precision\": precision_score(out_label_list, preds_list),\n \"recall\": recall_score(out_label_list, preds_list),\n \"f1\": f1_score(out_label_list, preds_list)\n }\n\n if print_result:\n logger.info(\"***** Evaluation result %s in %s *****\" % (prefix, lang))\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n return results, preds_list\n\n\ndef load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, lang2id=None, few_shot=-1):\n # Make sure only the first process in distributed training process\n # the dataset, and the others will use the cache\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier()\n\n # Load data features from cache or dataset file\n bpe_dropout = args.bpe_dropout\n if mode != 'train': bpe_dropout = 0\n if bpe_dropout > 0:\n cached_features_file = os.path.join(args.data_dir, \"cached_{}_{}_{}_{}_drop{}\".format(mode, lang,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length), bpe_dropout))\n else:\n cached_features_file = os.path.join(args.data_dir, \"cached_{}_{}_{}_{}\".format(mode, lang,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length)))\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n langs = lang.split(',')\n logger.info(\"all languages = {}\".format(lang))\n features = []\n for lg in langs:\n data_file = os.path.join(args.data_dir, lg, \"{}.{}\".format(mode, args.model_name_or_path))\n logger.info(\"Creating features from dataset file at {} in language {}\".format(data_file, lg))\n examples = read_examples_from_file(data_file, lg, lang2id)\n print(examples)\n features_lg = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n sep_token_extra=bool(args.model_type in [\"roberta\", \"xlmr\"]),\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n pad_token_label_id=pad_token_label_id,\n lang=lg,\n bpe_dropout=bpe_dropout,\n )\n features.extend(features_lg)\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file {}, len(features)={}\".format(cached_features_file, len(features)))\n torch.save(features, cached_features_file)\n\n # Make sure only the first process in distributed training process\n # the dataset, and the others will use the cache\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier()\n\n if few_shot > 0 and mode == 'train':\n logger.info(\"Original no. 
of examples = {}\".format(len(features)))\n features = features[: few_shot]\n logger.info('Using few-shot learning on {} examples'.format(len(features)))\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n if args.model_type == 'xlm' and features[0].langs is not None:\n all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)\n logger.info('all_langs[0] = {}'.format(all_langs[0]))\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_langs)\n else:\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n return dataset\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n model_type: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n labels: str = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n data_dir: str = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n output_dir: str = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n max_seq_length: Optional[int] = field(\n default=128, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n do_train: Optional[bool] = field(default=False )\n do_eval: Optional[bool] = field(default=False )\n do_predict: Optional[bool] = field(default=False )\n do_adapter_predict: Optional[bool] = field(default=False )\n do_predict_dev: Optional[bool] = field(default=False )\n do_predict_train: Optional[bool] = field(default=False )\n init_checkpoint: Optional[str] = field(default=None )\n evaluate_during_training: Optional[bool] = field(default=False )\n do_lower_case: Optional[bool] = field(default=False )\n few_shot: Optional[int] = field(default=-1 )\n per_gpu_train_batch_size: Optional[int] = field(default=8)\n per_gpu_eval_batch_size: Optional[int] = field(default=8)\n gradient_accumulation_steps: Optional[int] = field(default=1)\n learning_rate: Optional[float] = field(default=5e-5)\n weight_decay: Optional[float] = field(default=0.0)\n adam_epsilon: Optional[float] = field(default=1e-8)\n max_grad_norm: Optional[float] = field(default=1.0)\n num_train_epochs: Optional[float] = field(default=3.0)\n max_steps: Optional[int] = field(default=-1)\n save_steps: Optional[int] = field(default=-1)\n warmup_steps: Optional[int] = field(default=0)\n logging_steps: 
Optional[int] = field(default=50)\n save_only_best_checkpoint: Optional[bool] = field(default=False)\n eval_all_checkpoints: Optional[bool] = field(default=False)\n no_cuda: Optional[bool] = field(default=False)\n overwrite_output_dir: Optional[bool] = field(default=False)\n overwrite_cache: Optional[bool] = field(default=False)\n seed: Optional[int] = field(default=42)\n fp16: Optional[bool] = field(default=False)\n fp16_opt_level: Optional[str] = field(default=\"O1\")\n local_rank: Optional[int] = field(default=-1)\n server_ip: Optional[str] = field(default=\"\")\n server_port: Optional[str] = field(default=\"\")\n predict_langs: Optional[str] = field(default=\"en\")\n train_langs: Optional[str] = field(default=\"en\")\n log_file: Optional[str] = field(default=None)\n eval_patience: Optional[int] = field(default=-1)\n bpe_dropout: Optional[float] = field(default=0)\n do_save_adapter_fusions: Optional[bool] = field(default=False)\n task_name: Optional[str] = field(default=\"ner\")\n\n predict_task_adapter: Optional[str] = field(default=None)\n predict_lang_adapter: Optional[str] = field(default=None)\n test_adapter: Optional[bool] = field(default=False)\n\n adapter_weight: Optional[str] = field(default=None)\n lang_to_vec: Optional[str] = field(default=None)\n\n calc_weight_step: Optional[int] = field(default=0)\n predict_save_prefix: Optional[str] = field(default=None)\n en_weight: Optional[float] = field(default=None)\n temperature: Optional[float] = field(default=1.0)\n\n get_attr: Optional[bool] = field(default=False)\n topk: Optional[int] = field(default=1)\n\n task: Optional[str] = field(default='udpos')\n\ndef setup_adapter(args, adapter_args, model, train_adapter=True, load_adapter=None, load_lang_adapter=None):\n task_name = args.task_name or \"ner\"\n # check if adapter already exists, otherwise add it\n if task_name not in model.config.adapters.adapter_list(AdapterType.text_task):\n logging.info(\"Trying to decide if add adapter\")\n # resolve the adapter config\n adapter_config = AdapterConfig.load(\n adapter_args.adapter_config,\n non_linearity=adapter_args.adapter_non_linearity,\n reduction_factor=adapter_args.adapter_reduction_factor,\n )\n # load a pre-trained from Hub if specified\n if adapter_args.load_adapter or load_adapter:\n logging.info(\"loading task adapter\")\n model.load_adapter(\n adapter_args.load_adapter if load_adapter is None else load_adapter,\n AdapterType.text_task,\n config=adapter_config,\n load_as=task_name,\n )\n # otherwise, add a fresh adapter\n else:\n logging.info(\"Adding task adapter\")\n model.add_adapter(task_name, AdapterType.text_task, config=adapter_config)\n # optionally load a pre-trained language adapter\n if adapter_args.load_lang_adapter or load_lang_adapter:\n\n if load_lang_adapter is None:\n # load a set of language adapters\n logging.info(\"loading lang adpater {}\".format(adapter_args.load_lang_adapter))\n # resolve the language adapter config\n lang_adapter_config = AdapterConfig.load(\n adapter_args.lang_adapter_config,\n non_linearity=adapter_args.lang_adapter_non_linearity,\n reduction_factor=adapter_args.lang_adapter_reduction_factor,\n )\n # load the language adapter from Hub\n # if adapter_args.language == 'topk':\n # assert len(args.predict_langs.split(',')) == 1\n # filename = f'scripts/{args.task}/en/{args.predict_langs}.json'\n # logger.info(f'Loading Adapter Languages from {filename}')\n # languages = []\n # with open(filename) as f:\n # for i,line in enumerate(f):\n # if i == args.topk:\n # break\n # line = 
json.loads(line)\n # languages.append(line['adapter'].strip())\n # adapter_names = [f'{lang}/wiki@ukp' for lang in languages]\n # else:\n # languages = adapter_args.language.split(\",\")\n # adapter_names = adapter_args.load_lang_adapter.split(\",\")\n # logger.info(f'Adapter Languages : {languages}, Length : {len(languages)}')\n # logger.info(f'Adapter Names {adapter_names}, Length : {len(adapter_names)}')\n # assert len(languages) == len(adapter_names)\n # lang_adapter_names = []\n # for language, adapter_name in zip(languages, adapter_names):\n # logger.info(f'Language = {language}')\n # logger.info(f'Adapter Name = {adapter_name}')\n # lang_adapter_name = model.load_adapter(\n # adapter_name,\n # AdapterType.text_lang,\n # config=lang_adapter_config,\n # load_as=language,\n # )\n # lang_adapter_names.append(lang_adapter_name)\n else:\n logging.info(\"loading lang adpater {}\".format(load_lang_adapter))\n # resolve the language adapter config\n lang_adapter_config = AdapterConfig.load(\n adapter_args.lang_adapter_config,\n non_linearity=adapter_args.lang_adapter_non_linearity,\n reduction_factor=adapter_args.lang_adapter_reduction_factor,\n )\n # load the language adapter from Hub\n # lang_adapter_name = model.load_adapter(\n # load_lang_adapter,\n # AdapterType.text_lang,\n # config=lang_adapter_config,\n # load_as=\"lang\",\n # )\n # lang_adapter_names = [lang_adapter_name]\n else:\n lang_adapter_name = None\n lang_adapter_names = []\n # Freeze all model weights except of those of this adapter\n model.train_adapter([task_name])\n\n # Set the adapters to be used in every forward pass\n if lang_adapter_name:\n model.set_active_adapters([lang_adapter_names, [task_name]])\n else:\n model.set_active_adapters([task_name])\n\n return model, lang_adapter_names, task_name\n\ndef load_model(args, num_labels):\n logger.info('Loading pretrained model and tokenizer')\n config = AutoConfig.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n cache_dir=args.cache_dir,\n )\n args.model_type = config.model_type\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir,\n use_fast=False,\n )\n if args.init_checkpoint:\n logger.info(\"loading from init_checkpoint={}\".format(args.init_checkpoint))\n model = AutoModelForTokenClassification.from_pretrained(\n args.init_checkpoint,\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n logger.info(\"loading from existing model {}\".format(args.model_name_or_path))\n model = AutoModelForTokenClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n lang2id = config.lang2id if args.model_type == \"xlm\" else None\n logger.info(\"Using lang2id = {}\".format(lang2id))\n\n return model, tokenizer, lang2id\n\ndef predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, split):\n output_test_results_file = os.path.join(args.output_dir, f\"{split}_results.txt\")\n with open(output_test_results_file, \"a\") as result_writer:\n for lang in args.predict_langs.split(','):\n #Check if language data exists\n if not os.path.exists(os.path.join(args.data_dir, lang, '{}.{}'.format(split, args.model_name_or_path))):\n logger.info(\"Language {}, split {} does not exist\".format(lang, split))\n continue\n\n #Activate the 
required language adapter\n adapter_weight = None\n # if not args.adapter_weight and not args.lang_to_vec:\n # if (adapter_args.train_adapter or args.test_adapter) and not args.adapter_weight:\n # if lang in lang_adapter_names:\n # logger.info(f'Language adapter for {lang} found')\n # logger.info(\"Set active language adapter to {}\".format(lang))\n # model.set_active_adapters([[lang], [task_name]])\n # else:\n # logger.info(f'Language adapter for {lang} not found, using {lang_adapter_names[0]} instead')\n # logger.info(\"Set active language adapter to {}\".format(lang_adapter_names[0]))\n # model.set_active_adapters([[lang_adapter_names[0]], [task_name]])\n # else:\n # if args.adapter_weight == 'equal':\n # adapter_weight = [1/len(lang_adapter_names) for _ in lang_adapter_names]\n # elif args.adapter_weight == 'equal_en':\n # assert 'en' in lang_adapter_names, 'English language adapter not included'\n # adapter_weight = [(1-args.en_weight)/(len(lang_adapter_names)-1) for _ in lang_adapter_names]\n # en_index = lang_adapter_names.index('en')\n # adapter_weight[en_index] = args.en_weight\n # elif args.lang_to_vec:\n # if args.en_weight is not None:\n # logger.info(lang_adapter_names)\n # assert 'en' in lang_adapter_names, 'English language adapter not included'\n # adapter_weight = calc_l2v_weights(args, lang, lang_adapter_names)\n # elif args.adapter_weight == 'load':\n # filename = f'weights/{args.task}/{lang}/weights_s{args.seed}'\n # logger.info(f'Loading adapter weights from {filename}')\n # with open(filename) as f:\n # adapter_weight = json.loads(next(f))\n # elif args.adapter_weight != \"0\" and args.adapter_weight is not None:\n # adapter_weight = [float(w) for w in args.adapter_weight.split(\",\")]\n logger.info('Args Adapter Weight = {}'.format(args.adapter_weight))\n logger.info('Adapter Languages = {}'.format(lang_adapter_names))\n if adapter_weight is not None:\n logger.info(\"Adapter Weights = {}\".format(adapter_weight))\n logger.info('Sum of Adapter Weights = {}'.format(sum(adapter_weight)))\n logger.info(\"Length of Adapter Weights = {}\".format(len(adapter_weight))) \n # model.set_active_adapters([ lang_adapter_names, [task_name]])\n #Evaluate\n result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=split, lang=lang, lang2id=lang2id, adapter_weight=adapter_weight, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)\n\n if args.get_attr:\n continue\n result_json = {}\n # Save results\n if args.predict_save_prefix is not None and args.predict_save_prefix:\n result_json['language'] = f'{args.predict_save_prefix}_{lang}'\n else:\n result_json['language'] = f'{lang}'\n \n result_json['seed'] = args.seed\n result_json['language_adapters'] = lang_adapter_names\n if args.adapter_weight:\n result_json['adapter_weights'] = args.adapter_weight\n \n for key in sorted(result.keys()):\n result_json[key] = result[key]\n \n result_writer.write(json.dumps(result_json) + '\\n')\n # Save predictions\n if args.predict_save_prefix is not None and args.predict_save_prefix:\n output_test_predictions_file = os.path.join(args.output_dir, \"{}_{}_{}_s{}_predictions.txt\".format(split, args.predict_save_prefix, lang, args.seed))\n else:\n output_test_predictions_file = os.path.join(args.output_dir, \"{}_{}_s{}_predictions.txt\".format(split, lang, args.seed))\n infile = os.path.join(args.data_dir, lang, \"{}.{}\".format(split, args.model_name_or_path))\n idxfile = infile + '.idx'\n save_predictions(args, predictions, 
output_test_predictions_file, infile, idxfile)\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser = HfArgumentParser((ModelArguments, MultiLingAdapterArguments))\n args, adapter_args = parser.parse_args_into_dataclasses()\n\n\n if os.path.exists(args.output_dir) and os.listdir(\n args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir))\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else:\n # Initializes the distributed backend which sychronizes nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(handlers = [logging.FileHandler(args.log_file), logging.StreamHandler()],\n format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logging.info(\"Input args: %r\" % args)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n # Set seed\n set_seed(args)\n # Prepare NER/POS task\n labels = get_labels(args.labels)\n num_labels = len(labels)\n # Use cross entropy ignore index as padding label id\n # so that only real label ids contribute to the loss later\n pad_token_label_id = CrossEntropyLoss().ignore_index\n\n # Load pretrained model and tokenizer\n # Make sure only the first process in distributed training loads model/vocab\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() \n\n args.do_save_full_model= (not adapter_args.train_adapter)\n args.do_save_adapters=adapter_args.train_adapter\n if args.do_save_adapters:\n logging.info('save adapters')\n logging.info(adapter_args.train_adapter)\n if args.do_save_full_model:\n logging.info('save model')\n\n # Make sure only the first process in distributed training loads model/vocab\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n logger.info(\"Training/evaluation parameters %s\", args)\n # Training\n if args.do_train:\n model, tokenizer, lang2id = load_model(args, num_labels)\n if adapter_args.train_adapter:\n model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model)\n logger.info(\"lang adapter names: {}\".format(\" \".join(lang_adapter_names)))\n else:\n lang_adatper_names = []\n task_name = None\n \n model.to(args.device)\n\n train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=\"train\", lang=args.train_langs, lang2id=lang2id, few_shot=args.few_shot)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use default names for the model,\n # 
you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n # Save model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n # Take care of distributed/parallel training\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n model_to_save = model.module if hasattr(model, \"module\") else model\n if args.do_save_adapters:\n logging.info(\"Save adapter\")\n model_to_save.save_all_adapters(args.output_dir)\n if args.do_save_adapter_fusions:\n logging.info(\"Save adapter fusion\")\n model_to_save.save_all_adapter_fusions(args.output_dir)\n if args.do_save_full_model:\n logging.info(\"Save full model\")\n model_to_save.save_pretrained(args.output_dir)\n\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Initialization for evaluation\n results = {}\n if args.init_checkpoint:\n best_checkpoint = args.init_checkpoint\n elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):\n best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')\n else:\n best_checkpoint = args.output_dir\n\n # Evaluation\n #This evaluates only if the entire model is saved, something we are not doing\n if args.do_eval and args.local_rank in [-1, 0]:\n model, tokenizer, lang2id = load_model(args, num_labels)\n\n logger.info('Evaluating the model on dev set of training language(en)')\n load_adapter = (best_checkpoint + \"/\" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter\n # load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'\n logger.info(f'Task Adapter will be loaded from this path {load_adapter}')\n model.model_name = args.model_name_or_path\n model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)\n model.to(args.device)\n \n result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix='debugging', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)\n results.update(result)\n # for checkpoint in checkpoints:\n # global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n # model = AutoModelForTokenClassification.from_pretrained(checkpoint)\n # if adapter_args.train_adapter:\n # load_adapter = checkpoint + \"/\" + args.task_name\n # load_lang_adapter = \"{}/{}\".format(checkpoint, adapter_args.language)\n # model.model_name = args.model_name_or_path\n # model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)\n# \n # model.to(args.device)\n # result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)\n # if result[\"f1\"] > best_f1:\n # best_checkpoint = checkpoint\n # best_f1 = result[\"f1\"]\n # if global_step:\n # result = {\"{}_{}\".format(global_step, k): v for k, v in result.items()}\n # results.update(result)\n\n output_eval_file = 
os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n # writer.write(\"best checkpoint = {}, best f1 = {}\\n\".format(best_checkpoint, best_f1))\n if args.do_predict and args.local_rank in [-1, 0]:\n model, tokenizer, lang2id = load_model(args, num_labels)\n # Prediction\n logger.info('Evaluating the model on test set of all the languages specified')\n \n #Set up the task adapter\n if adapter_args.train_adapter or args.test_adapter:\n load_adapter = (best_checkpoint + \"/\" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter\n # load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'\n logger.info(f'Task Adapter will be loaded from this path {load_adapter}')\n load_lang_adapter = args.predict_lang_adapter\n model.model_name = args.model_name_or_path\n model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)\n model.to(args.device)\n\n predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'test')\n\n if args.do_predict_train and args.local_rank in [-1, 0]:\n logger.info('Evaluating on the train set of all specified languages')\n model, tokenizer, lang2id = load_model(args, num_labels)\n\n if adapter_args.train_adapter or args.test_adapter:\n load_adapter = (best_checkpoint + \"/\" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter\n # load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'\n logger.info(f'Task Adapter will be loaded from this path {load_adapter}')\n load_lang_adapter = args.predict_lang_adapter\n model.model_name = args.model_name_or_path\n model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)\n model.to(args.device)\n\n predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'train')\n \n #Predict dev set\n if args.do_predict_dev and args.local_rank in [-1, 0]:\n model, tokenizer, lang2id = load_model(args, num_labels)\n\n logger.info('Evaluating on the dev sets of all the specified languages')\n \n #Set up task and language adapters\n if adapter_args.train_adapter or args.test_adapter:\n load_adapter = (best_checkpoint + \"/\" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter\n # load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'\n logger.info(f'Task Adapter will be loaded from this path {load_adapter}')\n load_lang_adapter = args.predict_lang_adapter\n model.model_name = args.model_name_or_path\n model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)\n model.to(args.device)\n\n predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'dev')\n\ndef save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):\n # Save predictions\n with open(text_file, \"r\") as text_reader, open(idx_file, \"r\") as idx_reader:\n text = 
text_reader.readlines()\n index = idx_reader.readlines()\n assert len(text) == len(index)\n\n # Sanity check on the predictions\n with open(output_file, \"w\") as writer:\n example_id = 0\n prev_id = int(index[0])\n for line, idx in zip(text, index):\n if line == \"\" or line == \"\\n\":\n example_id += 1\n else:\n cur_id = int(idx)\n output_line = '\\n' if cur_id != prev_id else ''\n if output_word_prediction:\n output_line += line.split()[0] + '\\t'\n output_line += predictions[example_id].pop(0) + '\\n'\n writer.write(output_line)\n prev_id = cur_id\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.RandomSampler",
"torch.amax",
"scipy.spatial.distance.cosine",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.FloatTensor",
"torch.manual_seed",
"torch.autograd.grad",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.argmax",
"torch.zeros_like",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.save",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.nn.functional.log_softmax",
"torch.cuda.set_device",
"torch.nn.functional.softmax",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.argmax",
"numpy.random.seed",
"torch.no_grad",
"torch.add",
"torch.utils.data.distributed.DistributedSampler"
]
] |
swcjack6931677/ERINN
|
[
"a4f3d0ad213515bc86e2a18575537d6affd472ac"
] |
[
"erinn/python/models/DFN.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nfrom tensorflow.python.keras.layers import Input, Dense\nfrom tensorflow.python.keras.layers.normalization import BatchNormalization\nfrom tensorflow.python.keras.models import Model\n\n\n# 第一種架構: 深度前饋網路(deep feedforward network)\n# 也叫做前饋神經網路(feedforward neural network)或多層感知機(multilayer perceptron, MLP)\ndef get_dfn(output_size, img_height, img_width, show=True):\n\n model_input = Input(shape=(img_height * img_width,), name='Main_input')\n x = Dense(256, activation='selu', name='Dense_selu_1')(model_input)\n x = BatchNormalization(name='BN_1')(x)\n x = Dense(256, activation='tanh', name='Dense_tanh_1')(x)\n x = BatchNormalization(name='BN_2')(x)\n x = Dense(256, activation='tanh', name='Dense_tanh_2')(x)\n dfn_output = Dense(output_size, activation='linear',\n name='Output_Dense_linear')(x)\n dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN')\n\n if show:\n print('DFN summary:')\n dfn.summary()\n print()\n\n return dfn\n\n\ndef get_dfn_relu(output_size, img_height, img_width, show=True):\n\n model_input = Input(shape=(img_height * img_width,), name='Main_input')\n x = BatchNormalization(name='BN_1')(model_input)\n x = Dense(256, activation='relu', name='Dense_relu_1')(x)\n # x = BatchNormalization()(x)\n x = Dense(256, activation='relu', name='Dense_relu_2')(x)\n # x = BatchNormalization()(x)\n x = Dense(256, activation='relu', name='Dense_relu_3')(x)\n dfn_output = Dense(output_size, activation='linear',\n name='Output_Dense_linear')(x)\n dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_relu')\n\n if show:\n print('DFN_relu summary:')\n dfn.summary()\n print()\n\n return dfn\n\n\ndef get_dfn_selu(output_size, img_height, img_width, show=True):\n\n model_input = Input(shape=(img_height * img_width,), name='Main_input')\n x = BatchNormalization()(model_input)\n x = Dense(256, activation='selu', name='Dense_selu_1')(x)\n # x = BatchNormalization()(x)\n x = Dense(256, activation='selu', name='Dense_selu_2')(x)\n # x = BatchNormalization()(x)\n x = Dense(256, activation='selu', name='Dense_selu_3')(x)\n dfn_output = Dense(output_size, activation='linear',\n name='Output_Dense_linear')(x)\n dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_selu')\n\n if show:\n print('DFN_selu summary:')\n dfn.summary()\n print()\n\n return dfn\n"
] |
[
[
"tensorflow.python.keras.layers.normalization.BatchNormalization",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.models.Model"
]
] |
acumb/LatticeDNAOrigami
|
[
"0f2522286adc9815865d4abfc55f546da40e606b"
] |
[
"scripts/plotting/plot_lfes.py"
] |
[
"#!/usr/bin/python\n\n\"\"\"Plot LFEs of given order parameter.\"\"\"\n\nimport argparse\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nimport pandas as pd\n\nfrom matplotlibstyles import styles\nfrom matplotlibstyles import plotutils\n\n\ndef main():\n args = parse_args()\n f = setup_figure()\n gs = gridspec.GridSpec(1, 1, f)\n ax = f.add_subplot(gs[0, 0])\n if args.post_lfes == None:\n args.post_lfes = ['' for i in range(len(args.systems))]\n\n plot_figure(f, ax, vars(args))\n setup_axis(ax, args.tag)\n #set_labels(ax)\n save_figure(f, args.plot_filebase)\n\n\ndef setup_figure():\n styles.set_default_style()\n figsize = (plotutils.cm_to_inches(10), plotutils.cm_to_inches(7))\n\n return plt.figure(figsize=figsize, dpi=300, constrained_layout=True)\n\n\ndef plot_figure(f, ax, args):\n systems = args['systems']\n varis = args['varis']\n input_dir = args['input_dir']\n tag = args['tag']\n post_lfes = args['post_lfes']\n stacking_enes = args['stacking_enes']\n\n if stacking_enes is not None:\n stacking_enes = [abs(e) for e in stacking_enes]\n cmap = plotutils.create_truncated_colormap(\n 0.2, 0.8, name='plasma')\n #mappable = plotutils.create_linear_mappable(\n # cmap, abs(stacking_enes[0]), abs(stacking_enes[-1]))\n #colors = [mappable.to_rgba(abs(e)) for e in stacking_enes]\n increment = stacking_enes[1] - stacking_enes[0]\n cmap, norm, colors = plotutils.create_segmented_colormap(cmap, stacking_enes, increment)\n else:\n cmap = cm.get_cmap('tab10')\n colors = [cmap(i) for i in range(len(systems))]\n\n for i in range(len(systems)):\n system = systems[i]\n vari = varis[i]\n post_lfe = post_lfes[i]\n if post_lfe != '':\n post_lfe = '-' + post_lfe\n\n inp_filebase = f'{input_dir}/{system}-{vari}_lfes{post_lfe}-{tag}'\n lfes = pd.read_csv(f'{inp_filebase}.aves', sep=' ', index_col=0)\n lfe_stds = pd.read_csv(f'{inp_filebase}.stds', sep=' ', index_col=0)\n temp = lfes.columns[0]\n lfes = lfes[temp]\n lfes = lfes - lfes[0]\n lfe_stds = lfe_stds[temp]\n\n label = f'{system}-{vari}'\n ax.errorbar(lfes.index, lfes, yerr=lfe_stds, marker='o', label=label,\n color=colors[i])\n\n if stacking_enes is not None:\n label = r'$-U_\\text{stack} / \\SI{1000}{\\kb\\kelvin}$'\n tick_labels = [f'${e/1000:.1f}$' for e in stacking_enes]\n plotutils.plot_segmented_colorbar(\n f, ax, cmap, norm, label, tick_labels, 'horizontal')\n\n\ndef setup_axis(ax, ylabel=None, xlabel=None, ylim_top=None, xlim_right=None):\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n ax.set_ylim(top=ylim_top)\n ax.set_xlim(right=xlim_right)\n\n\ndef set_labels(ax):\n plt.legend()\n\n\ndef save_figure(f, plot_filebase):\n #f.savefig(plot_filebase + '.pgf', transparent=True)\n f.savefig(plot_filebase + '.pdf', transparent=True)\n f.savefig(plot_filebase + '.png', transparent=True)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n 'input_dir',\n type=str,\n help='Input directory')\n parser.add_argument(\n 'plot_filebase',\n type=str,\n help='Plots directory')\n parser.add_argument(\n 'tag',\n type=str,\n help='OP tag')\n parser.add_argument(\n '--systems',\n nargs='+',\n type=str,\n help='Systems')\n parser.add_argument(\n '--varis',\n nargs='+',\n type=str,\n help='Simulation variants')\n parser.add_argument(\n '--post_lfes',\n nargs='+',\n type=str,\n 
help='Filename additions after lfes, if any')\n parser.add_argument(\n '--stacking_enes',\n nargs='+',\n type=float,\n help='Stacking energies (for colormap)')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.ticker.MaxNLocator",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pandas.read_csv",
"matplotlib.gridspec.GridSpec"
]
] |
sv2518/pymbolic
|
[
"42687a410b1c355beec510b91c18f97e5137795b"
] |
[
"test/test_pymbolic.py"
] |
[
"__copyright__ = \"Copyright (C) 2009-2013 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport pymbolic.primitives as prim\nimport pytest\nfrom pymbolic import parse\nfrom pytools.lex import ParseError\n\n\nfrom pymbolic.mapper import IdentityMapper\n\ntry:\n reduce\nexcept NameError:\n from functools import reduce\n\n\n# {{{ utilities\n\ndef assert_parsed_same_as_python(expr_str):\n # makes sure that has only one line\n expr_str, = expr_str.split(\"\\n\")\n from pymbolic.interop.ast import ASTToPymbolic\n import ast\n ast2p = ASTToPymbolic()\n try:\n expr_parsed_by_python = ast2p(ast.parse(expr_str).body[0].value)\n except SyntaxError:\n with pytest.raises(ParseError):\n parse(expr_str)\n else:\n expr_parsed_by_pymbolic = parse(expr_str)\n assert expr_parsed_by_python == expr_parsed_by_pymbolic\n\n\ndef assert_parse_roundtrip(expr_str):\n expr = parse(expr_str)\n from pymbolic.mapper.stringifier import StringifyMapper\n strified = StringifyMapper()(expr)\n assert strified == expr_str, (strified, expr_str)\n\n# }}}\n\n\ndef test_integer_power():\n from pymbolic.algorithm import integer_power\n\n for base, expn in [\n (17, 5),\n (17, 2**10),\n (13, 20),\n (13, 1343),\n ]:\n assert base**expn == integer_power(base, expn)\n\n\ndef test_expand():\n from pymbolic import var, expand\n\n x = var(\"x\")\n u = (x+1)**5\n expand(u)\n\n\ndef test_substitute():\n from pymbolic import parse, substitute, evaluate\n u = parse(\"5+x.min**2\")\n xmin = parse(\"x.min\")\n assert evaluate(substitute(u, {xmin: 25})) == 630\n\n\ndef test_no_comparison():\n from pymbolic import parse\n\n x = parse(\"17+3*x\")\n y = parse(\"12-5*y\")\n\n def expect_typeerror(f):\n try:\n f()\n except TypeError:\n pass\n else:\n raise AssertionError\n\n expect_typeerror(lambda: x < y)\n expect_typeerror(lambda: x <= y)\n expect_typeerror(lambda: x > y)\n expect_typeerror(lambda: x >= y)\n\n\ndef test_structure_preservation():\n x = prim.Sum((5, 7))\n from pymbolic.mapper import IdentityMapper\n x2 = IdentityMapper()(x)\n assert x == x2\n\n\ndef test_sympy_interaction():\n pytest.importorskip(\"sympy\")\n\n import sympy as sp\n\n x, y = sp.symbols(\"x y\")\n f = sp.Function(\"f\")\n\n s1_expr = 1/f(x/sp.sqrt(x**2+y**2)).diff(x, 5) # pylint:disable=not-callable\n\n from pymbolic.interop.sympy import (\n SympyToPymbolicMapper,\n PymbolicToSympyMapper)\n s2p = SympyToPymbolicMapper()\n p2s = PymbolicToSympyMapper()\n\n p1_expr = s2p(s1_expr)\n s2_expr = p2s(p1_expr)\n\n assert sp.ratsimp(s1_expr - 
s2_expr) == 0\n\n p2_expr = s2p(s2_expr)\n s3_expr = p2s(p2_expr)\n\n assert sp.ratsimp(s1_expr - s3_expr) == 0\n\n\n# {{{ fft\n\ndef test_fft_with_floats():\n numpy = pytest.importorskip(\"numpy\")\n import numpy.linalg as la\n\n from pymbolic.algorithm import fft, ifft\n\n for n in [2**i for i in range(4, 10)]+[17, 12, 948]:\n a = numpy.random.rand(n) + 1j*numpy.random.rand(n)\n f_a = fft(a)\n a2 = ifft(f_a)\n assert la.norm(a-a2) < 1e-10\n\n f_a_numpy = numpy.fft.fft(a)\n assert la.norm(f_a-f_a_numpy) < 1e-10\n\n\nclass NearZeroKiller(IdentityMapper):\n def map_constant(self, expr):\n if isinstance(expr, complex):\n r = expr.real\n i = expr.imag\n if abs(r) < 1e-15:\n r = 0\n if abs(i) < 1e-15:\n i = 0\n return complex(r, i)\n else:\n return expr\n\n\ndef test_fft():\n numpy = pytest.importorskip(\"numpy\")\n\n from pymbolic import var\n from pymbolic.algorithm import fft, sym_fft\n\n vars = numpy.array([var(chr(97+i)) for i in range(16)], dtype=object)\n print(vars)\n\n print(fft(vars))\n traced_fft = sym_fft(vars)\n\n from pymbolic.mapper.stringifier import PREC_NONE\n from pymbolic.mapper.c_code import CCodeMapper\n ccm = CCodeMapper()\n\n code = [ccm(tfi, PREC_NONE) for tfi in traced_fft]\n\n for cse_name, cse_str in enumerate(ccm.cse_name_list):\n print(f\"{cse_name} = {cse_str}\")\n\n for i, line in enumerate(code):\n print(\"result[%d] = %s\" % (i, line))\n\n# }}}\n\n\ndef test_sparse_multiply():\n numpy = pytest.importorskip(\"numpy\")\n pytest.importorskip(\"scipy\")\n import scipy.sparse as ss\n\n la = numpy.linalg\n\n mat = numpy.random.randn(10, 10)\n s_mat = ss.csr_matrix(mat)\n\n vec = numpy.random.randn(10)\n mat_vec = s_mat*vec\n\n from pymbolic.algorithm import csr_matrix_multiply\n mat_vec_2 = csr_matrix_multiply(s_mat, vec)\n\n assert la.norm(mat_vec-mat_vec_2) < 1e-14\n\n\n# {{{ parser\n\ndef test_parser():\n from pymbolic import parse\n parse(\"(2*a[1]*b[1]+2*a[0]*b[0])*(hankel_1(-1,sqrt(a[1]**2+a[0]**2)*k) \"\n \"-hankel_1(1,sqrt(a[1]**2+a[0]**2)*k))*k /(4*sqrt(a[1]**2+a[0]**2)) \"\n \"+hankel_1(0,sqrt(a[1]**2+a[0]**2)*k)\")\n print(repr(parse(\"d4knl0\")))\n print(repr(parse(\"0.\")))\n print(repr(parse(\"0.e1\")))\n assert parse(\"0.e1\") == 0\n assert parse(\"1e-12\") == 1e-12\n print(repr(parse(\"a >= 1\")))\n print(repr(parse(\"a <= 1\")))\n\n print(repr(parse(\":\")))\n print(repr(parse(\"1:\")))\n print(repr(parse(\":2\")))\n print(repr(parse(\"1:2\")))\n print(repr(parse(\"::\")))\n print(repr(parse(\"1::\")))\n print(repr(parse(\":1:\")))\n print(repr(parse(\"::1\")))\n print(repr(parse(\"3::1\")))\n print(repr(parse(\":5:1\")))\n print(repr(parse(\"3:5:1\")))\n\n assert_parse_roundtrip(\"()\")\n assert_parse_roundtrip(\"(3,)\")\n\n assert_parse_roundtrip(\"[x + 3, 3, 5]\")\n assert_parse_roundtrip(\"[]\")\n assert_parse_roundtrip(\"[x]\")\n\n assert_parse_roundtrip(\"g[i, k] + 2.0*h[i, k]\")\n parse(\"g[i,k]+(+2.0)*h[i, k]\")\n\n print(repr(parse(\"a - b - c\")))\n print(repr(parse(\"-a - -b - -c\")))\n print(repr(parse(\"- - - a - - - - b - - - - - c\")))\n\n print(repr(parse(\"~(a ^ b)\")))\n print(repr(parse(\"(a | b) | ~(~a & ~b)\")))\n\n print(repr(parse(\"3 << 1\")))\n print(repr(parse(\"1 >> 3\")))\n\n print(parse(\"3::1\"))\n\n assert parse(\"e1\") == prim.Variable(\"e1\")\n assert parse(\"d1\") == prim.Variable(\"d1\")\n\n from pymbolic import variables\n f, x, y, z = variables(\"f x y z\")\n assert parse(\"f((x,y),z)\") == f((x, y), z)\n assert parse(\"f((x,),z)\") == f((x,), z)\n assert parse(\"f(x,(y,z),z)\") == f(x, (y, z), z)\n\n assert 
parse(\"f(x,(y,z),z, name=15)\") == f(x, (y, z), z, name=15)\n assert parse(\"f(x,(y,z),z, name=15, name2=17)\") == f(\n x, (y, z), z, name=15, name2=17)\n\n assert_parsed_same_as_python(\"5+i if i>=0 else (0 if i<-1 else 10)\")\n assert_parsed_same_as_python(\"0 if 1 if 2 else 3 else 4\")\n assert_parsed_same_as_python(\"0 if (1 if 2 else 3) else 4\")\n assert_parsed_same_as_python(\"(2, 3,)\")\n\n with pytest.deprecated_call():\n parse(\"1+if(0, 1, 2)\")\n\n# }}}\n\n\ndef test_mappers():\n from pymbolic import variables\n f, x, y, z = variables(\"f x y z\")\n\n for expr in [\n f(x, (y, z), name=z**2)\n ]:\n from pymbolic.mapper import WalkMapper\n from pymbolic.mapper.dependency import DependencyMapper\n str(expr)\n IdentityMapper()(expr)\n WalkMapper()(expr)\n DependencyMapper()(expr)\n\n\ndef test_func_dep_consistency():\n from pymbolic import var\n from pymbolic.mapper.dependency import DependencyMapper\n f = var(\"f\")\n x = var(\"x\")\n dep_map = DependencyMapper(include_calls=\"descend_args\")\n assert dep_map(f(x)) == {x}\n assert dep_map(f(x=x)) == {x}\n\n\ndef test_conditions():\n from pymbolic import var\n x = var(\"x\")\n y = var(\"y\")\n assert str(x.eq(y).and_(x.le(5))) == \"x == y and x <= 5\"\n\n\ndef test_graphviz():\n from pymbolic import parse\n expr = parse(\"(2*a[1]*b[1]+2*a[0]*b[0])*(hankel_1(-1,sqrt(a[1]**2+a[0]**2)*k) \"\n \"-hankel_1(1,sqrt(a[1]**2+a[0]**2)*k))*k /(4*sqrt(a[1]**2+a[0]**2)) \"\n \"+hankel_1(0,sqrt(a[1]**2+a[0]**2)*k)\")\n\n from pymbolic.mapper.graphviz import GraphvizMapper\n gvm = GraphvizMapper()\n gvm(expr)\n print(gvm.get_dot_code())\n\n\n# {{{ geometric algebra\n\n@pytest.mark.parametrize(\"dims\", [2, 3, 4, 5])\n# START_GA_TEST\ndef test_geometric_algebra(dims):\n pytest.importorskip(\"numpy\")\n\n import numpy as np\n from pymbolic.geometric_algebra import MultiVector as MV # noqa\n\n vec1 = MV(np.random.randn(dims))\n vec2 = MV(np.random.randn(dims))\n vec3 = MV(np.random.randn(dims))\n vec4 = MV(np.random.randn(dims))\n vec5 = MV(np.random.randn(dims))\n\n # Fundamental identity\n assert ((vec1 ^ vec2) + (vec1 | vec2)).close_to(vec1*vec2)\n\n # Antisymmetry\n assert (vec1 ^ vec2 ^ vec3).close_to(- vec2 ^ vec1 ^ vec3)\n\n vecs = [vec1, vec2, vec3, vec4, vec5]\n\n if len(vecs) > dims:\n from operator import xor as outer\n assert reduce(outer, vecs).close_to(0)\n\n assert (vec1.inv()*vec1).close_to(1)\n assert (vec1*vec1.inv()).close_to(1)\n assert ((1/vec1)*vec1).close_to(1)\n assert (vec1/vec1).close_to(1)\n\n for a, b, c in [\n (vec1, vec2, vec3),\n (vec1*vec2, vec3, vec4),\n (vec1, vec2*vec3, vec4),\n (vec1, vec2, vec3*vec4),\n (vec1, vec2, vec3*vec4*vec5),\n (vec1, vec2*vec1, vec3*vec4*vec5),\n ]:\n\n # Associativity\n assert ((a*b)*c).close_to(a*(b*c))\n assert ((a ^ b) ^ c).close_to(a ^ (b ^ c))\n # The inner product is not associative.\n\n # scalar product\n assert ((c*b).project(0)) .close_to(b.scalar_product(c))\n assert ((c.rev()*b).project(0)) .close_to(b.rev().scalar_product(c))\n assert ((b.rev()*b).project(0)) .close_to(b.norm_squared())\n\n assert b.norm_squared() >= 0\n assert c.norm_squared() >= 0\n\n # Cauchy's inequality\n assert b.scalar_product(c) <= abs(b)*abs(c) + 1e-13\n\n # contractions\n\n # (3.18) in [DFM]\n assert abs(b.scalar_product(a ^ c) - (b >> a).scalar_product(c)) < 1e-13\n\n # duality, (3.20) in [DFM]\n assert ((a ^ b) << c) .close_to(a << (b << c))\n\n # two definitions of the dual agree: (1.2.26) in [HS]\n # and (sec 3.5.3) in [DFW]\n assert (c << c.I.rev()).close_to(c | c.I.rev())\n\n # inverse\n for 
div in list(b.gen_blades()) + [vec1, vec1.I]:\n assert (div.inv()*div).close_to(1)\n assert (div*div.inv()).close_to(1)\n assert ((1/div)*div).close_to(1)\n assert (div/div).close_to(1)\n assert ((c/div)*div).close_to(c)\n assert ((c*div)/div).close_to(c)\n\n # reverse properties (Sec 2.9.5 [DFM])\n assert c.rev().rev() == c\n assert (b ^ c).rev() .close_to(c.rev() ^ b.rev())\n\n # dual properties\n # (1.2.26) in [HS]\n assert c.dual() .close_to(c | c.I.rev())\n assert c.dual() .close_to(c*c.I.rev())\n\n # involution properties (Sec 2.9.5 DFW)\n assert c.invol().invol() == c\n assert (b ^ c).invol() .close_to(b.invol() ^ c.invol())\n\n # commutator properties\n\n # Jacobi identity (1.1.56c) in [HS] or (8.2) in [DFW]\n assert (a.x(b.x(c)) + b.x(c.x(a)) + c.x(a.x(b))).close_to(0)\n\n # (1.57) in [HS]\n assert a.x(b*c) .close_to(a.x(b)*c + b*a.x(c))\n# END_GA_TEST\n\n# }}}\n\n\ndef test_ast_interop():\n src = \"\"\"\n def f():\n xx = 3*y + z * (12 if x < 13 else 13)\n yy = f(x, y=y)\n \"\"\"\n\n import ast\n mod = ast.parse(src.replace(\"\\n \", \"\\n\"))\n\n print(ast.dump(mod))\n\n from pymbolic.interop.ast import ASTToPymbolic\n ast2p = ASTToPymbolic()\n\n for f in mod.body:\n if not isinstance(f, ast.FunctionDef):\n continue\n\n for stmt in f.body:\n if not isinstance(stmt, ast.Assign):\n continue\n\n lhs, = stmt.targets\n lhs = ast2p(lhs)\n rhs = ast2p(stmt.value)\n\n print(lhs, rhs)\n\n\ndef test_compile():\n from pymbolic import parse, compile\n code = compile(parse(\"x ** y\"), [\"x\", \"y\"])\n assert code(2, 5) == 32\n\n # Test pickling of compiled code.\n import pickle\n code = pickle.loads(pickle.dumps(code))\n assert code(3, 3) == 27\n\n\ndef test_unifier():\n from pymbolic import var\n from pymbolic.mapper.unifier import UnidirectionalUnifier\n a, b, c, d, e, f = [var(s) for s in \"abcdef\"]\n\n def match_found(records, eqns):\n for record in records:\n if eqns <= set(record.equations):\n return True\n return False\n\n recs = UnidirectionalUnifier(\"abc\")(a+b*c, d+e*f)\n assert len(recs) == 2\n assert match_found(recs, {(a, d), (b, e), (c, f)})\n assert match_found(recs, {(a, d), (b, f), (c, e)})\n\n recs = UnidirectionalUnifier(\"abc\")(a+b, d+e+f)\n assert len(recs) == 6\n assert match_found(recs, {(a, d), (b, e+f)})\n assert match_found(recs, {(a, e), (b, d+f)})\n assert match_found(recs, {(a, f), (b, d+e)})\n assert match_found(recs, {(b, d), (a, e+f)})\n assert match_found(recs, {(b, e), (a, d+f)})\n assert match_found(recs, {(b, f), (a, d+e)})\n\n vals = [var(\"v\" + str(i)) for i in range(100)]\n recs = UnidirectionalUnifier(\"a\")(sum(vals[1:]) + a, sum(vals))\n assert len(recs) == 1\n assert match_found(recs, {(a, var(\"v0\"))})\n\n recs = UnidirectionalUnifier(\"abc\")(a+b+c, d+e)\n assert len(recs) == 0\n\n recs = UnidirectionalUnifier(\"abc\")(f(a+b, f(a+c)), f(b+c, f(b+d)))\n assert len(recs) == 1\n assert match_found(recs, {(a, b), (b, c), (c, d)})\n\n\ndef test_long_sympy_mapping():\n sp = pytest.importorskip(\"sympy\")\n from pymbolic.interop.sympy import SympyToPymbolicMapper\n SympyToPymbolicMapper()(sp.sympify(int(10**20)))\n SympyToPymbolicMapper()(sp.sympify(int(10)))\n\n\ndef test_stringifier_preserve_shift_order():\n for expr in [\n parse(\"(a << b) >> 2\"),\n parse(\"a << (b >> 2)\")\n ]:\n assert parse(str(expr)) == expr\n\n\nLATEX_TEMPLATE = r\"\"\"\\documentclass{article}\n\\usepackage{amsmath}\n\n\\begin{document}\n%s\n\\end{document}\"\"\"\n\n\ndef test_latex_mapper():\n from pymbolic import parse\n from pymbolic.mapper.stringifier import 
LaTeXMapper, StringifyMapper\n\n tm = LaTeXMapper()\n sm = StringifyMapper()\n\n equations = []\n\n def add(expr):\n # Add an equation to the list of tests.\n equations.append(r\"\\[{}\\] % from: {}\".format(tm(expr), sm(expr)))\n\n add(parse(\"a * b + c\"))\n add(parse(\"f(a,b,c)\"))\n add(parse(\"a ** b ** c\"))\n add(parse(\"(a | b) ^ ~c\"))\n add(parse(\"a << b\"))\n add(parse(\"a >> b\"))\n add(parse(\"a[i,j,k]\"))\n add(parse(\"a[1:3]\"))\n add(parse(\"a // b\"))\n add(parse(\"not (a or b) and c\"))\n add(parse(\"(a % b) % c\"))\n add(parse(\"(a >= b) or (b <= c)\"))\n add(prim.Min((1,)) + prim.Max((1, 2)))\n add(prim.Substitution(prim.Variable(\"x\") ** 2, (\"x\",), (2,)))\n add(prim.Derivative(parse(\"x**2\"), (\"x\",)))\n\n # Run LaTeX and ensure the file compiles.\n import os\n import tempfile\n import subprocess\n import shutil\n\n latex_dir = tempfile.mkdtemp(\"pymbolic\")\n\n try:\n tex_file_path = os.path.join(latex_dir, \"input.tex\")\n\n with open(tex_file_path, \"w\") as tex_file:\n contents = LATEX_TEMPLATE % \"\\n\".join(equations)\n tex_file.write(contents)\n\n try:\n subprocess.check_output(\n [\"latex\",\n \"-interaction=nonstopmode\",\n \"-output-directory=%s\" % latex_dir,\n tex_file_path],\n universal_newlines=True)\n except OSError: # FIXME: Should be FileNotFoundError on Py3\n pytest.skip(\"latex command not found\")\n except subprocess.CalledProcessError as err:\n raise AssertionError(str(err.output))\n\n finally:\n shutil.rmtree(latex_dir)\n\n\ndef test_flop_counter():\n x = prim.Variable(\"x\")\n y = prim.Variable(\"y\")\n z = prim.Variable(\"z\")\n\n subexpr = prim.CommonSubexpression(3 * (x**2 + y + z))\n expr = 3*subexpr + subexpr\n\n from pymbolic.mapper.flop_counter import FlopCounter, CSEAwareFlopCounter\n assert FlopCounter()(expr) == 4 * 2 + 2\n\n assert CSEAwareFlopCounter()(expr) == 4 + 2\n\n\ndef test_make_sym_vector():\n numpy = pytest.importorskip(\"numpy\")\n from pymbolic.primitives import make_sym_vector\n\n assert len(make_sym_vector(\"vec\", 2)) == 2\n assert len(make_sym_vector(\"vec\", numpy.int32(2))) == 2\n assert len(make_sym_vector(\"vec\", [1, 2, 3])) == 3\n\n\ndef test_multiplicative_stringify_preserves_association():\n for inner in [\"*\", \" / \", \" // \", \" % \"]:\n for outer in [\"*\", \" / \", \" // \", \" % \"]:\n if outer == inner:\n continue\n\n assert_parse_roundtrip(f\"x{outer}(y{inner}z)\")\n assert_parse_roundtrip(f\"(y{inner}z){outer}x\")\n\n assert_parse_roundtrip(\"(-1)*(((-1)*x) / 5)\")\n\n\ndef test_differentiator_flags_for_nonsmooth_and_discontinuous():\n import pymbolic.functions as pf\n from pymbolic.mapper.differentiator import differentiate\n\n x = prim.Variable(\"x\")\n\n with pytest.raises(ValueError):\n differentiate(pf.fabs(x), x)\n\n result = differentiate(pf.fabs(x), x, allowed_nonsmoothness=\"continuous\")\n assert result == pf.sign(x)\n\n with pytest.raises(ValueError):\n differentiate(pf.sign(x), x)\n\n result = differentiate(pf.sign(x), x, allowed_nonsmoothness=\"discontinuous\")\n assert result == 0\n\n\ndef test_np_bool_handling():\n from pymbolic.mapper.evaluator import evaluate\n numpy = pytest.importorskip(\"numpy\")\n expr = prim.LogicalNot(numpy.bool_(False))\n assert evaluate(expr) is True\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n\n# vim: fdm=marker\n"
] |
[
[
"scipy.sparse.csr_matrix",
"numpy.random.randn",
"numpy.linalg.norm"
]
] |
egornagornov3tf4k/Zeta36y
|
[
"4502be4c86195b0aa5184c45d6f221b34daee7a8"
] |
[
"web/node_modules/weblas/test/data/binary_matrix.py"
] |
[
"#!/usr/bin/env python\n\"\"\"Create two randomly generated matrices, of the specified sizes and write them\nto JSON files.\n\n\"\"\"\nimport json\nimport numpy as np\n\n\ndef read(path):\n\n\twith open(path, 'rb') as f:\n\t\tmatrix = np.fromfile(f, dtype=np.float32)\n\n\treturn matrix\n\ndef write(path, matrix):\n\n\twith open(path, 'wb') as f:\n\t\tf.write(matrix.astype(np.float32).tostring())\n\n\treturn matrix\n"
] |
[
[
"numpy.fromfile"
]
] |
WZzhaoyi/MTLNAS
|
[
"c04fcce1437eef306a41a6a224551be99d88f9a3"
] |
[
"core/models/nddr_net.py"
] |
[
"import numpy as np\nfrom time import sleep\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom core.models.common_layers import batch_norm, get_nddr\nfrom core.tasks import get_tasks\nfrom core.utils import AttrDict\nfrom core.utils.losses import poly\n\n\nclass SingleTaskNet(nn.Module):\n def __init__(self, cfg, net1, net2):\n super(SingleTaskNet, self).__init__()\n self.cfg = cfg\n self.net1 = net1\n self.net2 = net2\n assert len(net1.stages) == len(net2.stages)\n self.task1, self.task2 = get_tasks(cfg)\n\n self.num_stages = len(net1.stages)\n self._step = 0\n \n def step(self):\n self._step += 1\n \n def loss(self, x, labels):\n label_1, label_2 = labels\n result = self.forward(x)\n result.loss1 = self.task1.loss(result.out1, label_1)\n result.loss2 = self.task2.loss(result.out2, label_2)\n result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2\n return result\n\n def forward(self, x):\n N, C, H, W = x.size()\n y = x.clone()\n x = self.net1.base(x)\n y = self.net2.base(y)\n for stage_id in range(self.num_stages):\n x = self.net1.stages[stage_id](x)\n y = self.net2.stages[stage_id](y)\n x = self.net1.head(x)\n y = self.net2.head(y)\n return AttrDict({'out1': x, 'out2': y})\n \n \nclass SharedFeatureNet(nn.Module):\n def __init__(self, cfg, net1, net2):\n super(SharedFeatureNet, self).__init__()\n self.cfg = cfg\n self.net1 = net1\n self.net2 = net2\n assert len(net1.stages) == len(net2.stages)\n self.task1, self.task2 = get_tasks(cfg)\n\n self.num_stages = len(net1.stages)\n self._step = 0\n \n def step(self):\n self._step += 1\n \n def loss(self, x, labels):\n label_1, label_2 = labels\n result = self.forward(x)\n result.loss1 = self.task1.loss(result.out1, label_1)\n result.loss2 = self.task2.loss(result.out2, label_2)\n result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2\n return result\n\n def forward(self, x):\n x = self.net1.base(x)\n for stage_id in range(self.num_stages):\n x = self.net1.stages[stage_id](x)\n out1 = self.net1.head(x)\n out2 = self.net2.head(x)\n return AttrDict({'out1': out1, 'out2': out2})\n \n\nclass NDDRNet(nn.Module):\n def __init__(self, cfg, net1, net2):\n super(NDDRNet, self).__init__()\n self.cfg = cfg\n self.net1 = net1\n self.net2 = net2\n assert len(net1.stages) == len(net2.stages)\n self.task1, self.task2 = get_tasks(cfg)\n\n self.num_stages = len(net1.stages)\n nddrs = []\n total_channels = 0\n for stage_id in range(self.num_stages):\n out_channels = net1.stages[stage_id].out_channels\n assert out_channels == net2.stages[stage_id].out_channels\n if stage_id in cfg.TRAIN.AUX_LAYERS:\n total_channels += out_channels\n nddr = get_nddr(cfg, out_channels, out_channels)\n nddrs.append(nddr)\n nddrs = nn.ModuleList(nddrs)\n \n self.aux = cfg.TRAIN.AUX\n if self.aux:\n print(\"Using shortcut\")\n self.aux_conv1 = nn.Sequential(\n nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),\n batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=0.5),\n nn.Conv2d(256, cfg.MODEL.NET1_CLASSES, kernel_size=1)\n )\n self.aux_conv2 = nn.Sequential(\n nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),\n batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=0.5),\n nn.Conv2d(256, cfg.MODEL.NET2_CLASSES, kernel_size=1)\n )\n\n self.nddrs = nn.ModuleDict({\n 'nddrs': nddrs,\n })\n \n self._step = 0\n \n def step(self):\n self._step += 1\n \n def loss(self, x, 
labels):\n label_1, label_2 = labels\n result = self.forward(x)\n result.loss1 = self.task1.loss(result.out1, label_1)\n result.loss2 = self.task2.loss(result.out2, label_2)\n result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2\n if self.aux:\n result.aux_loss1 = self.task1.loss(result.aux1, label_1)\n result.aux_loss2 = self.task2.loss(result.aux2, label_2)\n result.aux_loss = result.aux_loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.aux_loss2\n result.aux_weight = poly(start=self.cfg.TRAIN.AUX_WEIGHT, end=0.,\n steps=self._step, total_steps=self.cfg.TRAIN.STEPS,\n period=self.cfg.TRAIN.AUX_PERIOD,\n power=1.)\n result.loss += result.aux_weight * result.aux_loss\n return result\n\n def forward(self, x):\n N, C, H, W = x.size()\n y = x.clone()\n x = self.net1.base(x)\n y = self.net2.base(y)\n xs, ys = [], []\n for stage_id in range(self.num_stages):\n x = self.net1.stages[stage_id](x)\n y = self.net2.stages[stage_id](y)\n if isinstance(x, list):\n x[0], y[0] = self.nddrs['nddrs'][stage_id](x[0], y[0])\n else:\n x, y = self.nddrs['nddrs'][stage_id](x, y)\n if self.aux and self.training and stage_id in self.cfg.TRAIN.AUX_LAYERS:\n xs.append(x)\n ys.append(y)\n x = self.net1.head(x)\n y = self.net2.head(y)\n result = AttrDict({'out1': x, 'out2': y})\n \n if self.aux and self.training:\n _, _, h, w = x.size()\n aux_x = torch.cat([F.interpolate(_x, (h, w), mode='bilinear', align_corners=True) for _x in xs[:-1]] + [xs[-1]],\n dim=1)\n aux_y = torch.cat([F.interpolate(_y, (h, w), mode='bilinear', align_corners=True) for _y in ys[:-1]] + [ys[-1]],\n dim=1)\n result.aux1 = self.aux_conv1(aux_x)\n result.aux2 = self.aux_conv2(aux_y)\n return result\n"
] |
[
[
"torch.nn.ModuleList",
"torch.nn.ModuleDict",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout2d"
]
] |
akb89/word2vec
|
[
"0533e6c1b5ee02d2523bc18359423f94651f7805"
] |
[
"word2vec/estimators/word2vec.py"
] |
[
"\"\"\"A word2vec implementation using Tensorflow and estimators.\"\"\"\n\nimport os\n\nfrom collections import defaultdict\nimport logging\nimport tensorflow as tf\n\n# from tensorflow.python import debug as tf_debug # pylint: disable=E0611\n\nimport word2vec.utils.datasets as datasets_utils\nimport word2vec.models.word2vec as w2v_model\n\nfrom word2vec.evaluation.men import MEN\n\nlogger = logging.getLogger(__name__)\n\n__all__ = ('Word2Vec')\n\n\nclass Word2Vec():\n \"\"\"Tensorflow implementation of Word2vec.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize vocab dictionaries.\"\"\"\n self._words = []\n self._counts = []\n self._total_count = 0\n\n @property\n def vocab_size(self):\n \"\"\"Return the number of items in vocabulary.\n\n Since we use len(word_freq_dict) as the default index for UKN in\n the index_table, we have to add 1 to the length\n \"\"\"\n return len(self._words) + 1\n\n def build_vocab(self, data_filepath, vocab_filepath, min_count):\n \"\"\"Create vocabulary-related data.\"\"\"\n logger.info('Building vocabulary from file {}'.format(data_filepath))\n logger.info('Loading word counts...')\n if self.vocab_size > 1:\n logger.warning('This instance of W2V\\'s vocabulary does not seem '\n 'to be empty. Erasing previously stored vocab...')\n self._words, self._counts, self._total_count = [], [], 0\n word_count_dict = defaultdict(int)\n with open(data_filepath, 'r') as data_stream:\n for line in data_stream:\n for word in line.strip().split():\n word_count_dict[word] += 1\n logger.info('Saving word frequencies to file: {}'\n .format(vocab_filepath))\n with open(vocab_filepath, 'w') as vocab_stream:\n # words need to be sorted in decreasing frequency to be able\n # to rely on the default tf.nn.log_uniform_candidate_sampler\n # later on in the tf.nn.nce_loss\n for word, count in sorted(word_count_dict.items(),\n key=lambda x: x[1], reverse=True):\n print('{}\\t{}'.format(word, count), file=vocab_stream)\n if count >= min_count:\n self._words.append(word)\n self._counts.append(count)\n self._total_count += count\n\n def load_vocab(self, vocab_filepath, min_count):\n \"\"\"Load a previously saved vocabulary file.\"\"\"\n logger.info('Loading word counts from file {}'.format(vocab_filepath))\n self._words, self._counts, self._total_count = [], [], 0\n with open(vocab_filepath, 'r', encoding='UTF-8') as vocab_stream:\n for line in vocab_stream:\n word_count = line.strip().split('\\t', 1)\n word, count = word_count[0], int(word_count[1])\n if count >= min_count:\n self._words.append(word)\n self._counts.append(count)\n self._total_count += count\n logger.info('Done loading word counts')\n\n # pylint: disable=R0914,W0613\n def train(self, train_mode, training_data_filepath, model_dirpath,\n batch_size, embedding_size, num_neg_samples,\n learning_rate, window_size, num_epochs, sampling_rate,\n p_num_threads, t_num_threads, shuffling_buffer_size,\n save_summary_steps, save_checkpoints_steps, keep_checkpoint_max,\n log_step_count_steps, debug, debug_port, xla):\n \"\"\"Train Word2Vec.\"\"\"\n if self.vocab_size == 1:\n raise Exception('You need to build or load a vocabulary before '\n 'training word2vec')\n if train_mode not in ('cbow', 'skipgram'):\n raise Exception('Unsupported train_mode \\'{}\\''.format(train_mode))\n sess_config = tf.compat.v1.ConfigProto(log_device_placement=True)\n sess_config.intra_op_parallelism_threads = t_num_threads\n sess_config.inter_op_parallelism_threads = t_num_threads\n # if xla:\n # sess_config.graph_options.optimizer_options.global_jit_level = 
\\\n # tf.OptimizerOptions.ON_1 # JIT compilation on GPU\n run_config = tf.estimator.RunConfig(\n session_config=sess_config, save_summary_steps=save_summary_steps,\n save_checkpoints_steps=save_checkpoints_steps,\n keep_checkpoint_max=keep_checkpoint_max,\n log_step_count_steps=log_step_count_steps)\n estimator = tf.estimator.Estimator(\n model_fn=w2v_model.model,\n model_dir=model_dirpath,\n config=run_config,\n params={\n 'mode': train_mode,\n 'vocab_size': self.vocab_size,\n 'batch_size': batch_size,\n 'embedding_size': embedding_size,\n 'num_neg_samples': num_neg_samples,\n 'learning_rate': learning_rate,\n 'words': self._words,\n 'p_num_threads': p_num_threads,\n 'xla': xla,\n 'men': MEN(os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n 'resources', 'MEN_dataset_natural_form_full'))\n })\n # waiting for v2 fix in tf.summary.FileWriter:\n tf.compat.v1.disable_eager_execution()\n if debug:\n raise Exception('Unsupported parameter: waiting for the TF team '\n 'to release v2 equivalents for TensorBoardDebugHook')\n # hooks = [tf.estimator.ProfilerHook(\n # save_steps=save_summary_steps, show_dataflow=True,\n # show_memory=True, output_dir=model_dirpath),\n # tf_debug.TensorBoardDebugHook('localhost:{}'\n # .format(debug_port))]\n # else:\n hooks = [tf.estimator.ProfilerHook(\n save_steps=save_summary_steps, show_dataflow=True,\n show_memory=True, output_dir=model_dirpath)]\n estimator.train(\n input_fn=lambda: datasets_utils.get_w2v_train_dataset(\n training_data_filepath, train_mode, self._words, self._counts,\n self._total_count, window_size, sampling_rate, batch_size,\n num_epochs, p_num_threads, shuffling_buffer_size),\n hooks=hooks)\n"
] |
[
[
"tensorflow.estimator.RunConfig",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.estimator.ProfilerHook",
"tensorflow.compat.v1.disable_eager_execution"
]
] |
mikepackard415/Scientific-Environmental-Discourse
|
[
"f8d08734f7c2ce98e088479ac7b58c7b348c0401"
] |
[
"Programs/env_lda.py"
] |
[
"import pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom lda import LDA\r\n\r\ndef learn_topics(texts, topicnum):\r\n\r\n # Get vocabulary and word counts. Use the top 10,000 most frequent\r\n # lowercase unigrams with at least 3 alphabetical, non-numeric characters,\r\n # punctuation treated as separators.\r\n print(\"Vectorizing...\")\r\n CVzer = CountVectorizer(max_features=10000,\r\n lowercase=True)\r\n doc_vcnts = CVzer.fit_transform(texts)\r\n vocabulary = CVzer.get_feature_names()\r\n\r\n # Learn topics. Refresh conrols print frequency.\r\n print(\"LDA\")\r\n lda_model = LDA(topicnum, n_iter=4000, refresh=500) \r\n doc_topic = lda_model.fit_transform(doc_vcnts)\r\n topic_word = lda_model.topic_word_\r\n\r\n return doc_topic, topic_word, vocabulary\r\n\r\nprint(\"Reading data...\")\r\nenv = pd.read_csv('../Data/Environmental Discourse/env_processed.csv', index_col=0)\r\nenv = env[~env.text_processed.isna()]\r\n\r\ndoc_topic, topic_word, vocabulary = learn_topics(env.text_processed, 100)\r\n\r\nprint(doc_topic[0,:])\r\n\r\nfor i in range(100):\r\n env['topic_{}'.format(i)] = doc_topic[:, i]\r\n\r\nenv.to_csv('../Data/Environmental Discourse/env_lda.csv')"
] |
[
[
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer"
]
] |
Leo-xxx/flappy
|
[
"746cd33d8b56f09b71a308ce041150d8eb61344b"
] |
[
"test.py"
] |
[
"########################## FWMAV Simulation #########################\n# Version 0.3\n# Fan Fei\t\tFeb 2019\n# Direct motor driven flapping wing MAV simulation\n#######################################################################\n\nimport gym\nimport flappy\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.vec_env import SubprocVecEnv\nfrom stable_baselines.common import set_global_seeds\n\nfrom flappy.envs.fwmav.controllers.arc_xy_arc_z import ARCController\nfrom flappy.envs.fwmav.controllers.pid_controller import PIDController\n\nimport time\nimport argparse\nimport importlib\nimport numpy as np\n\ndef make_env(env_id, rank, seed=0, random_init = True, randomize_sim = True, phantom_sensor = False):\n\tdef _init():\n\t\tenv = gym.make(env_id)\n\t\tenv.config(random_init, randomize_sim, phantom_sensor)\n\t\tif rank == 0:\n\t\t\tenv.enable_visualization()\n\t\t\tenv.enable_print()\n\t\tenv.seed(seed + rank)\n\t\treturn env\n\n\t# set_global_seeds(seed)\n\treturn _init\n\nclass LazyModel:\n\tdef __init__(self,env,model_type):\n\t\tself.action_lb = env.action_lb\n\t\tself.action_ub = env.action_ub\n\t\tself.observation_bound = env.observation_bound\n\t\tif model_type == 'PID':\n\t\t\tself.policy = PIDController(env.sim.dt_c)\n\t\telif model_type == 'ARC':\n\t\t\tself.policy = ARCController(env.sim.dt_c)\n\t\telse:\n\t\t\traise Exception('Error')\n\n\tdef predict(self, obs):\n\t\taction = self.policy.get_action(obs[0]*self.observation_bound)\n\t\t# scale action from [action_lb, action_ub] to [-1,1]\n\t\t# since baseline does not support asymmetric action space\n\t\tnormalized_action = (action-self.action_lb)/(self.action_ub - self.action_lb)*2 - 1\n\t\taction = np.array([normalized_action])\n\t\treturn action, None\n\ndef main(args):\n\tenv_id = 'fwmav_hover-v0'\n\n\tenv = DummyVecEnv([make_env(env_id, 0, random_init = args.rand_init, randomize_sim = args.rand_dynamics, phantom_sensor = args.phantom_sensor)])\n\n\tif args.model_type != 'PID' and args.model_type != 'ARC':\n\t\ttry:\n\t\t\tmodel_cls = getattr(\n\t\t\t\timportlib.import_module('stable_baselines'), args.model_type)\n\t\texcept AttributeError:\n\t\t\tprint(args.model_type, \"Error: wrong model type\")\n\t\t\treturn\n\t\ttry:\n\t\t\tmodel = model_cls.load(args.model_path)\n\t\texcept:\n\t\t\tprint(args.model_path, \"Error: wrong model path\")\n\telse:\n\t\tmodel = LazyModel(env.envs[0],args.model_type)\n\n\tobs = env.reset()\n\n\twhile True:\n\t\tif env.envs[0].is_sim_on == False:\n\t\t\tenv.envs[0].gui.cv.wait()\n\t\telif env.envs[0].is_sim_on:\n\t\t\taction, _ = model.predict(obs)\n\t\t\tobs, rewards, done, info = env.step(action)\n\t\t\t# if done:\n\t\t\t# \tobs = env.reset()\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--model_type', required=True)\n\tparser.add_argument('--model_path')\n\tparser.add_argument(\n\t\t'--policy_type', const='MlpPolicy', default='MlpPolicy', nargs='?')\n\tparser.add_argument('--rand_init', action='store_true', default=False)\n\tparser.add_argument('--rand_dynamics', action='store_true', default=False)\n\tparser.add_argument('--phantom_sensor', action='store_true', default=False)\n\n\targs = parser.parse_args()\n\n\tmain(args)"
] |
[
[
"numpy.array"
]
] |
MitI-7/MachineLearning
|
[
"6450e2a9260ae70cb75cd2f195729143fe427431"
] |
[
"ReinforcementLearning/Bandit/EpsilonGreedy.py"
] |
[
"import random\nimport math\nimport numpy as np\nfrom typing import List\n\n\nclass EpsilonGreedy:\n def __init__(self, epsilon: float, counts: List[int], values: List[float]):\n assert epsilon is None or 0.0 <= epsilon <= 1.0\n self.epsilon = epsilon\n self.counts = counts\n self.values = values\n\n def initialize(self, n_arms):\n self.counts = [0] * n_arms\n self.values = [0.0] * n_arms\n\n def select_arm(self):\n epsilon = self.epsilon\n if epsilon is None:\n t = sum(self.counts) + 1\n epsilon = 1 / math.log(t + 0.0000001)\n\n # 活用\n if random.random() > epsilon:\n return np.argmax(self.values)\n # 探索\n else:\n return random.randrange(len(self.values))\n\n def update(self, chosen_arm, reward):\n self.counts[chosen_arm] += 1\n n = self.counts[chosen_arm]\n\n value = self.values[chosen_arm]\n self.values[chosen_arm] = ((n - 1) / float(n)) * value + (1 / float(n)) * reward # online average\n\n def __str__(self):\n return \"EpsilonGreedy(epsilon={0})\".format(self.epsilon)\n"
] |
[
[
"numpy.argmax"
]
] |
997Yi/Flask-web
|
[
"901ac307b68486d8289105c159ca702318bea5b0",
"6b5e5d274bfa25fbd3db5af02723a5671f1e901d",
"6b5e5d274bfa25fbd3db5af02723a5671f1e901d"
] |
[
"venv/Lib/site-packages/skimage/conftest.py",
"venv/Lib/site-packages/matplotlib/testing/decorators.py",
"venv/Lib/site-packages/skimage/feature/masked_register_translation.py"
] |
[
"# Use legacy numpy printing. This fix is made to keep doctests functional.\n# For more info, see https://github.com/scikit-image/scikit-image/pull/2935 .\n# TODO: remove this workaround once minimal required numpy is set to 1.14.0\nfrom distutils.version import LooseVersion as Version\nimport numpy as np\n\nif Version(np.__version__) >= Version('1.14'):\n np.set_printoptions(legacy='1.13')\n\n# List of files that pytest should ignore\ncollect_ignore = [\"io/_plugins\",]\ntry:\n import visvis\nexcept ImportError:\n collect_ignore.append(\"measure/mc_meta/visual_test.py\")\n",
"import contextlib\nfrom distutils.version import StrictVersion\nimport functools\nimport inspect\nimport os\nfrom pathlib import Path\nimport shutil\nimport sys\nimport unittest\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.style\nimport matplotlib.units\nimport matplotlib.testing\nfrom matplotlib import cbook\nfrom matplotlib import ft2font\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\nfrom . import is_called_from_pytest\nfrom .compare import comparable_formats, compare_images, make_test_filename\nfrom .exceptions import ImageComparisonFailure\n\n\n@contextlib.contextmanager\ndef _cleanup_cm():\n orig_units_registry = matplotlib.units.registry.copy()\n try:\n with warnings.catch_warnings(), matplotlib.rc_context():\n yield\n finally:\n matplotlib.units.registry.clear()\n matplotlib.units.registry.update(orig_units_registry)\n plt.close(\"all\")\n\n\nclass CleanupTestCase(unittest.TestCase):\n \"\"\"A wrapper for unittest.TestCase that includes cleanup operations.\"\"\"\n @classmethod\n def setUpClass(cls):\n cls._cm = _cleanup_cm().__enter__()\n\n @classmethod\n def tearDownClass(cls):\n cls._cm.__exit__(None, None, None)\n\n\ndef cleanup(style=None):\n \"\"\"\n A decorator to ensure that any global state is reset before\n running a test.\n\n Parameters\n ----------\n style : str, dict, or list, optional\n The style(s) to apply. Defaults to ``[\"classic\",\n \"_classic_test_patch\"]``.\n \"\"\"\n\n # If cleanup is used without arguments, *style* will be a callable, and we\n # pass it directly to the wrapper generator. If cleanup if called with an\n # argument, it is a string naming a style, and the function will be passed\n # as an argument to what we return. This is a confusing, but somewhat\n # standard, pattern for writing a decorator with optional arguments.\n\n def make_cleanup(func):\n if inspect.isgeneratorfunction(func):\n @functools.wraps(func)\n def wrapped_callable(*args, **kwargs):\n with _cleanup_cm(), matplotlib.style.context(style):\n yield from func(*args, **kwargs)\n else:\n @functools.wraps(func)\n def wrapped_callable(*args, **kwargs):\n with _cleanup_cm(), matplotlib.style.context(style):\n func(*args, **kwargs)\n\n return wrapped_callable\n\n if callable(style):\n result = make_cleanup(style)\n # Default of mpl_test_settings fixture and image_comparison too.\n style = [\"classic\", \"_classic_test_patch\"]\n return result\n else:\n return make_cleanup\n\n\ndef check_freetype_version(ver):\n if ver is None:\n return True\n\n if isinstance(ver, str):\n ver = (ver, ver)\n ver = [StrictVersion(x) for x in ver]\n found = StrictVersion(ft2font.__freetype_version__)\n\n return ver[0] <= found <= ver[1]\n\n\ndef _checked_on_freetype_version(required_freetype_version):\n import pytest\n reason = (\"Mismatched version of freetype. 
\"\n \"Test requires '%s', you have '%s'\" %\n (required_freetype_version, ft2font.__freetype_version__))\n return pytest.mark.xfail(\n not check_freetype_version(required_freetype_version),\n reason=reason, raises=ImageComparisonFailure, strict=False)\n\n\ndef remove_ticks_and_titles(figure):\n figure.suptitle(\"\")\n null_formatter = ticker.NullFormatter()\n for ax in figure.get_axes():\n ax.set_title(\"\")\n ax.xaxis.set_major_formatter(null_formatter)\n ax.xaxis.set_minor_formatter(null_formatter)\n ax.yaxis.set_major_formatter(null_formatter)\n ax.yaxis.set_minor_formatter(null_formatter)\n try:\n ax.zaxis.set_major_formatter(null_formatter)\n ax.zaxis.set_minor_formatter(null_formatter)\n except AttributeError:\n pass\n\n\ndef _raise_on_image_difference(expected, actual, tol):\n __tracebackhide__ = True\n\n err = compare_images(expected, actual, tol, in_decorator=True)\n if err:\n for key in [\"actual\", \"expected\"]:\n err[key] = os.path.relpath(err[key])\n raise ImageComparisonFailure(\n 'images not close (RMS %(rms).3f):\\n\\t%(actual)s\\n\\t%(expected)s '\n % err)\n\n\ndef _skip_if_format_is_uncomparable(extension):\n import pytest\n return pytest.mark.skipif(\n extension not in comparable_formats(),\n reason='Cannot compare {} files on this system'.format(extension))\n\n\ndef _mark_skip_if_format_is_uncomparable(extension):\n import pytest\n if isinstance(extension, str):\n name = extension\n marks = []\n elif isinstance(extension, tuple):\n # Extension might be a pytest ParameterSet instead of a plain string.\n # Unfortunately, this type is not exposed, so since it's a namedtuple,\n # check for a tuple instead.\n name, = extension.values\n marks = [*extension.marks]\n else:\n # Extension might be a pytest marker instead of a plain string.\n name, = extension.args\n marks = [extension.mark]\n return pytest.param(name,\n marks=[*marks, _skip_if_format_is_uncomparable(name)])\n\n\nclass _ImageComparisonBase:\n \"\"\"\n Image comparison base class\n\n This class provides *just* the comparison-related functionality and avoids\n any code that would be specific to any testing framework.\n \"\"\"\n\n def __init__(self, func, tol, remove_text, savefig_kwargs):\n self.func = func\n self.baseline_dir, self.result_dir = _image_directories(func)\n self.tol = tol\n self.remove_text = remove_text\n self.savefig_kwargs = savefig_kwargs\n\n def copy_baseline(self, baseline, extension):\n baseline_path = self.baseline_dir / baseline\n orig_expected_path = baseline_path.with_suffix(f'.{extension}')\n if extension == 'eps' and not orig_expected_path.exists():\n orig_expected_path = orig_expected_path.with_suffix('.pdf')\n expected_fname = make_test_filename(\n self.result_dir / orig_expected_path.name, 'expected')\n try:\n # os.symlink errors if the target already exists.\n with contextlib.suppress(OSError):\n os.remove(expected_fname)\n try:\n os.symlink(orig_expected_path, expected_fname)\n except OSError: # On Windows, symlink *may* be unavailable.\n shutil.copyfile(orig_expected_path, expected_fname)\n except OSError:\n raise ImageComparisonFailure(\n f\"Missing baseline image {expected_fname} because the \"\n f\"following file cannot be accessed: {orig_expected_path}\")\n return expected_fname\n\n def compare(self, idx, baseline, extension):\n __tracebackhide__ = True\n fignum = plt.get_fignums()[idx]\n fig = plt.figure(fignum)\n\n if self.remove_text:\n remove_ticks_and_titles(fig)\n\n actual_path = (self.result_dir / baseline).with_suffix(f'.{extension}')\n kwargs = 
self.savefig_kwargs.copy()\n if extension == 'pdf':\n kwargs.setdefault('metadata',\n {'Creator': None, 'Producer': None,\n 'CreationDate': None})\n fig.savefig(actual_path, **kwargs)\n\n expected_path = self.copy_baseline(baseline, extension)\n _raise_on_image_difference(expected_path, actual_path, self.tol)\n\n\ndef _pytest_image_comparison(baseline_images, extensions, tol,\n freetype_version, remove_text, savefig_kwargs,\n style):\n \"\"\"\n Decorate function with image comparison for pytest.\n\n This function creates a decorator that wraps a figure-generating function\n with image comparison code. Pytest can become confused if we change the\n signature of the function, so we indirectly pass anything we need via the\n `mpl_image_comparison_parameters` fixture and extra markers.\n \"\"\"\n import pytest\n\n extensions = map(_mark_skip_if_format_is_uncomparable, extensions)\n\n def decorator(func):\n @functools.wraps(func)\n # Parameter indirection; see docstring above and comment below.\n @pytest.mark.usefixtures('mpl_image_comparison_parameters')\n @pytest.mark.parametrize('extension', extensions)\n @pytest.mark.baseline_images(baseline_images)\n # END Parameter indirection.\n @pytest.mark.style(style)\n @_checked_on_freetype_version(freetype_version)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n __tracebackhide__ = True\n img = _ImageComparisonBase(func, tol=tol, remove_text=remove_text,\n savefig_kwargs=savefig_kwargs)\n matplotlib.testing.set_font_settings_for_testing()\n func(*args, **kwargs)\n\n # Parameter indirection:\n # This is hacked on via the mpl_image_comparison_parameters fixture\n # so that we don't need to modify the function's real signature for\n # any parametrization. Modifying the signature is very very tricky\n # and likely to confuse pytest.\n baseline_images, extension = func.parameters\n\n assert len(plt.get_fignums()) == len(baseline_images), (\n \"Test generated {} images but there are {} baseline images\"\n .format(len(plt.get_fignums()), len(baseline_images)))\n for idx, baseline in enumerate(baseline_images):\n img.compare(idx, baseline, extension)\n\n return wrapper\n\n return decorator\n\n\ndef image_comparison(baseline_images, extensions=None, tol=0,\n freetype_version=None, remove_text=False,\n savefig_kwarg=None,\n # Default of mpl_test_settings fixture and cleanup too.\n style=(\"classic\", \"_classic_test_patch\")):\n \"\"\"\n Compare images generated by the test with those specified in\n *baseline_images*, which must correspond, else an `ImageComparisonFailure`\n exception will be raised.\n\n Parameters\n ----------\n baseline_images : list or None\n A list of strings specifying the names of the images generated by\n calls to :meth:`matplotlib.figure.savefig`.\n\n If *None*, the test function must use the ``baseline_images`` fixture,\n either as a parameter or with `pytest.mark.usefixtures`. This value is\n only allowed when using pytest.\n\n extensions : None or list of str\n The list of extensions to test, e.g. ``['png', 'pdf']``.\n\n If *None*, defaults to all supported extensions: png, pdf, and svg.\n\n When testing a single extension, it can be directly included in the\n names passed to *baseline_images*. 
In that case, *extensions* must not\n be set.\n\n In order to keep the size of the test suite from ballooning, we only\n include the ``svg`` or ``pdf`` outputs if the test is explicitly\n exercising a feature dependent on that backend (see also the\n `check_figures_equal` decorator for that purpose).\n\n tol : float, optional, default: 0\n The RMS threshold above which the test is considered failed.\n\n freetype_version : str or tuple\n The expected freetype version or range of versions for this test to\n pass.\n\n remove_text : bool\n Remove the title and tick text from the figure before comparison. This\n is useful to make the baseline images independent of variations in text\n rendering between different versions of FreeType.\n\n This does not remove other, more deliberate, text, such as legends and\n annotations.\n\n savefig_kwarg : dict\n Optional arguments that are passed to the savefig method.\n\n style : str, dict, or list\n The optional style(s) to apply to the image test. The test itself\n can also apply additional styles if desired. Defaults to ``[\"classic\",\n \"_classic_test_patch\"]``.\n \"\"\"\n\n if baseline_images is not None:\n # List of non-empty filename extensions.\n baseline_exts = [*filter(None, {Path(baseline).suffix[1:]\n for baseline in baseline_images})]\n if baseline_exts:\n if extensions is not None:\n raise ValueError(\n \"When including extensions directly in 'baseline_images', \"\n \"'extensions' cannot be set as well\")\n if len(baseline_exts) > 1:\n raise ValueError(\n \"When including extensions directly in 'baseline_images', \"\n \"all baselines must share the same suffix\")\n extensions = baseline_exts\n baseline_images = [ # Chop suffix out from baseline_images.\n Path(baseline).stem for baseline in baseline_images]\n if extensions is None:\n # Default extensions to test, if not set via baseline_images.\n extensions = ['png', 'pdf', 'svg']\n if savefig_kwarg is None:\n savefig_kwarg = dict() # default no kwargs to savefig\n return _pytest_image_comparison(\n baseline_images=baseline_images, extensions=extensions, tol=tol,\n freetype_version=freetype_version, remove_text=remove_text,\n savefig_kwargs=savefig_kwarg, style=style)\n\n\ndef check_figures_equal(*, extensions=(\"png\", \"pdf\", \"svg\"), tol=0):\n \"\"\"\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must take two arguments, *fig_test* and *fig_ref*,\n and draw the test and reference images on them. 
After the function\n returns, the figures are saved and compared.\n\n This decorator should be preferred over `image_comparison` when possible in\n order to keep the size of the test suite from ballooning.\n\n Parameters\n ----------\n extensions : list, default: [\"png\", \"pdf\", \"svg\"]\n The extensions to test.\n tol : float\n The RMS threshold above which the test is considered failed.\n\n Examples\n --------\n Check that calling `Axes.plot` with a single argument plots it against\n ``[0, 1, 2, ...]``::\n\n @check_figures_equal()\n def test_plot(fig_test, fig_ref):\n fig_test.subplots().plot([1, 3, 5])\n fig_ref.subplots().plot([0, 1, 2], [1, 3, 5])\n \"\"\"\n\n def decorator(func):\n import pytest\n\n _, result_dir = _image_directories(func)\n\n if len(inspect.signature(func).parameters) == 2:\n # Free-standing function.\n @pytest.mark.parametrize(\"ext\", extensions)\n def wrapper(ext):\n fig_test = plt.figure(\"test\")\n fig_ref = plt.figure(\"reference\")\n func(fig_test, fig_ref)\n test_image_path = result_dir / (func.__name__ + \".\" + ext)\n ref_image_path = (\n result_dir / (func.__name__ + \"-expected.\" + ext))\n fig_test.savefig(test_image_path)\n fig_ref.savefig(ref_image_path)\n _raise_on_image_difference(\n ref_image_path, test_image_path, tol=tol)\n\n elif len(inspect.signature(func).parameters) == 3:\n # Method.\n @pytest.mark.parametrize(\"ext\", extensions)\n def wrapper(self, ext):\n fig_test = plt.figure(\"test\")\n fig_ref = plt.figure(\"reference\")\n func(self, fig_test, fig_ref)\n test_image_path = result_dir / (func.__name__ + \".\" + ext)\n ref_image_path = (\n result_dir / (func.__name__ + \"-expected.\" + ext))\n fig_test.savefig(test_image_path)\n fig_ref.savefig(ref_image_path)\n _raise_on_image_difference(\n ref_image_path, test_image_path, tol=tol)\n\n return wrapper\n\n return decorator\n\n\ndef _image_directories(func):\n \"\"\"\n Compute the baseline and result image directories for testing *func*.\n\n For test module ``foo.bar.test_baz``, the baseline directory is at\n ``foo/bar/baseline_images/test_baz`` and the result directory at\n ``$(pwd)/result_images/test_baz``. The result directory is created if it\n doesn't exist.\n \"\"\"\n module_path = Path(sys.modules[func.__module__].__file__)\n baseline_dir = module_path.parent / \"baseline_images\" / module_path.stem\n result_dir = Path().resolve() / \"result_images\" / module_path.stem\n result_dir.mkdir(parents=True, exist_ok=True)\n return baseline_dir, result_dir\n\n\n@cbook.deprecated(\"3.1\", alternative=\"pytest.mark.backend\")\ndef switch_backend(backend):\n\n def switch_backend_decorator(func):\n\n @functools.wraps(func)\n def backend_switcher(*args, **kwargs):\n try:\n prev_backend = mpl.get_backend()\n matplotlib.testing.setup()\n plt.switch_backend(backend)\n return func(*args, **kwargs)\n finally:\n plt.switch_backend(prev_backend)\n\n return backend_switcher\n\n return switch_backend_decorator\n",
"\"\"\"\nImplementation of the masked normalized cross-correlation.\n\nBased on the following publication:\nD. Padfield. Masked object registration in the Fourier domain.\nIEEE Transactions on Image Processing (2012)\n\nand the author's original MATLAB implementation, available on this website:\nhttp://www.dirkpadfield.com/\n\"\"\"\n\nimport numpy as np\nfrom functools import partial\n\nfrom .._shared.fft import fftmodule, next_fast_len\n\n\ndef masked_register_translation(\n src_image,\n target_image,\n src_mask,\n target_mask=None,\n overlap_ratio=3 / 10):\n \"\"\"\n Masked image translation registration by masked normalized\n cross-correlation.\n\n Parameters\n ----------\n src_image : ndarray\n Reference image.\n target_image : ndarray\n Image to register. Must be same dimensionality as ``src_image``,\n but not necessarily the same size.\n src_mask : ndarray\n Boolean mask for ``src_image``. The mask should evaluate to ``True``\n (or 1) on valid pixels. ``src_mask`` should have the same shape\n as ``src_mask``.\n target_mask : ndarray or None, optional\n Boolean mask for ``target_image``. The mask should evaluate to ``True``\n (or 1) on valid pixels. ``target_mask`` should have the same shape\n as ``target_image``. If ``None``, ``src_mask`` will be used.\n overlap_ratio : float, optional\n Minimum allowed overlap ratio between images. The correlation for\n translations corresponding with an overlap ratio lower than this\n threshold will be ignored. A lower `overlap_ratio` leads to smaller\n maximum translation, while a higher `overlap_ratio` leads to greater\n robustness against spurious matches due to small overlap between\n masked images.\n\n Returns\n -------\n shifts : ndarray\n Shift vector (in pixels) required to register ``target_image`` with\n ``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)\n\n References\n ----------\n .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.\n IEEE Transactions on Image Processing, vol. 21(5),\n pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`\n .. [2] D. Padfield. \"Masked FFT registration\". In Proc. Computer Vision and\n Pattern Recognition, pp. 2918-2925 (2010).\n :DOI:`10.1109/CVPR.2010.5540032`\n \"\"\"\n if target_mask is None:\n target_mask = np.array(src_mask, dtype=np.bool, copy=True)\n\n # We need masks to be of the same size as their respective images\n for (im, mask) in [(src_image, src_mask), (target_image, target_mask)]:\n if im.shape != mask.shape:\n raise ValueError(\n \"Error: image sizes must match their respective mask sizes.\")\n\n # The mismatch in size will impact the center location of the\n # cross-correlation\n size_mismatch = np.array(target_image.shape) - np.array(src_image.shape)\n\n xcorr = cross_correlate_masked(target_image, src_image, target_mask,\n src_mask, axes=(0, 1), mode='full',\n overlap_ratio=overlap_ratio)\n\n # Generalize to the average of multiple equal maxima\n maxima = np.transpose(np.nonzero(xcorr == xcorr.max()))\n center = np.mean(maxima, axis=0)\n shifts = center - np.array(src_image.shape) + 1\n return -shifts + (size_mismatch / 2)\n\n\ndef cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1),\n overlap_ratio=3 / 10):\n \"\"\"\n Masked normalized cross-correlation between arrays.\n\n Parameters\n ----------\n arr1 : ndarray\n First array.\n arr2 : ndarray\n Seconds array. The dimensions of `arr2` along axes that are not\n transformed should be equal to that of `arr1`.\n m1 : ndarray\n Mask of `arr1`. 
The mask should evaluate to `True`\n (or 1) on valid pixels. `m1` should have the same shape as `arr1`.\n m2 : ndarray\n Mask of `arr2`. The mask should evaluate to `True`\n (or 1) on valid pixels. `m2` should have the same shape as `arr2`.\n mode : {'full', 'same'}, optional\n 'full':\n This returns the convolution at each point of overlap. At\n the end-points of the convolution, the signals do not overlap\n completely, and boundary effects may be seen.\n 'same':\n The output is the same size as `arr1`, centered with respect\n to the `‘full’` output. Boundary effects are less prominent.\n axes : tuple of ints, optional\n Axes along which to compute the cross-correlation.\n overlap_ratio : float, optional\n Minimum allowed overlap ratio between images. The correlation for\n translations corresponding with an overlap ratio lower than this\n threshold will be ignored. A lower `overlap_ratio` leads to smaller\n maximum translation, while a higher `overlap_ratio` leads to greater\n robustness against spurious matches due to small overlap between\n masked images.\n\n Returns\n -------\n out : ndarray\n Masked normalized cross-correlation.\n\n Raises\n ------\n ValueError : if correlation `mode` is not valid, or array dimensions along\n non-transformation axes are not equal.\n\n References\n ----------\n .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.\n IEEE Transactions on Image Processing, vol. 21(5),\n pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`\n .. [2] D. Padfield. \"Masked FFT registration\". In Proc. Computer Vision and\n Pattern Recognition, pp. 2918-2925 (2010).\n :DOI:`10.1109/CVPR.2010.5540032`\n \"\"\"\n if mode not in {'full', 'same'}:\n raise ValueError(\"Correlation mode {} is not valid.\".format(mode))\n\n fixed_image = np.array(arr1, dtype=np.float)\n fixed_mask = np.array(m1, dtype=np.bool)\n moving_image = np.array(arr2, dtype=np.float)\n moving_mask = np.array(m2, dtype=np.bool)\n eps = np.finfo(np.float).eps\n\n # Array dimensions along non-transformation axes should be equal.\n all_axes = set(range(fixed_image.ndim))\n for axis in (all_axes - set(axes)):\n if fixed_image.shape[axis] != moving_image.shape[axis]:\n raise ValueError(\n \"Array shapes along non-transformation axes should be \"\n \"equal, but dimensions along axis {a} are not\".format(a=axis))\n\n # Determine final size along transformation axes\n # Note that it might be faster to compute Fourier transform in a slightly\n # larger shape (`fast_shape`). Then, after all fourier transforms are done,\n # we slice back to`final_shape` using `final_slice`.\n final_shape = list(arr1.shape)\n for axis in axes:\n final_shape[axis] = fixed_image.shape[axis] + \\\n moving_image.shape[axis] - 1\n final_shape = tuple(final_shape)\n final_slice = tuple([slice(0, int(sz)) for sz in final_shape])\n\n # Extent transform axes to the next fast length (i.e. multiple of 3, 5, or\n # 7)\n fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])\n\n # We use numpy.fft or the new scipy.fft because they allow leaving the\n # transform axes unchanged which was not possible with scipy.fftpack's\n # fftn/ifftn in older versions of SciPy.\n # E.g. 
arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)\n # results in arr_fft shape (4, 4, 7)\n fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)\n ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)\n\n fixed_image[np.logical_not(fixed_mask)] = 0.0\n moving_image[np.logical_not(moving_mask)] = 0.0\n\n # N-dimensional analog to rotation by 180deg is flip over all relevant axes.\n # See [1] for discussion.\n rotated_moving_image = _flip(moving_image, axes=axes)\n rotated_moving_mask = _flip(moving_mask, axes=axes)\n\n fixed_fft = fft(fixed_image)\n rotated_moving_fft = fft(rotated_moving_image)\n fixed_mask_fft = fft(fixed_mask)\n rotated_moving_mask_fft = fft(rotated_moving_mask)\n\n # Calculate overlap of masks at every point in the convolution.\n # Locations with high overlap should not be taken into account.\n number_overlap_masked_px = np.real(\n ifft(rotated_moving_mask_fft * fixed_mask_fft))\n number_overlap_masked_px[:] = np.round(number_overlap_masked_px)\n number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps)\n masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)\n masked_correlated_rotated_moving_fft = ifft(\n fixed_mask_fft * rotated_moving_fft)\n\n numerator = ifft(rotated_moving_fft * fixed_fft)\n numerator -= masked_correlated_fixed_fft * \\\n masked_correlated_rotated_moving_fft / number_overlap_masked_px\n\n fixed_squared_fft = fft(np.square(fixed_image))\n fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)\n fixed_denom -= np.square(masked_correlated_fixed_fft) / \\\n number_overlap_masked_px\n fixed_denom[:] = np.fmax(fixed_denom, 0.0)\n\n rotated_moving_squared_fft = fft(np.square(rotated_moving_image))\n moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)\n moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \\\n number_overlap_masked_px\n moving_denom[:] = np.fmax(moving_denom, 0.0)\n\n denom = np.sqrt(fixed_denom * moving_denom)\n\n # Slice back to expected convolution shape.\n numerator = numerator[final_slice]\n denom = denom[final_slice]\n number_overlap_masked_px = number_overlap_masked_px[final_slice]\n\n if mode == 'same':\n _centering = partial(_centered,\n newshape=fixed_image.shape, axes=axes)\n denom = _centering(denom)\n numerator = _centering(numerator)\n number_overlap_masked_px = _centering(number_overlap_masked_px)\n\n # Pixels where `denom` is very small will introduce large\n # numbers after division. To get around this problem,\n # we zero-out problematic pixels.\n tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True)\n nonzero_indices = denom > tol\n\n out = np.zeros_like(denom)\n out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]\n np.clip(out, a_min=-1, a_max=1, out=out)\n\n # Apply overlap ratio threshold\n number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px,\n axis=axes, keepdims=True)\n out[number_overlap_masked_px < number_px_threshold] = 0.0\n\n return out\n\n\ndef _centered(arr, newshape, axes):\n \"\"\" Return the center `newshape` portion of `arr`, leaving axes not\n in `axes` untouched. \"\"\"\n newshape = np.asarray(newshape)\n currshape = np.array(arr.shape)\n\n slices = [slice(None, None)] * arr.ndim\n\n for ax in axes:\n startind = (currshape[ax] - newshape[ax]) // 2\n endind = startind + newshape[ax]\n slices[ax] = slice(startind, endind)\n\n return arr[tuple(slices)]\n\n\ndef _flip(arr, axes=None):\n \"\"\" Reverse array over many axes. 
Generalization of arr[::-1] for many\n dimensions. If `axes` is `None`, flip along all axes. \"\"\"\n if axes is None:\n reverse = [slice(None, None, -1)] * arr.ndim\n else:\n reverse = [slice(None, None, None)] * arr.ndim\n for axis in axes:\n reverse[axis] = slice(None, None, -1)\n\n return arr[tuple(reverse)]\n"
] |
[
[
"numpy.set_printoptions"
],
[
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_fignums",
"matplotlib.cbook.deprecated",
"matplotlib.ticker.NullFormatter",
"matplotlib.get_backend"
],
[
"numpy.square",
"numpy.max",
"numpy.array",
"numpy.zeros_like",
"numpy.logical_not",
"numpy.asarray",
"numpy.round",
"numpy.fmax",
"numpy.mean",
"numpy.finfo",
"numpy.sqrt",
"numpy.clip",
"numpy.abs"
]
] |
Phuong-Le/mutationorigin
|
[
"fad00fca3c1073637ede2c6948f5278a030971dc"
] |
[
"mutation_origin/cli.py"
] |
[
"\"\"\"command line interface for mutation_origin\"\"\"\nimport os\nimport time\nimport pickle\nfrom collections import defaultdict\nimport click\nfrom tqdm import tqdm\nimport pandas\nfrom numpy import log\nfrom numpy.random import seed as np_seed\nfrom scitrack import CachingLogger\nfrom sklearn.model_selection import train_test_split\n\nfrom mutation_origin.opt import (_seed, _feature_dim, _enu_path,\n _germline_path, _output_path, _flank_size,\n _train_size, _enu_ratio,\n _numreps, _label_col, _proximal, _usegc,\n _training_path, _c_values, _penalty_options,\n _n_jobs, _classifier_path, _data_path,\n _predictions_path, _alpha_options,\n _overwrite, _verbose, _class_prior,\n _strategy, _score)\nfrom mutation_origin.preprocess import data_to_numeric\nfrom mutation_origin.encoder import (get_scaler, inverse_transform_response,\n transform_response)\nfrom mutation_origin.classify import (logistic_regression, one_class_svm,\n predict_origin, naive_bayes, xgboost)\nfrom mutation_origin.util import (dump_json, load_predictions,\n get_basename, get_classifier_label,\n get_enu_germline_sizes, iter_indices,\n load_classifier, open_)\nfrom mutation_origin.postprocess import measure_performance\n\n\n__author__ = \"Gavin Huttley\"\n__copyright__ = \"Copyright 2014, Gavin Huttley\"\n__credits__ = [\"Yicheng Zhu\", \"Cheng Soon Ong\", \"Gavin Huttley\"]\n__license__ = \"BSD\"\n__version__ = \"0.3\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"Gavin.Huttley@anu.edu.au\"\n__status__ = \"Development\"\n\n\nLOGGER = CachingLogger()\n\n\n@click.group()\ndef main():\n \"\"\"mutori -- for building and applying classifiers of mutation origin\"\"\"\n pass\n\n\n@main.command()\n@_seed\n@_enu_path\n@_germline_path\n@_output_path\n@_train_size\n@_enu_ratio\n@_numreps\n@_overwrite\ndef sample_data(enu_path, germline_path, output_path, seed,\n train_size,\n enu_ratio, numreps, overwrite):\n \"\"\"creates train/test sample data\"\"\"\n if seed is None:\n seed = int(time.time())\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n\n # set the random number seed\n np_seed(seed)\n start_time = time.time()\n os.makedirs(output_path, exist_ok=True)\n logfile_path = os.path.join(output_path, \"logs/data_sampling.log\")\n if os.path.exists(logfile_path) and not overwrite:\n click.secho(f\"Exists: {logfile_path}! 
use overwrite to force.\",\n fg='red')\n return\n\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(enu_path)\n LOGGER.input_file(germline_path)\n\n enu = pandas.read_csv(enu_path, sep=\"\\t\", header=0)\n germline = pandas.read_csv(germline_path, sep=\"\\t\", header=0)\n train_size = train_size // 2\n test_size = train_size\n train_enu_ratio, test_enu_ratio = enu_ratio\n enu_train_size, germ_train_size = get_enu_germline_sizes(train_size,\n train_enu_ratio)\n enu_test_size, germ_test_size = get_enu_germline_sizes(test_size,\n test_enu_ratio)\n assert min(enu_train_size, germ_train_size,\n enu_test_size, germ_test_size) > 0\n\n if (2 * train_size > enu.shape[0] or\n 2 * train_size > germline.shape[0]):\n print(f\"ENU data set size: {enu.shape[0]}\")\n print(f\"Germline data set size: {germline.shape[0]}\")\n print(f\"Train set size: {train_size}\")\n raise ValueError(\"2 x train size exceeds\"\n \" size of training data source(s)\")\n\n for rep in range(numreps):\n test_outpath = os.path.join(output_path, f\"test-{rep}.tsv.gz\")\n train_outpath = os.path.join(output_path, f\"train-{rep}.tsv.gz\")\n enu_training, enu_testing = train_test_split(\n enu,\n test_size=enu_test_size,\n train_size=enu_train_size)\n\n germ_training, germ_testing = train_test_split(\n germline,\n test_size=germ_test_size,\n train_size=germ_train_size)\n if any(map(lambda x: x.shape[0] == 0,\n [enu_training, enu_testing, germ_training, germ_testing])):\n raise RuntimeError(\"screw up in creating test/train set\")\n\n # concat the data frames\n testing = pandas.concat([enu_testing, germ_testing])\n training = pandas.concat([enu_training, germ_training])\n # write out, separately, the ENU and Germline data for train and test\n testing.to_csv(test_outpath, index=False,\n sep=\"\\t\", compression='gzip')\n training.to_csv(train_outpath, index=False,\n sep=\"\\t\", compression='gzip')\n\n LOGGER.output_file(test_outpath)\n LOGGER.output_file(train_outpath)\n\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n@main.command()\n@_training_path\n@_output_path\n@_label_col\n@_seed\n@_score\n@_flank_size\n@_feature_dim\n@_proximal\n@_usegc\n@_c_values\n@_penalty_options\n@_n_jobs\n@_overwrite\n@_verbose\ndef lr_train(training_path, output_path, label_col, seed, scoring,\n flank_size, feature_dim, proximal,\n usegc, c_values, penalty_options, n_jobs, overwrite, verbose):\n \"\"\"logistic regression training, validation, dumps optimal model\"\"\"\n if not seed:\n seed = int(time.time())\n\n np_seed(seed)\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n\n os.makedirs(output_path, exist_ok=True)\n\n basename = get_basename(training_path)\n outpath = os.path.join(output_path, f\"{basename}-classifier-lr.pkl.gz\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. 
\"\n \"use overwrite to force.\",\n fg='green')\n return\n\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-training-lr.log\")\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(training_path)\n\n start_time = time.time()\n _, resp, feat, n_dims, names = data_to_numeric(training_path,\n label_col, flank_size,\n feature_dim, proximal,\n usegc)\n\n if usegc:\n # we need to scale the data\n scaler = get_scaler(feat)\n feat = scaler.transform(feat)\n classifier = logistic_regression(feat, resp, seed, scoring,\n c_values,\n penalty_options.split(\",\"), n_jobs)\n betas = dict(zip(names, classifier.best_estimator_.coef_.tolist()[0]))\n result = dict(classifier=classifier.best_estimator_, betas=betas,\n scoring=scoring)\n result['feature_params'] = dict(feature_dim=feature_dim,\n flank_size=flank_size, proximal=proximal,\n usegc=usegc)\n if usegc:\n result['scaler'] = scaler\n\n with open(outpath, 'wb') as clf_file:\n pickle.dump(result, clf_file)\n\n LOGGER.output_file(outpath)\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n@main.command()\n@_training_path\n@_output_path\n@_label_col\n@_seed\n@_score\n@_flank_size\n@_feature_dim\n@_proximal\n@_usegc\n@_alpha_options\n@_class_prior\n@_n_jobs\n@_overwrite\n@_verbose\ndef nb_train(training_path, output_path, label_col, seed, scoring,\n flank_size, feature_dim, proximal,\n usegc, alpha_options, class_prior, n_jobs, overwrite, verbose):\n \"\"\"Naive Bayes training, validation, dumps optimal model\"\"\"\n if not seed:\n seed = int(time.time())\n\n np_seed(seed)\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n os.makedirs(output_path, exist_ok=True)\n\n basename = get_basename(training_path)\n outpath = os.path.join(output_path, f\"{basename}-classifier-nb.pkl.gz\")\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-training-nb.log\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. 
\"\n \"use overwrite to force.\",\n fg='green')\n return\n\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(training_path)\n\n start_time = time.time()\n if class_prior is not None:\n class_labels = list(class_prior)\n encoded = transform_response(class_labels)\n ordered = sorted(zip(encoded, class_labels))\n class_prior = [class_prior[l] for _, l in ordered]\n\n _, resp, feat, n_dims, names = data_to_numeric(training_path,\n label_col, flank_size,\n feature_dim, proximal,\n usegc)\n\n if usegc:\n # we need to scale the data\n scaler = get_scaler(feat)\n feat = scaler.transform(feat)\n classifier = naive_bayes(feat, resp, seed, alpha_options, scoring,\n class_prior=class_prior, n_jobs=n_jobs)\n betas = dict(zip(names, classifier.best_estimator_.coef_.tolist()[0]))\n result = dict(classifier=classifier.best_estimator_, betas=betas,\n scoring=scoring)\n result['feature_params'] = dict(feature_dim=feature_dim,\n flank_size=flank_size, proximal=proximal,\n usegc=usegc)\n if usegc:\n result['scaler'] = scaler\n\n with open_(outpath, 'wb') as clf_file:\n pickle.dump(result, clf_file)\n\n LOGGER.output_file(outpath)\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n@main.command()\n@_training_path\n@_output_path\n@_label_col\n@_seed\n@_flank_size\n@_feature_dim\n@_proximal\n@_usegc\n@_strategy\n@_n_jobs\n@_overwrite\n@_verbose\ndef xgboost_train(training_path, output_path, label_col, seed,\n flank_size, feature_dim, proximal,\n usegc, strategy, n_jobs, overwrite, verbose):\n \"\"\"Naive Bayes training, validation, dumps optimal model\"\"\"\n if not seed:\n seed = int(time.time())\n\n np_seed(seed)\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n os.makedirs(output_path, exist_ok=True)\n\n basename = get_basename(training_path)\n outpath = os.path.join(output_path, f\"{basename}-classifier-xgb.pkl.gz\")\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-training-xgb.log\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. 
\"\n \"use overwrite to force.\",\n fg='green')\n return\n\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(training_path)\n start_time = time.time()\n _, resp, feat, n_dims, names = data_to_numeric(training_path,\n label_col, flank_size,\n feature_dim, proximal,\n usegc)\n\n # hacking feature so all -1 > 0\n resp = [v if v > 0 else 0 for v in resp]\n\n if usegc:\n # we need to scale the data\n scaler = get_scaler(feat)\n feat = scaler.transform(feat)\n\n classifier = xgboost(feat, resp, seed, strategy, n_jobs, verbose)\n result = dict(classifier=classifier)\n result['feature_params'] = dict(feature_dim=feature_dim,\n flank_size=flank_size, proximal=proximal,\n usegc=usegc)\n if usegc:\n result['scaler'] = scaler\n\n with open(outpath, 'wb') as clf_file:\n pickle.dump(result, clf_file)\n\n LOGGER.output_file(outpath)\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n@main.command()\n@_training_path\n@_output_path\n@_label_col\n@_seed\n@_flank_size\n@_feature_dim\n@_proximal\n@_usegc\n@_overwrite\n@_verbose\ndef ocs_train(training_path, output_path, label_col, seed,\n flank_size, feature_dim, proximal, usegc, overwrite, verbose):\n \"\"\"one-class svm training for outlier detection\"\"\"\n if seed is None:\n seed = int(time.time())\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n start_time = time.time()\n os.makedirs(output_path, exist_ok=True)\n\n basename = get_basename(training_path)\n outpath = os.path.join(output_path, f\"{basename}-classifier-ocs.pkl.gz\")\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-training-ocs.log\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. 
\"\n \"use overwrite to force.\",\n fg='green')\n return\n\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(training_path)\n\n start_time = time.time()\n _, _, feat, n_dims, names = data_to_numeric(training_path,\n label_col, flank_size,\n feature_dim, proximal,\n usegc=usegc,\n one_class='g')\n\n classifier = one_class_svm(feat, seed)\n result = dict(classifier=classifier)\n result['feature_params'] = dict(feature_dim=feature_dim,\n flank_size=flank_size, proximal=proximal,\n usegc=usegc)\n\n with open(outpath, 'wb') as clf_file:\n pickle.dump(result, clf_file)\n\n LOGGER.output_file(outpath)\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n@main.command()\n@_classifier_path\n@_data_path\n@_output_path\n@_label_col\n@_class_prior\n@_overwrite\n@_verbose\ndef predict(classifier_path, data_path, output_path, label_col, class_prior,\n overwrite, verbose):\n \"\"\"predict labels for data\"\"\"\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n classifier, feature_params, scaler = load_classifier(classifier_path)\n class_label = get_classifier_label(classifier)\n if class_prior is not None and class_label == 'lr':\n # https://stats.stackexchange.com/questions/117592/logistic-regression-prior-correction-at-test-time\n # based on above and King and Zeng, we adjust the intercept term such\n # that it is incremented by ln(p(1) / p(-1)) where p(1) is the prior\n # of a 1 label, p(-1)=1-p(1)\n class_labels = list(class_prior)\n encoded = transform_response(class_labels)\n ordered = sorted(zip(encoded, class_labels))\n if 'e' in ordered[0]:\n adj = log(class_prior['g'] / class_prior['e'])\n else:\n adj = log(class_prior['e'] / class_prior['g'])\n\n classifier.intercept_ += adj\n\n basename_class = get_basename(classifier_path)\n basename_data = get_basename(data_path)\n basename = f\"{basename_class}-{basename_data}\"\n outpath = os.path.join(\n output_path,\n f\"{basename}-predicted-{class_label}.json.gz\")\n os.makedirs(output_path, exist_ok=True)\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-predict-{class_label}.log\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. 
\"\n \"use overwrite to force.\",\n fg='green')\n return\n\n LOGGER.log_file_path = logfile_path\n LOGGER.input_file(classifier_path)\n LOGGER.input_file(data_path)\n\n start_time = time.time()\n # if NB, the score func name is different\n if class_label in (\"nb\", \"xgb\"):\n classifier.decision_function = classifier.predict_proba\n\n fulldata = pandas.read_csv(data_path, sep='\\t')\n\n result = {}\n result['feature_params'] = feature_params\n result['classifier_label'] = class_label\n result['classifier_path'] = classifier_path\n result['predictions'] = defaultdict(list)\n total = fulldata.shape[0] // 2000\n pbar = tqdm(iter_indices(\n fulldata.shape[0], block_size=2000), ncols=80, total=total)\n for indices in pbar:\n data = fulldata.iloc[indices]\n ids, resp, feat, n_dims, names = data_to_numeric(data,\n label_col=label_col,\n **feature_params)\n if scaler:\n feat = scaler.transform(feat)\n\n predictions, scores = predict_origin(classifier, feat)\n if class_label in (\"nb\", \"xgb\"):\n # each `score' is the probability of belong to either class\n # reduce to just the first class\n scores = scores[:, 1].tolist()\n elif class_label == 'ocs':\n scores = scores[:, 0].tolist()\n\n predictions = inverse_transform_response(predictions)\n result['predictions']['varid'].extend(list(ids))\n result['predictions']['predicted'].extend(list(predictions))\n result['predictions']['scores'].extend(list(scores))\n\n dump_json(outpath, result)\n LOGGER.output_file(outpath)\n duration = time.time() - start_time\n LOGGER.log_message(\"%.2f\" % (duration / 60.),\n label=\"run duration (minutes)\")\n LOGGER.shutdown()\n\n\n# def performance -> produces summary stats on trained classifiers\n# requires input data and the predicted results\n@main.command()\n@_data_path\n@_predictions_path\n@_output_path\n@_label_col\n@_overwrite\n@_verbose\ndef performance(data_path, predictions_path, output_path, label_col,\n overwrite, verbose):\n \"\"\"produce measures of classifier performance\"\"\"\n LOGGER.log_args()\n LOGGER.log_versions(['sklearn', 'numpy'])\n if not (data_path or predictions_path):\n click.secho(\"Need data sets!\", fg=\"red\")\n exit()\n\n basename_train = get_basename(data_path)\n basename_pred = get_basename(predictions_path)\n basename = f\"{basename_train}-{basename_pred}\"\n outpath = os.path.join(\n output_path,\n f\"{basename}-performance.json.gz\")\n logfile_path = os.path.join(output_path,\n f\"logs/{basename}-performance.log\")\n if os.path.exists(outpath) and not overwrite:\n if verbose > 1:\n click.secho(f\"Skipping. {outpath} exists. \"\n \"Use overwrite to force.\",\n fg='green')\n return\n\n LOGGER.log_file_path = logfile_path\n\n LOGGER.input_file(data_path)\n LOGGER.input_file(predictions_path)\n orig = pandas.read_csv(data_path, sep=\"\\t\")\n predicted, feature_params, classifier_path, label =\\\n load_predictions(predictions_path)\n result = measure_performance(orig, predicted,\n label_col)\n result[\"feature_params\"] = feature_params\n result[\"classifier_path\"] = classifier_path\n result[\"classifier_label\"] = label\n dump_json(outpath, result)\n LOGGER.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.log",
"numpy.random.seed",
"pandas.concat",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
papaispicolo/CarNDT3-SemanticSegmentation
|
[
"c1940c01769cbf03d7c28c3a72946e4bd9682d59"
] |
[
"main.py"
] |
[
"#!/usr/bin/env python3\nimport os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\n\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n\n vgg_tag = 'vgg16'\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n graph = tf.get_default_graph()\n input_img = graph.get_tensor_by_name(vgg_input_tensor_name)\n prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n\n return input_img, prob, layer3_o, layer4_o, layer7_o\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\"\n # TODO: Implement function\n # 1x1 convolution layer with road / not-road features only\n conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n # upscaling size/ add features\n output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n # skip connections / add to upscaled output\n output = tf.add(output, vgg_layer4_out)\n\n # upscaling size/ reduce features\n output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n # skip connections / add to upscaled output\n output = tf.add(output, vgg_layer3_out)\n\n # upscaling size/ reduce features to road OR not-road\n output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')\n\n return output\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n # TODO: Implement function\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n\n # add loss function\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n # training_op\n training_operation = optimizer.minimize(cross_entropy_loss)\n\n return logits, training_operation, cross_entropy_loss\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n\n # initialize global variables\n sess.run(tf.global_variables_initializer())\n\n # going through the batches of images i.e. 
epoch\n for epoch in range(epochs):\n for (input_img, gt_img) in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,\n correct_label: gt_img,\n keep_prob: 0.7,\n learning_rate: 5e-04})\n print(\"Loss of {} at epoch {}/{}\".format(loss, epoch, epochs))\n\ntests.test_train_nn(train_nn)\n\n\ndef run():\n num_classes = 2\n image_shape = (160, 576) # KITTI dataset uses 160x576 images\n data_dir = './data'\n runs_dir = './runs'\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n epochs = 20\n batch_size = 5\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n correct_label = tf.placeholder(tf.int32)\n learning_rate = tf.placeholder(tf.float32)\n # TODO: Build NN using load_vgg, layers, and optimize function\n input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)\n layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)\n logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)\n\n # TODO: Train NN using the train_nn function\n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,\n correct_label, keep_prob, learning_rate)\n\n # TODO: Save inference data using helper.save_inference_samples\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n run()\n"
] |
[
[
"tensorflow.test.gpu_device_name",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.get_default_graph",
"tensorflow.Session",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.saved_model.loader.load",
"tensorflow.add",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.layers.l2_regularizer"
]
] |
Alexandre-Bonneau/uwds3_perception
|
[
"21529f63b3b3d2ad5e30eefece2d75378ae7651f"
] |
[
"scripts/camera_publisher_node.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport rospy\nimport cv2\nimport sensor_msgs\nimport numpy as np\nfrom cv_bridge import CvBridge\n\n\nclass CameraPublisher(object):\n \"\"\" \"\"\"\n def __init__(self):\n \"\"\"Default constructor\"\"\"\n\n self.rgb_image_topic = rospy.get_param(\"~rgb_image_topic\", \"/camera/rgb/image_raw\")\n self.camera_publisher = rospy.Publisher(self.rgb_image_topic, sensor_msgs.msg.Image, queue_size=1)\n\n self.camera_pub_frequency = rospy.get_param(\"~camera_pub_frequency\", 20)\n\n self.bridge = CvBridge()\n self.camera_info_topic = rospy.get_param(\"~camera_info_topic\", \"/camera/rgb/camera_info\")\n self.camera_info = sensor_msgs.msg.CameraInfo()\n self.camera_info_publisher = rospy.Publisher(self.camera_info_topic, sensor_msgs.msg.CameraInfo, queue_size=1)\n\n self.camera_frame_id = rospy.get_param(\"~camera_frame_id\", \"camera_link\")\n self.camera_info.header.frame_id = self.camera_frame_id\n\n self.capture = cv2.VideoCapture(0)\n ok, frame = self.capture.read()\n\n width, height, _ = frame.shape\n\n focal_length = height\n center = (height/2, width/2)\n camera_matrix = np.array([[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype=\"double\")\n P_matrix = np.array([[focal_length, 0, center[0], 0],\n [0, focal_length, center[1], 0],\n [0, 0, 1, 0]], dtype=\"double\")\n\n dist_coeffs = np.zeros((4, 1))\n self.camera_info.D = list(dist_coeffs)\n self.camera_info.K = list(camera_matrix.flatten())\n self.camera_info.P = list(P_matrix.flatten())\n\n self.timer = rospy.Timer(rospy.Duration(1.0/self.camera_pub_frequency), self.timer_callback)\n rospy.loginfo(\"Camera publisher ready !\")\n while not rospy.is_shutdown():\n rospy.spin()\n\n self.capture.release()\n\n def timer_callback(self, event):\n ok, frame = self.capture.read()\n if ok:\n bgr_image_msg = self.bridge.cv2_to_imgmsg(frame, \"bgr8\")\n bgr_image_msg.header.stamp = rospy.Time().now()\n self.camera_info.header = bgr_image_msg.header\n bgr_image_msg.header.frame_id = self.camera_frame_id\n self.camera_publisher.publish(bgr_image_msg)\n self.camera_info_publisher.publish(self.camera_info)\n\n\nif __name__ == '__main__':\n rospy.init_node(\"camera_publisher\", anonymous=False)\n c = CameraPublisher()\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
ethz-asl/modular_semantic_segmentation
|
[
"5f9e34243915b862e8fef5e6195f1e29f4cebf50"
] |
[
"xview/models/dirichletEstimation.py"
] |
[
"#!/usr/bin/python\n#\n# A library for finding the optimal dirichlet prior from counts\n# By: Max Sklar\n# @maxsklar\n# https://github.com/maxsklar\n\n# Copyright 2013 Max Sklar\n\nimport math\nimport logging\nimport random\nimport scipy.special as mathExtra\nimport scipy\nimport numpy as np\n\ndef digamma(x): return mathExtra.psi(x)\ndef trigamma(x): return mathExtra.polygamma(1, x)\n\n\n# Find the \"sufficient statistic\" for a group of multinomials.\n# Essential, it's the average of the log probabilities\ndef getSufficientStatistic(multinomials):\n N = len(multinomials)\n K = len(multinomials[0])\n\n retVal = [0]*K\n\n for m in multinomials:\n for k in range(0, K):\n retVal[k] += math.log(m[k])\n\n for k in range(0, K): retVal[k] /= N\n return retVal\n\n# Find the log probability of the data for a given dirichlet\n# This is equal to the log probabiliy of the data.. up to a linear transform\ndef logProbForMultinomials(alphas, ss, delta):\n alpha_sum = np.sum(alphas)\n retVal = mathExtra.gammaln(alpha_sum)\n retVal -= np.sum(mathExtra.gammaln(alphas))\n retVal += np.sum(np.multiply(alphas, ss))\n retVal -= delta * np.square(alphas).sum()\n return retVal\n\n#Gives the derivative with respect to the log of prior. This will be used to adjust the loss\ndef getGradientForMultinomials(alphas, ss, delta):\n K = len(alphas)\n C = digamma(sum(alphas)) # - DELTA * sum(alphas)\n retVal = [C]*K\n for k in range(0, K):\n retVal[k] += ss[k] - digamma(alphas[k]) - 2 * delta * alphas[k]\n\n\n return retVal\n\n#The hessian is actually the sum of two matrices: a diagonal matrix and a constant-value matrix.\n#We'll write two functions to get both\ndef priorHessianConst(alphas, ss, delta): return -trigamma(sum(alphas)) + 2 * delta\ndef priorHessianDiag(alphas, ss): return [trigamma(a) for a in alphas]\n\n# Compute the next value to try here\n# http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf (eq 18)\ndef getPredictedStep(hConst, hDiag, gradient):\n K = len(gradient)\n numSum = 0.0\n for i in range(0, K):\n numSum += gradient[i] / hDiag[i]\n\n denSum = 0.0\n for i in range(0, K): denSum += 1.0 / hDiag[i]\n\n b = numSum / ((1.0/hConst) + denSum)\n\n retVal = [0]*K\n for i in range(0, K): retVal[i] = (b - gradient[i]) / hDiag[i]\n return retVal\n\n# Uses the diagonal hessian on the log-alpha values\ndef getPredictedStepAlt(hConst, hDiag, gradient, alphas):\n K = len(gradient)\n\n Z = 0\n for k in range(0, K):\n Z += alphas[k] / (gradient[k] - alphas[k]*hDiag[k])\n Z *= hConst\n\n Ss = [0]*K\n for k in range(0, K):\n Ss[k] = 1.0 / (gradient[k] - alphas[k]*hDiag[k]) / (1 + Z)\n S = sum(Ss)\n\n retVal = [0]*K\n for i in range(0, K):\n retVal[i] = gradient[i] / (gradient[i] - alphas[i]*hDiag[i]) * (1 - hConst * alphas[i] * S)\n\n return retVal\n\n#The priors and data are global, so we don't need to pass them in\ndef getTotalLoss(trialPriors, ss, delta):\n return -1*logProbForMultinomials(trialPriors, ss, delta)\n\ndef predictStepUsingHessian(gradient, priors, ss, delta):\n\ttotalHConst = priorHessianConst(priors, ss, delta)\n\ttotalHDiag = priorHessianDiag(priors, ss)\n\treturn getPredictedStep(totalHConst, totalHDiag, gradient)\n\ndef predictStepLogSpace(gradient, priors, ss, delta):\n\ttotalHConst = priorHessianConst(priors, ss, delta)\n\ttotalHDiag = priorHessianDiag(priors, ss)\n\treturn getPredictedStepAlt(totalHConst, totalHDiag, gradient, priors)\n\n\n# Returns whether it's a good step, and the loss\ndef testTrialPriors(trialPriors, ss, delta):\n\tfor alpha in 
trialPriors:\n\t\tif alpha <= 0:\n\t\t\treturn float(\"inf\")\n\n\treturn getTotalLoss(trialPriors, ss, delta)\n\ndef sqVectorSize(v):\n\ts = 0\n\tfor i in range(0, len(v)): s += v[i] ** 2\n\treturn s\n\ndef findDirichletPriors(ss, initAlphas, max_iter=1000, delta=1e-2):\n priors = initAlphas\n\n # Let the learning begin!!\n #Only step in a positive direction, get the current best loss.\n currentLoss = getTotalLoss(priors, ss, delta)\n\n gradientToleranceSq = 2 ** -20\n learnRateTolerance = 2 ** -10\n\n count = 0\n while(count < max_iter):\n count += 1\n\n #Get the data for taking steps\n gradient = getGradientForMultinomials(priors, ss, delta)\n gradientSize = sqVectorSize(gradient)\n #print(count, \"Loss: \", currentLoss, \", Priors: \", priors, \", Gradient Size: \", gradientSize, gradient)\n\n if (gradientSize < gradientToleranceSq):\n #print(\"Converged with small gradient\")\n return priors\n\n trialStep = predictStepUsingHessian(gradient, priors, ss, delta)\n\n #First, try the second order method\n trialPriors = [0]*len(priors)\n for i in range(0, len(priors)): trialPriors[i] = priors[i] + trialStep[i]\n\n loss = testTrialPriors(trialPriors, ss, delta)\n if loss < currentLoss:\n currentLoss = loss\n priors = trialPriors\n continue\n\n trialStep = predictStepLogSpace(gradient, priors, ss, delta)\n trialPriors = [0]*len(priors)\n for i in range(0, len(priors)): trialPriors[i] = priors[i] * math.exp(trialStep[i])\n loss = testTrialPriors(trialPriors, ss, delta)\n\n #Step in the direction of the gradient until there is a loss improvement\n loss = 10000000\n learnRate = 1.0\n while loss > currentLoss:\n learnRate *= 0.9\n trialPriors = [0]*len(priors)\n for i in range(0, len(priors)): trialPriors[i] = priors[i] + gradient[i]*learnRate\n loss = testTrialPriors(trialPriors, ss, delta)\n\n if (learnRate < learnRateTolerance):\n #print(\"Converged with small learn rate\")\n return priors\n\n currentLoss = loss\n priors = trialPriors\n\n #print(\"Reached max iterations\")\n return priors\n\ndef findDirichletPriorsFromMultinomials(multinomials, initAlphas):\n\tss = getSufficientStatistic(multinomials)\n\treturn findDirichletPriors(ss, initAlphas)\n"
] |
[
[
"numpy.square",
"scipy.special.gammaln",
"scipy.special.psi",
"scipy.special.polygamma",
"numpy.sum",
"numpy.multiply"
]
] |
hmorimitsu/ptlflow
|
[
"26f753322aef91b95ad78e743d847064e5d531b9"
] |
[
"ptlflow/__init__.py"
] |
[
"\"\"\"Provide useful functions for using PTLFlow.\"\"\"\n\n# =============================================================================\n# Copyright 2021 Henrique Morimitsu\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n__version__ = '0.2.5'\n\nimport logging\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport requests\nimport torch\nfrom torch import hub\n\nfrom ptlflow.models.base_model.base_model import BaseModel\nfrom ptlflow.models.dicl.dicl import DICL\nfrom ptlflow.models.fastflownet.fastflownet import FastFlowNet\nfrom ptlflow.models.flownet.flownet2 import FlowNet2\nfrom ptlflow.models.flownet.flownetc import FlowNetC\nfrom ptlflow.models.flownet.flownetcs import FlowNetCS\nfrom ptlflow.models.flownet.flownetcss import FlowNetCSS\nfrom ptlflow.models.flownet.flownets import FlowNetS\nfrom ptlflow.models.flownet.flownetsd import FlowNetSD\nfrom ptlflow.models.gma.gma import GMA\nfrom ptlflow.models.hd3.hd3 import HD3, HD3Context\nfrom ptlflow.models.irr.pwcnet import IRRPWCNet\nfrom ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR\nfrom ptlflow.models.irr.irr_pwc import IRRPWC\nfrom ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall\nfrom ptlflow.models.liteflownet.liteflownet import LiteFlowNet\nfrom ptlflow.models.liteflownet.liteflownet3 import (\n LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)\nfrom ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg\nfrom ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S\nfrom ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet\nfrom ptlflow.models.raft.raft import RAFT, RAFTSmall\nfrom ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow\nfrom ptlflow.models.starflow.starflow import StarFlow\nfrom ptlflow.models.vcn.vcn import VCN, VCNSmall\nfrom ptlflow.utils.utils import config_logging\n\ntry:\n from ptlflow.models.scv.scv import SCVEighth, SCVQuarter\nexcept ImportError as e:\n print(e)\n SCVEighth = None\n SCVQuarter = None\n\nconfig_logging()\n\n\nmodels_dict = {\n 'dicl': DICL,\n 'fastflownet': FastFlowNet,\n 'flownet2': FlowNet2,\n 'flownetc': FlowNetC,\n 'flownetcs': FlowNetCS,\n 'flownetcss': FlowNetCSS,\n 'flownets': FlowNetS,\n 'flownetsd': FlowNetSD,\n 'gma': GMA,\n 'hd3': HD3,\n 'hd3_ctxt': HD3Context,\n 'irr_pwc': IRRPWC,\n 'irr_pwcnet': IRRPWCNet,\n 'irr_pwcnet_irr': IRRPWCNetIRR,\n 'lcv_raft': LCV_RAFT,\n 'lcv_raft_small': LCV_RAFTSmall,\n 'liteflownet': LiteFlowNet,\n 'liteflownet2': LiteFlowNet2,\n 'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,\n 'liteflownet3': LiteFlowNet3,\n 'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,\n 'liteflownet3s': LiteFlowNet3S,\n 'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,\n 'maskflownet': MaskFlownet,\n 'maskflownet_s': MaskFlownet_S,\n 'pwcnet': PWCNet,\n 'pwcdcnet': PWCDCNet,\n 'raft': RAFT,\n 'raft_small': RAFTSmall,\n 'scopeflow': ScopeFlow,\n 'scv4': 
SCVQuarter,\n 'scv8': SCVEighth,\n 'starflow': StarFlow,\n 'vcn': VCN,\n 'vcn_small': VCNSmall,\n}\n\n\ndef download_scripts(\n destination_dir: Path = Path('ptlflow_scripts')\n) -> None:\n \"\"\"Download the main scripts and configs to start working with PTLFlow.\"\"\"\n github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'\n script_names = [\n 'datasets.yml',\n 'infer.py',\n 'test.py',\n 'train.py',\n 'validate.py'\n ]\n\n destination_dir.mkdir(parents=True, exist_ok=True)\n\n for sname in script_names:\n script_url = github_url + sname\n data = requests.get(script_url)\n if data.status_code == 200:\n with open(destination_dir / sname, 'wb') as f:\n f.write(data.content)\n else:\n logging.warning('Script %s was not found.', script_url)\n\n logging.info('Downloaded scripts to %s.', str(destination_dir))\n\n\ndef get_model(\n model_name: str,\n pretrained_ckpt: Optional[str] = None,\n args: Optional[Namespace] = None\n) -> BaseModel:\n \"\"\"Return an instance of a chosen model.\n\n The instance can have configured by he arguments, and load some existing pretrained weights.\n\n Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,\n returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to\n \"return get_model_reference()()\", which looks confusing. This can be rewritten as\n \"model_ref = get_model_reference(); return model_ref()\".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get an instance of.\n pretrained_ckpt : Optional[str], optional\n Name of the pretrained weight to load or a path to a local checkpoint file.\n args : Optional[Namespace], optional\n Some arguments that ill be provided to the model.\n\n Returns\n -------\n BaseModel\n The instance of the chosen model.\n\n Raises\n ------\n ValueError\n If the given checkpoint name is not a valid choice.\n ValueError\n If a checkpoint name is given, but the model does not have any pretrained weights available.\n\n See Also\n --------\n get_model_reference : To get a reference to the class of a model.\n \"\"\"\n model_ref = get_model_reference(model_name)\n if args is None:\n parser = model_ref.add_model_specific_args()\n args = parser.parse_args([])\n model = model_ref(args)\n\n if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:\n pretrained_ckpt = args.pretrained_ckpt\n\n if pretrained_ckpt is not None:\n if Path(pretrained_ckpt).exists():\n ckpt_path = pretrained_ckpt\n elif hasattr(model_ref, 'pretrained_checkpoints'):\n ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)\n if ckpt_path is None:\n raise ValueError(\n f'Invalid checkpoint name {pretrained_ckpt}. 
'\n f'Choose one from {{{\",\".join(model.pretrained_checkpoints.keys())}}}')\n else:\n raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n if Path(ckpt_path).exists():\n ckpt = torch.load(ckpt_path, map_location=torch.device(device))\n else:\n model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'\n ckpt = hub.load_state_dict_from_url(\n ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)\n\n state_dict = ckpt['state_dict']\n model.load_state_dict(state_dict)\n return model\n\n\ndef get_model_reference(\n model_name: str\n) -> BaseModel:\n \"\"\"Return a reference to the class of a chosen model.\n\n Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this\n function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as\n \"model_ref = get_model_reference(); model_instance = model_ref()\".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get a reference of.\n\n Returns\n -------\n BaseModel\n A reference to the chosen model.\n\n Raises\n ------\n ValueError\n If the given name is not a valid choice.\n\n See Also\n --------\n get_model : To get an instance of a model.\n \"\"\"\n try:\n return models_dict[model_name]\n except KeyError:\n raise ValueError(f'Unknown model name: {model_name}. Choose from [{\", \".join(models_dict.keys())}]')\n\n\ndef get_trainable_model_names() -> List[str]:\n \"\"\"Return a list of model names that are able to be trained.\n \n This function return the names of the model that have a loss function defined.\n\n Returns\n =======\n List[str]\n The list of the model names that can be trained.\n \"\"\"\n return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]\n"
] |
[
[
"torch.device",
"torch.cuda.is_available",
"torch.hub.get_dir"
]
] |
HIT-cwh/mmrazor
|
[
"2dad24044d7f1dad88f20221f8fc071dd40fdd4f"
] |
[
"mmrazor/models/architectures/components/backbones/darts_backbone.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nfrom mmcls.models.builder import BACKBONES\nfrom mmcv.cnn import build_activation_layer, build_norm_layer\n\nfrom ...utils import Placeholder\n\n\nclass FactorizedReduce(nn.Module):\n \"\"\"Reduce feature map size by factorized pointwise (stride=2).\"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n act_cfg=dict(type='ReLU'),\n norm_cfg=dict(type='BN')):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.act_cfg = act_cfg\n self.norm_cfg = norm_cfg\n self.relu = build_activation_layer(self.act_cfg)\n self.conv1 = nn.Conv2d(\n self.in_channels,\n self.out_channels // 2,\n 1,\n stride=2,\n padding=0,\n bias=False)\n self.conv2 = nn.Conv2d(\n self.in_channels,\n self.out_channels // 2,\n 1,\n stride=2,\n padding=0,\n bias=False)\n self.bn = build_norm_layer(self.norm_cfg, self.out_channels)[1]\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)\n out = self.bn(out)\n return out\n\n\nclass StandardConv(nn.Module):\n \"\"\"\n Standard conv: ReLU - Conv - BN\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n act_cfg=dict(type='ReLU'),\n norm_cfg=dict(type='BN')):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.act_cfg = act_cfg\n self.norm_cfg = norm_cfg\n self.net = nn.Sequential(\n build_activation_layer(self.act_cfg),\n nn.Conv2d(\n self.in_channels,\n self.out_channels,\n self.kernel_size,\n self.stride,\n self.padding,\n bias=False),\n build_norm_layer(self.norm_cfg, self.out_channels)[1])\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Node(nn.Module):\n\n def __init__(self, node_id, num_prev_nodes, channels,\n num_downsample_nodes):\n super().__init__()\n edges = nn.ModuleDict()\n for i in range(num_prev_nodes):\n if i < num_downsample_nodes:\n stride = 2\n else:\n stride = 1\n\n edge_id = '{}_p{}'.format(node_id, i)\n edges.add_module(\n edge_id,\n nn.Sequential(\n Placeholder(\n group='node',\n space_id=edge_id,\n choice_args=dict(\n stride=stride,\n in_channels=channels,\n out_channels=channels)), ))\n\n self.edges = Placeholder(\n group='node_edge', space_id=node_id, choices=edges)\n\n def forward(self, prev_nodes):\n return self.edges(prev_nodes)\n\n\nclass Cell(nn.Module):\n\n def __init__(self,\n num_nodes,\n channels,\n prev_channels,\n prev_prev_channels,\n reduction,\n prev_reduction,\n act_cfg=dict(type='ReLU'),\n norm_cfg=dict(type='BN')):\n super().__init__()\n self.act_cfg = act_cfg\n self.norm_cfg = norm_cfg\n self.reduction = reduction\n self.num_nodes = num_nodes\n\n # If previous cell is reduction cell, current input size does not match\n # with output size of cell[k-2]. 
So the output[k-2] should be reduced\n # by preprocessing.\n if prev_reduction:\n self.preproc0 = FactorizedReduce(prev_prev_channels, channels,\n self.act_cfg, self.norm_cfg)\n else:\n self.preproc0 = StandardConv(prev_prev_channels, channels, 1, 1, 0,\n self.act_cfg, self.norm_cfg)\n self.preproc1 = StandardConv(prev_channels, channels, 1, 1, 0,\n self.act_cfg, self.norm_cfg)\n\n # generate dag\n self.nodes = nn.ModuleList()\n for depth in range(2, self.num_nodes + 2):\n if reduction:\n node_id = f'reduce_n{depth}'\n num_downsample_nodes = 2\n else:\n node_id = f'normal_n{depth}'\n num_downsample_nodes = 0\n self.nodes.append(\n Node(node_id, depth, channels, num_downsample_nodes))\n\n def forward(self, s0, s1):\n # s0, s1 are the outputs of previous previous cell and previous cell,\n # respectively.\n tensors = [self.preproc0(s0), self.preproc1(s1)]\n for node in self.nodes:\n cur_tensor = node(tensors)\n tensors.append(cur_tensor)\n\n output = torch.cat(tensors[2:], dim=1)\n return output\n\n\nclass AuxiliaryModule(nn.Module):\n \"\"\"Auxiliary head in 2/3 place of network to let the gradient flow well.\"\"\"\n\n def __init__(self,\n in_channels,\n base_channels,\n out_channels,\n norm_cfg=dict(type='BN')):\n\n super().__init__()\n self.norm_cfg = norm_cfg\n self.net = nn.Sequential(\n nn.ReLU(),\n nn.AvgPool2d(5, stride=2, padding=0,\n count_include_pad=False), # 2x2 out\n nn.Conv2d(in_channels, base_channels, kernel_size=1, bias=False),\n build_norm_layer(self.norm_cfg, base_channels)[1],\n nn.ReLU(inplace=True),\n nn.Conv2d(base_channels, out_channels, kernel_size=2,\n bias=False), # 1x1 out\n build_norm_layer(self.norm_cfg, out_channels)[1],\n nn.ReLU(inplace=True))\n\n def forward(self, x):\n return self.net(x)\n\n\n@BACKBONES.register_module()\nclass DartsBackbone(nn.Module):\n\n def __init__(self,\n in_channels,\n base_channels,\n num_layers=8,\n num_nodes=4,\n stem_multiplier=3,\n out_indices=(7, ),\n auxliary=False,\n aux_channels=None,\n aux_out_channels=None,\n act_cfg=dict(type='ReLU'),\n norm_cfg=dict(type='BN')):\n super().__init__()\n\n self.in_channels = in_channels\n self.base_channels = base_channels\n self.num_layers = num_layers\n self.num_nodes = num_nodes\n self.stem_multiplier = stem_multiplier\n self.out_indices = out_indices\n assert self.out_indices[-1] == self.num_layers - 1\n if auxliary:\n assert aux_channels is not None\n assert aux_out_channels is not None\n self.aux_channels = aux_channels\n self.aux_out_channels = aux_out_channels\n self.auxliary_indice = 2 * self.num_layers // 3\n\n else:\n self.auxliary_indice = -1\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.out_channels = self.stem_multiplier * self.base_channels\n stem_norm_cfg = copy.deepcopy(self.norm_cfg)\n stem_norm_cfg.update(dict(affine=True))\n self.stem = nn.Sequential(\n nn.Conv2d(\n self.in_channels, self.out_channels, 3, 1, 1, bias=False),\n build_norm_layer(self.norm_cfg, self.out_channels)[1])\n\n # for the first cell, stem is used for both s0 and s1\n # [!] 
prev_prev_channels and prev_channels is output channel size,\n # but c_cur is input channel size.\n prev_prev_channels = self.out_channels\n prev_channels = self.out_channels\n self.out_channels = self.base_channels\n\n self.cells = nn.ModuleList()\n prev_reduction, reduction = False, False\n for i in range(self.num_layers):\n prev_reduction, reduction = reduction, False\n # Reduce featuremap size and double channels in 1/3\n # and 2/3 layer.\n if i == self.num_layers // 3 or i == 2 * self.num_layers // 3:\n self.out_channels *= 2\n reduction = True\n\n cell = Cell(self.num_nodes, self.out_channels, prev_channels,\n prev_prev_channels, reduction, prev_reduction,\n self.act_cfg, self.norm_cfg)\n self.cells.append(cell)\n\n prev_prev_channels = prev_channels\n prev_channels = self.out_channels * self.num_nodes\n\n if i == self.auxliary_indice:\n self.auxliary_module = AuxiliaryModule(prev_channels,\n self.aux_channels,\n self.aux_out_channels,\n self.norm_cfg)\n\n def forward(self, x):\n outs = []\n s0 = s1 = self.stem(x)\n for i, cell in enumerate(self.cells):\n s0, s1 = s1, cell(s0, s1)\n if i in self.out_indices:\n outs.append(s1)\n if i == self.auxliary_indice and self.training:\n aux_feature = self.auxliary_module(s1)\n\n outs.insert(0, aux_feature)\n\n return tuple(outs)\n"
] |
[
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.ModuleDict",
"torch.nn.AvgPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
viddik13/katna
|
[
"12256602a5fd24368ffffe2c1a82a46a49215c15"
] |
[
"Katna/image_filters/text_detector.py"
] |
[
"\"\"\"\n.. module:: Katna.image_filters.text_detector\n :platform: OS X\n :synopsis: This module is implementation of text detector filter\n\"\"\"\n\nimport os\nimport cv2\nimport numpy as np\nimport time\nimport requests\nimport random\nfrom imutils.object_detection import non_max_suppression\nfrom Katna.image_filters.filter import Filter\nimport Katna.config as config\n\n\nclass TextDetector(Filter):\n \"\"\"TextDetector Class: Class for implementation of text detector filter, inherit from Filter class\n \"\"\"\n\n def __init__(self, weight=1.0):\n \"\"\"Constructor for this class does following tasks, if not already downloaded\\\n , it first downloads text detector dnn weights file from public URL\\\n ands save it at USER_HOME/.katna directory, or /tmp/.katna directory.\\\n After this initializer code initializes internal parameter: \\\n min_confidence (for text detection)\n \"\"\"\n super().__init__(weight)\n self.min_confidence = config.TextDetector.min_confidence\n self.merge_threshold = config.TextDetector.merge_threshold\n self.layerNames = config.TextDetector.layerNames\n self.frozen_weights = config.TextDetector.frozen_weights\n self.cache_subdir = config.TextDetector.cache_subdir\n\n try:\n self.network_folder_path = os.path.join(os.path.expanduser(\"~\"), \".katna\")\n if not os.access(self.network_folder_path, os.W_OK):\n self.network_folder_path = os.path.join(\"/tmp\", \".katna\")\n self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)\n if not os.path.exists(self.datadir):\n os.makedirs(self.datadir)\n\n self.network_file_path = os.path.join(self.datadir, self.frozen_weights)\n if not os.path.exists(self.network_file_path):\n self.download_data()\n\n self.net = cv2.dnn.readNet(self.network_file_path)\n\n except Exception:\n raise FileNotFoundError(\n self.frozen_weights\n + \" seems to be missing.\\\n Download the file and specify the full path\\\n while initializing TextDetector class\"\n )\n\n def download_data(self):\n \"\"\"Public function for downloading the network weight from the URL link, to be used for\n text detection functionality. 
\n Troubleshooting tip: If you get FileNotFound error during text detector initialization,\n initialize the text detector and call this function directly to download the model file from public URL link.\n \"\"\"\n # create response object\n link = config.TextDetector.model_download_link\n r = requests.get(link, stream=True)\n # download started\n print(\"Downloading model file...\")\n # if not os.path.isfile(self.network_file_path) or not os.path.exists(self.network_file_path):\n with open(os.path.join(self.datadir, self.frozen_weights), \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024 * 1024):\n if chunk:\n f.write(chunk)\n print(\"Model file downloaded.\")\n\n def __decode_predictions(self, scores, geometry):\n \"\"\"Internal Function for getting bounding box and confidence values \n from text detector dnn network output (scores, geometry)\n function takes the number of rows and columns from the scores volume, then\n initializes set of bounding box rectangles and corresponding confidence scores\n \"\"\"\n (numRows, numCols) = scores.shape[2:4]\n rects = []\n confidences = []\n\n # loop over the number of rows\n for y in range(0, numRows):\n # extract the scores (probabilities), followed by the\n # geometrical data used to derive potential bounding box\n # coordinates that surround text\n scoresData = scores[0, 0, y]\n xData0 = geometry[0, 0, y]\n xData1 = geometry[0, 1, y]\n xData2 = geometry[0, 2, y]\n xData3 = geometry[0, 3, y]\n anglesData = geometry[0, 4, y]\n\n # loop over the number of columns\n for x in range(0, numCols):\n # if our score does not have sufficient probability,\n # ignore it\n if scoresData[x] < self.min_confidence:\n continue\n\n # compute the offset factor as our resulting feature\n # maps will be 4x smaller than the input image\n (offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n # extract the rotation angle for the prediction and\n # then compute the sin and cosine\n angle = anglesData[x]\n cos = np.cos(angle)\n sin = np.sin(angle)\n\n # use the geometry volume to derive the width and height\n # of the bounding box\n h = xData0[x] + xData2[x]\n w = xData1[x] + xData3[x]\n\n # compute both the starting and ending (x, y)-coordinates\n # for the text prediction bounding box\n endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n startX = int(endX - w)\n startY = int(endY - h)\n\n # add the bounding box coordinates and probability score\n # to our respective lists\n rects.append((startX, startY, endX, endY))\n confidences.append(scoresData[x])\n\n # return a tuple of the bounding boxes and associated confidences\n return (rects, confidences)\n\n def __merge_boxes(self, rects):\n \"\"\"main function to detect text boxes from image\n\n :param rects: list of \n :type rects: numpy array\n :param rectsUsed: image file in numpy array/opencv format\n :type rectsUsed: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n \"\"\"\n\n def grouper(iterable, interval=2):\n prev = None\n group = []\n for item in iterable:\n if not prev or abs(item[1] - prev[1]) <= interval:\n group.append(item)\n else:\n yield group\n group = [item]\n prev = item\n if group:\n yield group\n\n rects_used = []\n heights = list()\n for bbox in rects:\n heights.append(bbox[3] - bbox[1])\n heights = sorted(heights) # Sort heights\n median_height = heights[len(heights) // 2] / 2 # Find half of the median height\n\n bboxes_list = sorted(\n rects, key=lambda k: k[1]\n ) # Sort the bounding boxes 
based on y1 coordinate ( y of the left-top coordinate )\n combined_bboxes = grouper(\n bboxes_list, median_height\n ) # Group the bounding boxes\n for group in combined_bboxes:\n x_min = min(group, key=lambda k: k[0])[0] # Find min of x1\n x_max = max(group, key=lambda k: k[2])[2] # Find max of x2\n y_min = min(group, key=lambda k: k[1])[1] # Find min of y1\n y_max = max(group, key=lambda k: k[3])[3] # Find max of y2\n rects_used.append([x_min, y_min, x_max, y_max])\n return rects_used\n\n def __detect_text(self):\n \"\"\"Internal function to detect text bounding boxes from input image.\n Returns list of bounding boxes of each detected text field in input image.\n\n :param image: image file in numpy array/opencv format\n :type image: numpy array\n :param output_image: image file in numpy array/opencv format\n :type output_image: numpy array\n\n :return: output image with the list of text boxes\n :rtype: file, list\n \"\"\"\n (H, W) = self.image.shape[:2]\n rW = W / 320\n rH = H / 320\n image = cv2.resize(self.image, (320, 320))\n (H, W) = image.shape[:2]\n\n # construct a blob from the image and then perform a forward pass of\n # the model to obtain the two output layer sets\n blob = cv2.dnn.blobFromImage(\n self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False\n )\n\n self.net.setInput(blob)\n (scores, geometry) = self.net.forward(self.layerNames)\n\n rects, confidences = self.__decode_predictions(scores, geometry)\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n boxes = non_max_suppression(np.array(rects), probs=confidences)\n text_rects = []\n # loop over the bounding boxes\n for (startX, startY, endX, endY) in boxes:\n # scale the bounding box coordinates based on the respective\n # ratios\n\n startX = int(startX * rW)\n startY = int(startY * rH)\n endX = int(endX * rW)\n endY = int(endY * rH)\n cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)\n text_rects.append([startX, startY, endX, endY])\n\n text_rects = sorted(text_rects, key=lambda item: item[0])\n final_rects = text_rects\n if len(text_rects) > 0:\n final_rects = self.__merge_boxes(text_rects)\n\n return final_rects\n\n def set_image(self, image):\n \"\"\"Public set_image function, This will detect all text boxes in input image and\n will saves them as internal list of text_rect to be used in get_filter_result\n\n :param image: input image from which needs to be cropped\n :type image: numpy array(opencv)\n \"\"\"\n if image is None:\n return None\n self.image = image\n self.text_rects = self.__detect_text()\n\n def get_filter_result(self, crop):\n \"\"\"Main public function of TextDetector filter class,\n this filter Returns false if crop contains no text, additionally\n checks for overlap between input crop rectangle and the detected\n text bounding box, returns True if No overlap (Filter will not discard input crop)\n otherwise returns False (signal for discarding input crop).\n \n :param crop: input crop rectangle to test\n :type crop: crop_rect\n :return: True if No overlap (Filter will not discard input crop) otherwise returns False \n :rtype: bool\n \"\"\"\n # rect: xs,ys,xe,ye\n # crop: x,y,w,h\n if self.text_rects is None or len(self.text_rects) == 0:\n return True\n\n for rect in self.text_rects:\n if not (\n (rect[2]) <= (crop.x + crop.w)\n and (rect[0]) >= (crop.x)\n and (rect[1]) >= (crop.y)\n and (rect[3]) <= (crop.y + crop.h)\n ):\n return False\n else:\n return True\n\n return True\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.cos"
]
] |
manabukosaka/cleanrl
|
[
"31ae5f640ac7f7225375bc51759c4e8baa4880b4"
] |
[
"cleanrl/experiments/dqn2_atari_visual.py"
] |
[
"# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py\n\nimport numpy as np\nfrom collections import deque\nimport gym\nfrom gym import spaces\nimport cv2\ncv2.ocl.setUseOpenCL(False)\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condition for a few frames\n # so it's important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n \"\"\"\n Warp frames to 84x84 as done in the Nature paper and later work.\n If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which\n observation should be warped.\n \"\"\"\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(self._height, self._width, num_colors),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3\n\n def observation(self, obs):\n if self._key is None:\n frame = obs\n else:\n frame = obs[self._key]\n\n if self._grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(\n frame, (self._width, self._height), interpolation=cv2.INTER_AREA\n )\n if self._grayscale:\n frame = np.expand_dims(frame, -1)\n\n if self._key is 
None:\n obs = frame\n else:\n obs = obs.copy()\n obs[self._key] = frame\n return obs\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n Returns lazy array, which is much more memory efficient.\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)\n\n def observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=0)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n\n def count(self):\n frames = self._force()\n return frames.shape[frames.ndim - 1]\n\n def frame(self, i):\n return self._force()[..., i]\n\ndef wrap_atari(env, max_episode_steps=None):\n assert 'NoFrameskip' in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n\n assert max_episode_steps is None\n\n return env\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n Image shape to channels x weight x height\n \"\"\"\n\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(old_shape[-1], old_shape[0], old_shape[1]),\n dtype=np.uint8,\n )\n\n def observation(self, observation):\n return np.transpose(observation, axes=(2, 0, 1))\n\ndef wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n env = ImageToPyTorch(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env\n\n# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf\n\nimport torch\nimport torch.nn as 
nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport argparse\nfrom distutils.util import strtobool\nimport collections\nimport numpy as np\nimport gym\nfrom gym.wrappers import TimeLimit, Monitor\nfrom gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space\nimport time\nimport random\nimport os\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom PIL import Image\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Double DQN Agent')\n # Common arguments\n parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(\".py\"),\n help='the name of this experiment')\n parser.add_argument('--gym-id', type=str, default=\"BreakoutNoFrameskip-v4\",\n help='the id of the gym environment')\n parser.add_argument('--learning-rate', type=float, default=1e-4,\n help='the learning rate of the optimizer')\n parser.add_argument('--seed', type=int, default=2,\n help='seed of the experiment')\n parser.add_argument('--total-timesteps', type=int, default=10000000,\n help='total timesteps of the experiments')\n parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, `torch.backends.cudnn.deterministic=False`')\n parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, cuda will not be enabled by default')\n parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='run the script in production mode and use wandb to log outputs')\n parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='weather to capture videos of the agent performances (check out `videos` folder)')\n parser.add_argument('--wandb-project-name', type=str, default=\"cleanRL\",\n help=\"the wandb's project name\")\n parser.add_argument('--wandb-entity', type=str, default=None,\n help=\"the entity (team) of wandb's project\")\n \n # Algorithm specific arguments\n parser.add_argument('--buffer-size', type=int, default=1000000,\n help='the replay memory buffer size')\n parser.add_argument('--gamma', type=float, default=0.99,\n help='the discount factor gamma')\n parser.add_argument('--target-network-frequency', type=int, default=1000,\n help=\"the timesteps it takes to update the target network\")\n parser.add_argument('--max-grad-norm', type=float, default=0.5,\n help='the maximum norm for the gradient clipping')\n parser.add_argument('--batch-size', type=int, default=32,\n help=\"the batch size of sample from the reply memory\")\n parser.add_argument('--start-e', type=float, default=1.,\n help=\"the starting epsilon for exploration\")\n parser.add_argument('--end-e', type=float, default=0.02,\n help=\"the ending epsilon for exploration\")\n parser.add_argument('--exploration-fraction', type=float, default=0.10,\n help=\"the fraction of `total-timesteps` it takes from start-e to go end-e\")\n parser.add_argument('--learning-starts', type=int, default=80000,\n help=\"timestep to start learning\")\n parser.add_argument('--train-frequency', type=int, default=4,\n help=\"the frequency of training\")\n args = parser.parse_args()\n if not args.seed:\n args.seed = int(time.time())\n\nclass QValueVisualizationWrapper(gym.Wrapper):\n def __init__(self, env):\n super().__init__(env)\n 
self.env.reset()\n self.image_shape = self.env.render(mode=\"rgb_array\").shape\n self.q_values = [[0.,0.,0.,0.]]\n # self.metadata['video.frames_per_second'] = 60\n \n def set_q_values(self, q_values):\n self.q_values = q_values\n\n def render(self, mode=\"human\"):\n if mode==\"rgb_array\":\n env_rgb_array = super().render(mode)\n fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)\n df = pd.DataFrame(np.array(self.q_values).T)\n sns.barplot(x=df.index, y=0, data=df, ax=ax)\n ax.set(xlabel='actions', ylabel='q-values')\n fig.canvas.draw()\n X = np.array(fig.canvas.renderer.buffer_rgba())\n Image.fromarray(X)\n # Image.fromarray(X)\n rgb_image = np.array(Image.fromarray(X).convert('RGB'))\n plt.close(fig)\n q_value_rgb_array = rgb_image\n return np.append(env_rgb_array, q_value_rgb_array, axis=1)\n else:\n super().render(mode)\n\n# TRY NOT TO MODIFY: setup the environment\nexperiment_name = f\"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}\"\nwriter = SummaryWriter(f\"runs/{experiment_name}\")\nwriter.add_text('hyperparameters', \"|param|value|\\n|-|-|\\n%s\" % (\n '\\n'.join([f\"|{key}|{value}|\" for key, value in vars(args).items()])))\nif args.prod_mode:\n import wandb\n wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)\n writer = SummaryWriter(f\"/tmp/{experiment_name}\")\n\n# TRY NOT TO MODIFY: seeding\ndevice = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\nenv = gym.make(args.gym_id)\nenv = wrap_atari(env)\nenv = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`\nif args.capture_video:\n env = QValueVisualizationWrapper(env)\n env = Monitor(env, f'videos/{experiment_name}')\nenv = wrap_deepmind(\n env,\n clip_rewards=True,\n frame_stack=True,\n scale=False,\n)\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.backends.cudnn.deterministic = args.torch_deterministic\nenv.seed(args.seed)\nenv.action_space.seed(args.seed)\nenv.observation_space.seed(args.seed)\n# respect the default timelimit\nassert isinstance(env.action_space, Discrete), \"only discrete action space is supported\"\n\n# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#\nclass ReplayBuffer():\n def __init__(self, buffer_limit):\n self.buffer = collections.deque(maxlen=buffer_limit)\n \n def put(self, transition):\n self.buffer.append(transition)\n \n def sample(self, n):\n mini_batch = random.sample(self.buffer, n)\n s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []\n \n for transition in mini_batch:\n s, a, r, s_prime, done_mask = transition\n s_lst.append(s)\n a_lst.append(a)\n r_lst.append(r)\n s_prime_lst.append(s_prime)\n done_mask_lst.append(done_mask)\n\n return np.array(s_lst), np.array(a_lst), \\\n np.array(r_lst), np.array(s_prime_lst), \\\n np.array(done_mask_lst)\n\n# ALGO LOGIC: initialize agent here:\n# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16\n# apparently matters\nclass Linear0(nn.Linear):\n def reset_parameters(self):\n nn.init.constant_(self.weight, 0.0)\n if self.bias is not None:\n nn.init.constant_(self.bias, 0.0)\n\nclass Scale(nn.Module):\n def __init__(self, scale):\n super().__init__()\n self.scale = scale\n\n def forward(self, 
x):\n return x * self.scale\nclass QNetwork(nn.Module):\n def __init__(self, frames=4):\n super(QNetwork, self).__init__()\n self.network = nn.Sequential(\n Scale(1/255),\n nn.Conv2d(frames, 32, 8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, stride=1),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(3136, 512),\n nn.ReLU(),\n Linear0(512, env.action_space.n)\n )\n\n def forward(self, x):\n x = torch.Tensor(x).to(device)\n return self.network(x)\n\ndef linear_schedule(start_e: float, end_e: float, duration: int, t: int):\n slope = (end_e - start_e) / duration\n return max(slope * t + start_e, end_e)\n\nrb = ReplayBuffer(args.buffer_size)\nq_network = QNetwork().to(device)\ntarget_network = QNetwork().to(device)\ntarget_network.load_state_dict(q_network.state_dict())\noptimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)\nloss_fn = nn.MSELoss()\nprint(device.__repr__())\nprint(q_network)\n\n# TRY NOT TO MODIFY: start the game\nobs = env.reset()\nepisode_reward = 0\nfor global_step in range(args.total_timesteps):\n # ALGO LOGIC: put action logic here\n epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)\n obs = np.array(obs)\n logits = q_network.forward(obs.reshape((1,)+obs.shape))\n if args.capture_video:\n env.set_q_values(logits.tolist())\n if random.random() < epsilon:\n action = env.action_space.sample()\n else:\n action = torch.argmax(logits, dim=1).tolist()[0]\n\n # TRY NOT TO MODIFY: execute the game and log data.\n next_obs, reward, done, info = env.step(action)\n episode_reward += reward\n \n # TRY NOT TO MODIFY: record rewards for plotting purposes\n if 'episode' in info.keys():\n print(f\"global_step={global_step}, episode_reward={info['episode']['r']}\")\n writer.add_scalar(\"charts/episode_reward\", info['episode']['r'], global_step)\n writer.add_scalar(\"charts/epsilon\", epsilon, global_step)\n\n # ALGO LOGIC: training.\n rb.put((obs, action, reward, next_obs, done))\n if global_step > args.learning_starts and global_step % args.train_frequency == 0:\n s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)\n with torch.no_grad():\n # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]\n current_value = q_network.forward(s_next_obses)\n target_value = target_network.forward(s_next_obses)\n target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)\n td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))\n\n old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()\n loss = loss_fn(td_target, old_val)\n writer.add_scalar(\"losses/td_loss\", loss, global_step)\n\n # optimize the midel\n optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)\n optimizer.step()\n\n # update the target network\n if global_step % args.target_network_frequency == 0:\n target_network.load_state_dict(q_network.state_dict())\n\n # TRY NOT TO MODIFY: CRUCIAL step easy to overlook \n obs = next_obs\n if done:\n # important to note that because `EpisodicLifeEnv` wrapper is applied,\n # the real episode reward is actually the sum of episode reward of 5 lives\n # which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics\n obs, episode_reward = env.reset(), 0\n\nenv.close()\nwriter.close()\n"
] |
[
[
"torch.nn.Linear",
"numpy.sign",
"torch.cuda.is_available",
"torch.LongTensor",
"numpy.concatenate",
"torch.nn.init.constant_",
"matplotlib.pyplot.subplots",
"torch.manual_seed",
"numpy.transpose",
"numpy.append",
"torch.Tensor",
"torch.utils.tensorboard.SummaryWriter",
"numpy.expand_dims",
"torch.nn.Flatten",
"matplotlib.use",
"numpy.array",
"numpy.zeros",
"torch.max",
"matplotlib.pyplot.close",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.argmax",
"torch.nn.MSELoss",
"numpy.random.seed",
"torch.no_grad"
]
] |
dropoutlabs/tf-encrypted
|
[
"48c9dc7419163425e736ad05bb19980d134fc851"
] |
[
"tf_encrypted/convert/register.py"
] |
[
"\"\"\"Registry for the TF Encrypted Converter.\"\"\"\nimport array\nimport logging\nimport os\nfrom typing import Any, List\nfrom collections import OrderedDict\n\nimport yaml\nimport numpy as np\nimport tensorflow as tf\n\nfrom ..layers import Conv2D, Relu, Sigmoid, Dense, AveragePooling2D, MaxPooling2D\nfrom ..protocol.pond import PondPrivateTensor, PondMaskedTensor\n\n\ndef registry():\n \"\"\"Map reserved names and scopes to their conversion functions.\"\"\"\n reg = {\n 'Placeholder': _placeholder,\n 'Const': _constant,\n 'Conv2D': _conv2d,\n 'Relu': _relu,\n 'Sigmoid': _sigmoid,\n 'MatMul': _matmul,\n 'Shape': _shape,\n 'StridedSlice': _strided_slice,\n 'Add': _add,\n 'Sub': _sub,\n 'Transpose': _transpose,\n 'Reshape': _reshape,\n 'Pack': _pack,\n 'Rsqrt': _rsqrt,\n 'Mul': _mul,\n 'ExpandDims': _expand_dims,\n 'AvgPool': _avgpool,\n 'Squeeze': _squeeze,\n 'ConcatV2': _concat,\n 'BiasAdd': _bias_add,\n 'MaxPool': _maxpool,\n 'Pad': _pad,\n 'BatchToSpaceND': _batch_to_space_nd,\n 'SpaceToBatchND': _space_to_batch_nd,\n 'ArgMax': _argmax,\n 'required_space_to_batch_paddings': _required_space_to_batch_paddings,\n 'flatten': _flatten,\n 'conv2d': _keras_conv2d,\n 'Slice': _slice,\n 'Neg': _negative,\n 'Split': _split,\n 'Identity': _identity,\n \"GatherV2\": _gather,\n \"dense\": _keras_dense,\n }\n\n return reg\n\n\nconvert_dir = os.path.dirname(os.path.abspath(__file__))\nspecops_path = os.path.join(convert_dir, \"specops.yaml\")\nwith open(specops_path, \"r\") as stream:\n loaded_yaml = yaml.load(stream, Loader=yaml.SafeLoader)\n sorted_yaml = sorted(loaded_yaml.items(), key=lambda kv: kv[0])\n REGISTERED_SPECOPS = OrderedDict(sorted_yaml)\n\n\n# pylint: disable=unused-argument\n# pylint: disable=missing-docstring\ndef _placeholder(converter, node: Any, inputs: List[str]) -> Any:\n return tf.placeholder(node.attr[\"dtype\"].type,\n shape=node.attr[\"shape\"].shape)\n\n\ndef _constant(converter, node: Any, inputs: List[str]) -> Any:\n # need to able to access the underlying weights return the node\n return node\n\n\ndef _identity(converter, node: Any, inputs: List[str]) -> Any:\n # need to able to access the underlying weights return the node\n return converter.outputs[inputs[0]]\n\n\ndef _matmul(converter, node: Any, inputs: List[str]) -> Any:\n a = converter.outputs[inputs[0]]\n b = converter.outputs[inputs[1]]\n\n tensor = b.attr[\"value\"].tensor\n\n b_shape = [i.size for i in tensor.tensor_shape.dim]\n\n transpose_a = node.attr[\"transpose_a\"].b\n transpose_b = node.attr[\"transpose_b\"].b\n\n layer = Dense(a.shape.as_list(),\n b_shape[1],\n transpose_input=transpose_a,\n transpose_weight=transpose_b)\n\n dtype = tensor.dtype\n\n if dtype == tf.float32:\n nums = array.array('f', tensor.tensor_content)\n elif dtype == tf.float64:\n nums = array.array('d', tensor.tensor_content)\n else:\n raise TypeError(\"Unsupported dtype for weights\")\n\n def inputter_fn():\n return tf.constant(np.array(nums).reshape(b_shape))\n\n w = converter.protocol.define_private_input(converter.model_provider,\n inputter_fn)\n\n layer.initialize(initial_weights=w)\n\n return layer.forward(a)\n\n\ndef _conv2d(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n kernel = converter.outputs[inputs[1]]\n\n if isinstance(kernel, tf.NodeDef):\n shape = [i.size for i in kernel.attr[\"value\"].tensor.tensor_shape.dim]\n w = _nodef_to_private_pond(converter, kernel)\n else:\n shape = kernel.shape.as_list()\n w = kernel\n\n fmt = node.attr[\"data_format\"].s.decode('ascii')\n\n layer = 
Conv2D(x_in.shape.as_list(),\n shape,\n strides=int(max(node.attr[\"strides\"].list.i)),\n padding=node.attr[\"padding\"].s.decode('ascii'),\n channels_first=fmt == \"NCHW\")\n\n layer.initialize(initial_weights=w)\n\n out = layer.forward(x_in)\n\n return out\n\n\ndef _keras_conv2d(converter, interiors, inputs):\n x_in = converter.outputs[inputs[0]]\n\n conv_op = interiors[\"Conv2D\"]\n kernel = interiors[\"kernel\"]\n k = _nodef_to_private_pond(converter, kernel)\n try:\n bias = interiors[\"bias\"]\n b = _nodef_to_private_pond(converter, bias)\n for ax in [0, -1, -1]:\n b = b.expand_dims(axis=ax)\n except KeyError:\n b = None\n\n input_shape = x_in.shape.as_list()\n shape = [i.size for i in kernel.attr[\"value\"].tensor.tensor_shape.dim]\n fmt = conv_op.attr[\"data_format\"].s.decode('ascii')\n strides = int(max(conv_op.attr[\"strides\"].list.i))\n padding = conv_op.attr[\"padding\"].s.decode('ascii')\n\n layer = Conv2D(\n input_shape, shape,\n strides=strides,\n padding=padding,\n channels_first=fmt == \"NCHW\"\n )\n\n layer.initialize(initial_weights=k, initial_bias=b)\n out = layer.forward(x_in)\n\n return out\n\n\ndef _keras_dense(converter, interiors, inputs):\n x_in = converter.outputs[inputs[0]]\n\n kernel = interiors[\"kernel\"]\n k = _nodef_to_private_pond(converter, kernel)\n try:\n bias = interiors[\"bias\"]\n b = _nodef_to_private_pond(converter, bias)\n except KeyError:\n b = None\n\n input_shape = x_in.shape.as_list()\n shape = [i.size for i in kernel.attr[\"value\"].tensor.tensor_shape.dim]\n\n layer = Dense(input_shape,\n out_features=shape[1])\n\n layer.initialize(initial_weights=k, initial_bias=b)\n out = layer.forward(x_in)\n\n return out\n\n\ndef _relu(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n return Relu(x_in.shape.as_list()).forward(x_in)\n\n\ndef _sigmoid(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n return Sigmoid(x_in.shape.as_list()).forward(x_in)\n\n\ndef _strided_slice(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n if isinstance(x_in, tf.NodeDef):\n input_out = _nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n begin = converter.outputs[inputs[1]]\n end = converter.outputs[inputs[2]]\n strides = converter.outputs[inputs[3]]\n\n begin_mask = node.attr[\"begin_mask\"].i\n end_mask = node.attr[\"end_mask\"].i\n ellipsis_mask = node.attr[\"ellipsis_mask\"].i\n new_axis_mask = node.attr[\"new_axis_mask\"].i\n shrink_axis_mask = node.attr[\"shrink_axis_mask\"].i\n\n begin = tf.constant(begin.attr[\"value\"].tensor)\n end = tf.constant(end.attr[\"value\"].tensor)\n strides = tf.constant(strides.attr[\"value\"].tensor)\n\n return converter.protocol.strided_slice(input_out, begin, end,\n strides=strides,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n\ndef _pack(converter, node: Any, inputs: List[str]) -> Any:\n final_inputs = []\n\n for x_in in inputs:\n input_c = converter.outputs[x_in]\n if isinstance(input_c, tf.NodeDef):\n final_inputs.append(_nodef_to_private_pond(converter, input_c))\n else:\n final_inputs.append(input_c)\n\n return converter.protocol.stack(final_inputs, axis=node.attr[\"axis\"].i)\n\n\ndef _bias_add(converter, node: Any, inputs: List[str]) -> Any:\n a = converter.outputs[inputs[0]]\n b = converter.outputs[inputs[1]]\n\n if isinstance(a, tf.NodeDef):\n a_out = 
_nodef_to_private_pond(converter, a)\n else:\n a_out = a\n\n if isinstance(b, tf.NodeDef):\n b_out = _nodef_to_private_pond(converter, b)\n else:\n b_out = b\n\n return converter.protocol.add(a_out, b_out)\n\n\ndef _maxpool(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n ksize = node.attr[\"ksize\"].list.i\n s = node.attr[\"strides\"].list.i\n\n padding = node.attr[\"padding\"].s.decode('ascii')\n pool_size = [ksize[1], ksize[2]]\n strides = [s[1], s[2]]\n\n shape = [int(i) for i in x_in.shape]\n\n channels_first = node.attr[\"data_format\"].s.decode('ascii') == \"NCHW\"\n\n pooler = MaxPooling2D(shape, pool_size, strides, padding, channels_first)\n\n out = pooler.forward(x_in)\n\n return out\n\n\ndef _shape(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n return x_in.shape\n\n\ndef _reshape(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n shape = converter.outputs[inputs[1]]\n\n tensor = shape.attr[\"value\"].tensor\n dtype = shape.attr[\"dtype\"].type\n if dtype == tf.int32:\n nums = array.array('i', tensor.tensor_content)\n elif dtype == tf.int64:\n nums = array.array('l', tensor.tensor_content)\n else:\n raise TypeError(\"Unsupported dtype for reshape shape\")\n\n return converter.protocol.reshape(x_in, list(nums))\n\n\ndef _transpose(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n perm = converter.outputs[inputs[1]]\n\n tensor = perm.attr[\"value\"].tensor\n shape = [i.size for i in tensor.tensor_shape.dim]\n\n dtype = perm.attr[\"dtype\"].type\n if dtype == tf.int32:\n nums = array.array('i', tensor.tensor_content)\n elif dtype == tf.int64:\n nums = array.array('l', tensor.tensor_content)\n else:\n raise TypeError(\"Unsupported dtype for transpose perm\")\n\n return converter.protocol.transpose(x_in, np.array(nums).reshape(shape))\n\n\ndef _expand_dims(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n if isinstance(x_in, tf.NodeDef):\n input_out = _nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n input_axis = converter.outputs[inputs[1]]\n axis_attr = input_axis.attr[\"value\"].tensor.int_val\n axis_val = array.array('i', axis_attr)[0]\n\n return converter.protocol.expand_dims(input_out, axis_val)\n\n\ndef _negative(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n if isinstance(x_in, tf.NodeDef):\n input_out = _nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n return converter.protocol.negative(input_out)\n\n\ndef _gather(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n indices = converter.outputs[inputs[1]]\n axis = converter.outputs[inputs[2]]\n\n if isinstance(x_in, tf.NodeDef):\n input_out = _nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n indices_out = list(_nodef_to_numpy_array(indices))\n\n axis_val = axis.attr[\"value\"].tensor.int_val[0]\n\n return converter.protocol.gather(input_out, indices_out, axis_val)\n\n\ndef _squeeze(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n axis = node.attr[\"squeeze_dims\"].list.i\n\n return converter.protocol.squeeze(x_in, list(axis))\n\n\ndef _split(converter, node: Any, inputs: List[str]) -> Any:\n axis = converter.outputs[inputs[0]]\n x_in = converter.outputs[inputs[1]]\n\n if isinstance(x_in, tf.NodeDef):\n input_out = 
_nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n num_split = node.attr[\"num_split\"].i\n axis_val = axis.attr[\"value\"].tensor.int_val[0]\n\n return converter.protocol.split(input_out, num_split, axis_val)[0]\n\n\ndef _pad(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n p = (converter.outputs[inputs[1]])\n\n paddings_t = p.attr[\"value\"].tensor\n\n paddings_arr = list(array.array('I', paddings_t.tensor_content))\n paddings_lst = [paddings_arr[i:i + 2]\n for i in range(0, len(paddings_arr), 2)]\n\n return converter.protocol.pad(x_in, paddings_lst)\n\n\ndef _rsqrt(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n if isinstance(x_in, tf.NodeDef):\n tensor = x_in.attr[\"value\"].tensor\n shape = [i.size for i in tensor.tensor_shape.dim]\n\n dtype = x_in.attr[\"dtype\"].type\n if dtype == tf.float32:\n nums = array.array('f', tensor.tensor_content)\n elif dtype == tf.float64:\n nums = array.array('d', tensor.tensor_content)\n\n else:\n raise TypeError(\"Unsupported dtype for rsqrt\")\n\n def inputter_fn():\n return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))\n\n else:\n # XXX this is a little weird but the input into rsqrt is public and\n # being used only for batchnorm at the moment\n decoded = converter.protocol._decode(x_in.value_on_0, True) # pylint: disable=protected-access\n\n def inputter_fn():\n return tf.rsqrt(decoded)\n\n x = converter.protocol.define_public_input(\n converter.model_provider, inputter_fn)\n\n return x\n\n\ndef _add(converter, node: Any, inputs: List[str]) -> Any:\n a = converter.outputs[inputs[0]]\n b = converter.outputs[inputs[1]]\n\n if isinstance(a, tf.NodeDef):\n a_out = _nodef_to_public_pond(converter, a)\n else:\n a_out = a\n\n if isinstance(b, tf.NodeDef):\n b_out = _nodef_to_public_pond(converter, b)\n else:\n b_out = b\n\n return converter.protocol.add(a_out, b_out)\n\n\ndef _sub(converter, node: Any, inputs: List[str]) -> Any:\n a = converter.outputs[inputs[0]]\n b = converter.outputs[inputs[1]]\n\n if isinstance(a, tf.NodeDef):\n a_out = _nodef_to_public_pond(converter, a)\n else:\n a_out = a\n\n if isinstance(b, tf.NodeDef):\n b_out = _nodef_to_public_pond(converter, b)\n else:\n b_out = b\n\n return converter.protocol.sub(a_out, b_out)\n\n\ndef _mul(converter, node: Any, inputs: List[str]) -> Any:\n a = converter.outputs[inputs[0]]\n b = converter.outputs[inputs[1]]\n\n if isinstance(a, tf.NodeDef):\n a_out = _nodef_to_public_pond(converter, a)\n else:\n a_out = a\n\n if isinstance(b, tf.NodeDef):\n b_out = _nodef_to_public_pond(converter, b)\n else:\n b_out = b\n\n return converter.protocol.mul(a_out, b_out)\n\n\ndef _avgpool(converter, node: Any, inputs: List[str]) -> Any:\n x_in = converter.outputs[inputs[0]]\n\n ksize = node.attr[\"ksize\"].list.i\n s = node.attr[\"strides\"].list.i\n\n padding = node.attr[\"padding\"].s.decode('ascii')\n pool_size = [ksize[1], ksize[2]]\n strides = [s[1], s[2]]\n\n shape = [int(i) for i in x_in.shape]\n\n channels_first = node.attr[\"data_format\"].s.decode('ascii') == \"NCHW\"\n\n avg = AveragePooling2D(shape, pool_size, strides, padding, channels_first)\n\n out = avg.forward(x_in)\n\n return out\n\n\ndef _concat(converter, node: Any, inputs: List[str]) -> Any:\n input0 = converter.outputs[inputs[0]]\n input1 = converter.outputs[inputs[1]]\n axis = converter.outputs[inputs[2]]\n axis_int = axis.attr[\"value\"].tensor.int_val[0]\n\n return converter.protocol.concat([input0, input1], 
axis_int)\n\n\ndef _batch_to_space_nd(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n block_shape = converter.outputs[inputs[1]].attr[\"value\"].tensor\n crops = converter.outputs[inputs[2]].attr[\"value\"].tensor\n\n return converter.protocol.batch_to_space_nd(x_in, block_shape, crops)\n\n\ndef _space_to_batch_nd(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n block_shape = converter.outputs[inputs[1]].attr[\"value\"].tensor\n paddings = converter.outputs[inputs[2]].attr[\"value\"].tensor\n\n return converter.protocol.space_to_batch_nd(x_in, block_shape, paddings)\n\n\ndef _flatten(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n\n shape = x_in.shape.as_list()\n non_batch = 1\n for dim in shape[1:]:\n non_batch *= dim\n\n return converter.protocol.reshape(x_in, [-1, non_batch])\n\n\ndef _required_space_to_batch_paddings(converter, node, inputs: List[str]):\n\n inputs_node = [converter.outputs[inputs[i]] for i in range(len(inputs))]\n inputs_int32 = []\n for x_in in inputs_node:\n pvt_check = isinstance(x_in, PondPrivateTensor)\n msk_check = isinstance(x_in, PondMaskedTensor)\n if pvt_check or msk_check:\n logging.warning((\"Revealing private input: \"\n \"required_space_to_batch_paddings assumes public \"\n \"input.\"))\n inputs_int32.append(tf.cast(x_in.reveal().decode(), tf.int32))\n elif isinstance(x_in, tf.NodeDef):\n inputs_int32.append(_nodef_to_numpy_array(x_in))\n else:\n raise TypeError(\"Unexpected input of type {}.\".format(type(x_in)))\n\n if len(inputs_int32) == 2:\n input_shape, block_shape = inputs_int32\n\n def inputter_pad():\n pads, _ = tf.required_space_to_batch_paddings(input_shape, block_shape)\n return tf.cast(pads, tf.float64)\n\n def inputter_crop():\n _, crops = tf.required_space_to_batch_paddings(input_shape, block_shape)\n return tf.cast(crops, tf.float64)\n else:\n base_paddings, input_shape, block_shape = inputs_int32\n\n def inputter_pad():\n pads, _ = tf.required_space_to_batch_paddings(\n input_shape,\n block_shape,\n base_paddings=base_paddings,\n )\n return tf.cast(pads, tf.float64)\n\n def inputter_crop():\n _, crops = tf.required_space_to_batch_paddings(\n input_shape,\n block_shape,\n base_paddings=base_paddings,\n )\n return tf.cast(crops, tf.float64)\n\n pad_private = converter.protocol.define_public_input(\n converter.model_provider, inputter_pad)\n crop_private = converter.protocol.define_public_input(\n converter.model_provider, inputter_crop)\n\n return (pad_private, crop_private)\n\n\ndef _argmax(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n axis = converter.outputs[inputs[1]].attr[\"value\"].tensor.int_val[0]\n\n return converter.protocol.argmax(x_in, axis=axis)\n\n\ndef _slice(converter, node, inputs):\n x_in = converter.outputs[inputs[0]]\n begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])\n size = _nodef_to_numpy_array(converter.outputs[inputs[2]])\n\n if isinstance(x_in, tf.NodeDef):\n input_out = _nodef_to_private_pond(converter, x_in)\n else:\n input_out = x_in\n\n # Slice is a special case of strided_slice. Slice takes size (the number of\n # elements we want to slice) as an input. 
However strided_slice takes end\n # (integer until which the slicing takes place) as input.\n # We can infere the end parameter with : end[i] = begin[i] + size[i].\n # If size is negative, the stepping go towards smaller indices.\n # In this case we can infer the end parameter with: end[i] = input_shape[i] - size[i] + 1\n end = np.zeros(len(begin))\n input_shape = x_in.shape.as_list()\n\n # if size is negative take the input dimension\n for i in range(len(end)): # pylint: disable=consider-using-enumerate\n if size[i] < 0:\n end[i] = input_shape[i] - size[i] + 1\n else:\n end[i] = begin[i] + size[i]\n\n return converter.protocol.strided_slice(input_out, begin, end)\n\n\n# pylint: enable=unused-argument\n# pylint: enable=missing-docstring\ndef _nodef_to_public_pond(converter, x):\n \"\"\"Map a NodeDef x to a PublicPondTensor.\"\"\"\n dtype = x.attr[\"dtype\"].type\n x_shape = [i.size for i in x.attr[\"value\"].tensor.tensor_shape.dim]\n\n if not x_shape:\n if dtype == tf.float32:\n nums = x.attr[\"value\"].tensor.float_val\n elif dtype == tf.float64:\n nums = x.attr[\"value\"].tensor.float_val\n elif dtype == tf.int32:\n nums = x.attr[\"value\"].tensor.int_val\n else:\n raise TypeError(\"Unsupported dtype\")\n\n def inputter_fn():\n return tf.constant(np.array(nums).reshape(1, 1))\n\n else:\n if dtype == tf.float32:\n nums = array.array('f', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.float64:\n nums = array.array('d', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.int32:\n nums = array.array('i', x.attr[\"value\"].tensor.tensor_content)\n else:\n raise TypeError(\"Unsupported dtype\")\n\n def inputter_fn():\n return tf.constant(np.array(nums).reshape(x_shape))\n\n x_public = converter.protocol.define_public_input(\n converter.model_provider, inputter_fn)\n\n return x_public\n\n\ndef _nodef_to_private_pond(converter, x):\n \"\"\"Map a NodeDef x to a PrivatePondTensor.\"\"\"\n dtype = x.attr[\"dtype\"].type\n warn_msg = \"Unexpected dtype {} found at node {}\"\n err_msg = \"Unsupported dtype {} found at node {}\"\n\n x_shape = [i.size for i in x.attr[\"value\"].tensor.tensor_shape.dim]\n\n if not x_shape:\n if dtype == tf.float32:\n nums = x.attr[\"value\"].tensor.float_val\n elif dtype == tf.float64:\n nums = x.attr[\"value\"].tensor.float_val\n elif dtype == tf.int32:\n logging.warning(warn_msg, dtype, x.name)\n nums = x.attr[\"value\"].tensor.int_val\n else:\n raise TypeError(err_msg.format(dtype, x.name))\n\n def inputter_fn():\n return tf.constant(np.array(nums).reshape(1, 1))\n\n else:\n if dtype == tf.float32:\n nums = array.array('f', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.float64:\n nums = array.array('d', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.int32:\n logging.warning(warn_msg, dtype, x.name)\n nums = array.array('i', x.attr[\"value\"].tensor.tensor_content)\n else:\n raise TypeError(err_msg.format(dtype, x.name))\n\n def inputter_fn():\n return tf.constant(np.array(nums).reshape(x_shape))\n\n x_private = converter.protocol.define_private_input(\n converter.model_provider, inputter_fn)\n\n return x_private\n\n\ndef _nodef_to_numpy_array(x):\n \"\"\"Map a NodeDef x to a np.array.\"\"\"\n dtype = x.attr[\"dtype\"].type\n x_shape = [i.size for i in x.attr[\"value\"].tensor.tensor_shape.dim]\n\n if dtype == tf.float32:\n nums = array.array('f', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.float64:\n nums = array.array('d', x.attr[\"value\"].tensor.tensor_content)\n elif dtype == tf.int32:\n nums = 
array.array('i', x.attr[\"value\"].tensor.tensor_content)\n else:\n raise TypeError(\"Unsupported dtype\")\n\n return np.array(nums).reshape(x_shape)\n"
] |
[
[
"numpy.array",
"tensorflow.required_space_to_batch_paddings",
"tensorflow.rsqrt",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.cast"
]
] |
delos/dm-pta-mc
|
[
"bce9ce815a518e1b47d1894fce3e003c5e649113"
] |
[
"src/signals.py"
] |
[
"\"\"\"\n Functions computing the signal shapes\n\"\"\"\n\nimport numpy as np\nfrom time import time\n\nimport src.constants as const\n\n\ndef subtract_signal(t, signal, fit_params=3):\n \"\"\"\n\n Returns the subtracted signal\n\n \"\"\"\n\n # fit dphi(t) to polynomials and subtract the contribution from n=0, 1 and 2\n coef = np.polynomial.polynomial.polyfit(t, signal, fit_params - 1) # (3)\n delta_signal = np.einsum(\n \"n,nj->j\", coef, np.asarray([np.power(t, n) for n in range(fit_params)])\n ) # (Nt)\n\n # compute the subtracted signal\n ht = signal - delta_signal # (Nt), unit = s\n\n return ht\n\n\ndef dphi_dop_chunked(\n t,\n profile,\n r0_vec,\n v_vec,\n d_hat,\n use_form=False,\n use_chunk=False,\n chunk_size=10000,\n verbose=False,\n form_fun=None,\n interp_table=None,\n time_end=np.inf,\n):\n \"\"\"\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n \"\"\"\n\n num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry\n\n dphi = np.zeros(len(t))\n\n if use_chunk == True:\n\n if num_objects % chunk_size == 0:\n num_chunks = num_objects // chunk_size\n else:\n num_chunks = num_objects // chunk_size + 1\n\n if verbose:\n print(\" Chunking data (%d chunks) ... \"%num_chunks)\n print()\n\n for i in range(num_chunks):\n \n if time() > time_end: raise TimeoutError\n\n r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]\n v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]\n\n profile_c = {}\n for key in list(profile):\n profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]\n\n dphi += dphi_dop(\n t, profile_c, r0_c, v_c, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table\n )\n else:\n\n dphi += dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)\n\n return dphi\n\n\ndef dphi_dop_chunked_vec(\n t,\n profile,\n r0_vec,\n v_vec,\n use_form=False,\n use_chunk=False,\n chunk_size=10000,\n verbose=False,\n form_fun=None,\n interp_table=None,\n time_end=np.inf,\n):\n \"\"\"\n\n Compute dphi but in chunks over the subhalos, use when Nt x N is too large an array to\n store in memory\n\n \"\"\"\n\n num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry\n\n dphi_vec = np.zeros((len(t), 3))\n\n if use_chunk == True:\n\n if verbose:\n print(\" Chunking data ... 
\")\n print()\n\n if num_objects % chunk_size == 0:\n num_chunks = num_objects // chunk_size\n else:\n num_chunks = num_objects // chunk_size + 1\n\n for i in range(num_chunks):\n \n if time() > time_end: raise TimeoutError\n\n r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]\n v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]\n\n profile_c = {}\n for key in list(profile):\n profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]\n\n dphi_vec += dphi_dop_vec(\n t, profile_c, r0_c, v_c, use_form=use_form, form_fun=form_fun, interp_table=interp_table\n )\n else:\n\n dphi_vec += dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=use_form, form_fun=form_fun, interp_table=interp_table)\n\n return dphi_vec\n\n\ndef dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None,\n interp_table=None):\n \"\"\"\n\n Returns the vector phase shift due to the Doppler delay for subhalos of mass, mass.\n Dot with d_hat to get dphi_I\n\n TODO: add use_closest option\n\n \"\"\"\n\n v_mag = np.linalg.norm(v_vec, axis=1)\n\n r0_v = np.einsum(\"ij, ij -> i\", r0_vec, v_vec)\n t0 = -r0_v / np.square(v_mag) # year\n\n b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3)\n b_mag = np.linalg.norm(b_vec, axis=1) # (N)\n tau = b_mag / v_mag\n\n b_hat = b_vec / b_mag[:, np.newaxis] # (N, 3)\n v_hat = v_vec / v_mag[:, np.newaxis]\n\n x = np.subtract.outer(t, t0) / tau\n x0 = -t0 / tau\n\n prefactor = (\n const.yr_to_s\n * const.GN\n / (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))\n )\n\n if interp_table is None:\n \n bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0) # (Nt, N)\n vd_term = np.arcsinh(x) - np.arcsinh(x0)\n \n if 'M' in list(profile):\n prefactor *= profile['M']\n \n if use_form:\n \n t_cl = np.maximum(np.minimum(t0, t[-1]), 0)\n x_cl = (t_cl - t0) / tau\n r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)\n \n rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)\n \n form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)\n \n bd_term *= prefactor * form_func\n vd_term *= prefactor * form_func\n \n else:\n \n bd_term = prefactor * bd_term\n vd_term = prefactor * vd_term\n else:\n if form_fun is not None:\n t_cl = np.maximum(np.minimum(t0, t[-1]), 0)\n x_cl = (t_cl - t0) / tau\n r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)\n \n form_func = form_fun(r_cl, profile['rs'], profile['rhos'])\n \n bd_term *= prefactor * form_func\n vd_term *= prefactor * form_func\n \n else:\n raise ValueError('rho_s, r_s halo description currently requires custom density profile (\"USE_FORMTAB\")')\n \n else:\n \n y = b_mag / profile['rs']\n \n bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)\n \n y.shape = (1,-1)\n y = np.broadcast_to(y,x.shape)\n \n bd_term, vd_term = interp_table.bd_vd_terms(x, y)\n \n bd_term -= bd_term0\n vd_term -= vd_term0\n \n bd_term *= prefactor * profile['rhos'] * profile['rs']**3\n vd_term *= prefactor * profile['rhos'] * profile['rs']**3\n\n # sum the signal over all the events\n sig = np.einsum(\"to, oi -> ti\", bd_term, b_hat) - np.einsum(\n \"to, oi -> ti\", vd_term, v_hat\n )\n\n return sig\n\n\ndef dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None,\n interp_table=None):\n \"\"\"\n\n Returns the phase shift due to the Doppler delay for subhalos of mass, mass\n\n TODO: add use_closest option\n\n \"\"\"\n\n v_mag = np.linalg.norm(v_vec, axis=1)\n\n r0_v = np.einsum(\"ij, ij -> i\", r0_vec, v_vec) # kpc^2/yr\n t0 = -r0_v / np.square(v_mag) # year\n\n b_vec = r0_vec + v_vec * t0[:, 
np.newaxis] # (N, 3), kpc\n b_mag = np.linalg.norm(b_vec, axis=1) # (N)\n tau = b_mag / v_mag # year\n\n b_hat = b_vec / b_mag[:, np.newaxis]\n v_hat = v_vec / v_mag[:, np.newaxis]\n\n b_d = np.dot(b_hat, d_hat)\n v_d = np.dot(v_hat, d_hat)\n\n x = np.subtract.outer(t, t0) / tau\n x0 = -t0 / tau\n\n prefactor = (\n const.yr_to_s\n * const.GN\n / (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))\n )\n\n if interp_table is None:\n \n bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0)\n vd_term = np.arcsinh(x) - np.arcsinh(x0)\n \n sig = bd_term * b_d - vd_term * v_d\n \n if 'M' in list(profile):\n prefactor *= profile['M']\n \n if use_form:\n \n t_cl = np.maximum(np.minimum(t0, t[-1]), 0)\n x_cl = (t_cl - t0) / tau\n r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)\n \n rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)\n \n form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)\n \n sig = form_func * sig\n else:\n if form_fun is not None:\n t_cl = np.maximum(np.minimum(t0, t[-1]), 0)\n x_cl = (t_cl - t0) / tau\n r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)\n \n form_func = form_fun(r_cl, profile['rs'], profile['rhos'])\n \n sig = form_func * sig\n \n else:\n raise ValueError('rho_s, r_s halo description currently requires custom density profile (\"USE_FORMTAB\")')\n \n else:\n \n y = b_mag / profile['rs']\n \n bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)\n \n y.shape = (1,-1)\n y = np.broadcast_to(y,x.shape)\n \n bd_term, vd_term = interp_table.bd_vd_terms(x, y)\n \n bd_term -= bd_term0\n vd_term -= vd_term0\n \n sig = profile['rhos'] * profile['rs']**3 * (bd_term * b_d + vd_term * v_d)\n\n sig = prefactor * sig\n\n # sum the signal over all the events\n return np.sum(sig, axis=-1)\n\n\ndef form(s, c):\n\n return (np.log(1 + c * s) - c * s / (1 + c * s)) / (np.log(1 + c) - c / (1 + c))\n\n"
] |
[
[
"numpy.square",
"numpy.linalg.norm",
"numpy.dot",
"numpy.log",
"numpy.minimum",
"numpy.polynomial.polynomial.polyfit",
"numpy.sum",
"numpy.subtract.outer",
"numpy.einsum",
"numpy.power",
"numpy.sqrt",
"numpy.arcsinh",
"numpy.broadcast_to"
]
] |
qurator-spk/sbb_ned
|
[
"d4cfe249f72e48913f254a58fbe0dbe6e47bd168"
] |
[
"qurator/sbb_ned/models/evaluation.py"
] |
[
"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef compute_lr(target_lr, n_epochs, train_set_size, batch_size, warmup):\n total = (n_epochs - 1) * int(np.ceil(train_set_size / batch_size))\n progress = [float(t) / total for t in range(0, total)]\n\n factor = [p / warmup if p < warmup else max((p - 1.) / (warmup - 1.), 0.) for p in progress]\n\n lr = [f * target_lr for f in factor]\n\n return lr\n\n\ndef load_train_log(directories, num_epochs, target_lr, **kwargs):\n parts = []\n for d, ep, t_lr in zip(directories, num_epochs, target_lr):\n files = ['{}/loss_ep{}.pkl'.format(d, i) for i in range(1, ep)]\n\n files = [f for f in files if os.path.exists(f)]\n\n part = pd.concat([pd.read_pickle(f) for f in files])\n\n part['lr'] = compute_lr(target_lr=t_lr, n_epochs=ep, **kwargs)[0:len(part)]\n\n parts.append(part)\n\n return pd.concat(parts).reset_index(drop=True)\n\n\ndef plot_loss_against_lr(loss, wnd_size=6000):\n fig = plt.figure(figsize=(11.69, 8.27))\n\n ax1 = fig.add_subplot(111)\n ax1.set_xlabel('time')\n ax1.set_ylabel('loss', color='b')\n\n ax1.plot(loss.loss.rolling(wnd_size).mean(), color='b')\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n ax2.set_ylabel('learning rate', color='r')\n\n ax2.plot(loss.lr.rolling(wnd_size).mean(), 'r')\n"
] |
[
[
"pandas.concat",
"numpy.ceil",
"pandas.read_pickle",
"matplotlib.pyplot.figure"
]
] |
TeaKatz/Models_Corpus
|
[
"6d9e91eb97829e73d88ecfc4754492f6324ef383"
] |
[
"TransformerNet/layers/Decoder_test.py"
] |
[
"import tensorflow as tf\r\nfrom TransformerNet.layers import Encoder, Decoder\r\n\r\n\r\ndef Decoder_test(*args, **kwargs):\r\n inputs = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200) # (batch_size, input_seq_len)\r\n enc_output = Encoder(num_layers=2, d_model=512, num_heads=8,\r\n d_ff=2048, input_vocab_size=8500,\r\n maximum_position_encoding=10000)(inputs, False, None)\r\n target = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200) # (batch_size, target_seq_len)\r\n\r\n sample_decoder = Decoder(*args, **kwargs)\r\n output, attn = sample_decoder(target,\r\n enc_output=enc_output,\r\n training=False,\r\n look_ahead_mask=None,\r\n padding_mask=None)\r\n\r\n print(output.shape) # (batch_size, target_seq_len, d_model)\r\n print(attn['decoder_layer2_attention2'].shape) # (batch_size, target_seq_len, input_seq_len)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Decoder_test(num_layers=2, d_model=512, num_heads=8,\r\n d_ff=2048, target_vocab_size=8000,\r\n maximum_position_encoding=5000)\r\n"
] |
[
[
"tensorflow.random.uniform"
]
] |
Womcos/SCARF
|
[
"b90251bc23410cb810a7082ca75147a7aae21dec"
] |
[
"encoding/functions/rectify.py"
] |
[
"\n\"\"\"Rectify function\"\"\"\nimport torch\nfrom torch.autograd import Function\n\nfrom encoding import cpu\nif torch.cuda.device_count() > 0:\n from encoding import gpu\n\n__all__ = ['rectify']\n\nclass _rectify(Function):\n @staticmethod\n def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):\n ctx.save_for_backward(x)\n # assuming kernel_size is 3\n kernel_size = [k + 2 * (d - 1) for k,d in zip(kernel_size, dilation)]\n ctx.kernel_size = kernel_size\n ctx.stride = stride\n ctx.padding = padding\n ctx.dilation = dilation\n ctx.average = average\n if x.is_cuda:\n gpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)\n else:\n cpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)\n ctx.mark_dirty(y)\n return y\n\n @staticmethod\n def backward(ctx, grad_y):\n x, = ctx.saved_variables\n if x.is_cuda:\n gpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,\n ctx.padding, ctx.dilation, ctx.average)\n else:\n cpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,\n ctx.padding, ctx.dilation, ctx.average)\n ctx.mark_dirty(grad_y)\n return grad_y, None, None, None, None, None, None\n\nrectify = _rectify.apply\n"
] |
[
[
"torch.cuda.device_count"
]
] |
locdoan12121997/Indoor_Segmentation
|
[
"7e90fceb92e1be035a5eedec6ee53bf343bcdab6"
] |
[
"train_joint.py"
] |
[
"from models.joint_fpn import JointFpn\nfrom trainers.segmentation_trainer import SegmentationTrainer\nfrom data_generators.joint_data_generator import JointDataGenerator\nfrom data_generators.scenenet_rgbd_data_generator import ScenenetRGBDDataGenerator\nfrom utils.config import process_config\nfrom utils.dirs import create_dirs\nfrom utils.utils import get_args\nimport tensorflow as tf\nfrom utils import factory\nfrom tensorflow.keras.mixed_precision import experimental as mixed_precision\n\n\ndef main():\n # capture the config path from the run arguments\n # then process the json configuration file\n try:\n args = get_args()\n config = process_config(args.config)\n except:\n print(\"missing or invalid arguments\")\n exit(0)\n\n # use mixed precision for training\n if config.exp.mixed_precision:\n print('Use mixed precision training')\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_policy(policy)\n\n if config.exp.jpa_optimization:\n tf.config.optimizer.set_jit(True)\n\n # create the experiments dirs\n create_dirs([config.callbacks.tensorboard_log_dir,\n config.callbacks.checkpoint_dir])\n\n print('Create the training data generator.')\n if config.generator.is_scenenet == True:\n train_data = ScenenetRGBDDataGenerator(config)\n else:\n train_data = JointDataGenerator(config)\n\n validation_data = None\n if type(config.validation.img_dir) == str:\n print('Create the validation data generator.')\n validation_data = JointDataGenerator(\n config, is_training_set=False)\n\n print('Create the model.')\n model = factory.create(config.model.class_name)(config, train_data)\n\n print('Create the trainer')\n trainer = SegmentationTrainer(\n model, train_data, config, validation_generator=validation_data)\n\n print('Start training the model.')\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.keras.mixed_precision.experimental.Policy",
"tensorflow.keras.mixed_precision.experimental.set_policy",
"tensorflow.config.optimizer.set_jit"
]
] |
dumpram/stm32_real_time_test
|
[
"59b3e6bbd11498df032a180e06144c8046b14bbe"
] |
[
"scripts/test.py"
] |
[
"#!/usr/bin/python3\n\n# System imports\nimport argparse\nimport sys\nimport serial\n\n# Data processing imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport seaborn as sns\n\ndef checkparams(pwm_freq, pwm_duty, num_samples):\n check_ok = True\n if pwm_freq < 20 or pwm_freq > 100:\n print(\"Allowed PWM freq is between in [20, 100] kHz interval.\")\n check_ok = False\n if pwm_duty < 5 or pwm_duty > 80:\n print(\"Allowed PWM duty is between in [5, 80] percent interval.\")\n check_ok = False\n if num_samples < 1 or num_samples > 20000:\n print(\"Allowed samples num is between in [1, 8192] interval.\")\n check_ok = False\n if check_ok == False:\n sys.exit(1);\n\ndef main(baudrate, pwm_freq, pwm_duty, num_samples, delays_file):\n\n ser = serial.Serial(\n port='/dev/ttyUSB0',\n baudrate=baudrate,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n rtscts=0\n )\n\n if not ser.is_open:\n print(\"Error opening serial port device.\")\n sys.exit(1)\n\n checkparams(pwm_freq, pwm_duty, num_samples)\n\n print(\"Params OK!\")\n\n delays = np.empty(num_samples)\n\n ser.write(str.encode('{},{},{}\\r\\n'.format(\n pwm_freq, \n pwm_duty, \n num_samples)))\n\n timer_frequency = int(ser.readline().strip()) # MHz\n ser.write(str.encode('\\n')); # start measurement\n\n for i in range(num_samples):\n delays[i] = int(ser.readline().strip())\n\n ser.close()\n\n delays *= (1e-6 / timer_frequency);\n\n delays = np.delete(delays, 0);\n delays = np.delete(delays, 0);\n\n print(\"min: {}, avg: {}, max = {}\".format(\n np.min(delays),\n np.mean(delays),\n np.max(delays)));\n\n print(\"std: \", np.std(delays))\n\n\n LOG_FILE = open(delays_file, 'w')\n\n np.save(delays_file, delays);\n\n # mean = np.mean(delays);\n # maxi = np.max(delays);\n # mini = np.min(delays);\n\n # # sns.distplot(delays, norm_hist=True);\n\n # # plt.show();\n # # \n # delays *= 1e6;\n\n # plt.plot(delays)\n # plt.ylabel('Vrijeme kašnjenja (${\\mu}s$)')\n # plt.xlabel('Uzorci (padajući brid odziva)')\n # plt.show()\n\n # plt.figure(0)\n # n, bins, patches = plt.hist(delays, 50, normed=True, \n # histtype='step');\n\n # y = mlab.normpdf(bins, \n # np.mean(delays), \n # np.std(delays))\n\n # plt.show()\n # plt.figure(1)\n # plt.plot(bins, y)\n # plt.xlabel('Vrijeme kašnjenja (${\\mu}s$)')\n # plt.ylabel('Funkcija gustoće vjerojatnosti')\n # plt.show();\n \n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--baudrate', type=int, default=115200)\n parser.add_argument('--pwm_freq', type=int, default=20)\n parser.add_argument('--pwm_duty', type=int, default=50)\n parser.add_argument('--num_samples', type=int, default=20000)\n parser.add_argument('--delays_file', type=str, default='novo.npy')\n\n ARGS, other = parser.parse_known_args()\n\n main(ARGS.baudrate, ARGS.pwm_freq, ARGS.pwm_duty, ARGS.num_samples, \n ARGS.delays_file);\n"
] |
[
[
"numpy.max",
"numpy.delete",
"numpy.empty",
"numpy.min",
"numpy.save",
"numpy.mean",
"numpy.std"
]
] |
mnoukhov/ecn
|
[
"f1b838cfe2e27f7cc30cdf2e711b9a474b27a158"
] |
[
"src/ecn.py"
] |
[
"import argparse\nimport datetime\nimport json\nimport os\nimport time\nfrom os import path\n\nimport numpy as np\nimport torch\nfrom absl import flags\nfrom torch import optim\nfrom pprint import pprint\nimport wandb\n\nfrom src.alive_sieve import AliveSieve, SievePlayback\nfrom src.nets import AgentModel\nfrom src.rewards_lib import calc_rewards\nfrom src.sampling import (generate_test_batches,\n generate_training_batch,\n hash_batches)\n\nFLAGS = flags.FLAGS\n\n\ndef render_action(t, s, prop, term):\n agent = t % 2\n speaker = 'A' if agent == 0 else 'B'\n utility = s.utilities[:, agent]\n print(' ', end='')\n if speaker == 'B':\n print(' ', end='')\n\n print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')\n print(' %s/%s %s/%s %s/%s' % (\n prop[0][0].item(), s.pool[0][0].item(),\n prop[0][1].item(), s.pool[0][1].item(),\n prop[0][2].item(), s.pool[0][2].item(),\n ), end='')\n print('')\n\n if t + 1 == s.N[0]:\n print(' [out of time]')\n elif term[0][0]:\n print(' ACC')\n\n\ndef save_model(model_file, agent_models, agent_opts, start_time, episode):\n state = {}\n for i in range(2):\n state['agent%s' % i] = {}\n state['agent%s' % i]['model_state'] = agent_models[i].state_dict()\n state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()\n state['episode'] = episode\n state['elapsed_time'] = time.time() - start_time\n with open(model_file + '.tmp', 'wb') as f:\n torch.save(state, f)\n os.rename(model_file + '.tmp', model_file)\n\n\ndef load_model(model_file, agent_models, agent_opts):\n with open(model_file, 'rb') as f:\n state = torch.load(f)\n for i in range(2):\n agent_models[i].load_state_dict(state['agent%s' % i]['model_state'])\n agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state'])\n episode = state['episode']\n # create a kind of 'virtual' start_time\n start_time = time.time() - state['elapsed_time']\n return episode, start_time\n\n\nclass State(object):\n def __init__(self, N, pool, utilities):\n batch_size = N.size()[0]\n self.N = N\n self.pool = pool\n self.utilities = torch.zeros(batch_size, 2, 3, dtype=torch.int64, device=FLAGS.device)\n self.utilities[:, 0] = utilities[0]\n self.utilities[:, 1] = utilities[1]\n\n self.last_proposal = torch.zeros(batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n self.m_prev = torch.zeros(batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)\n\n def sieve_(self, still_alive_idxes):\n self.N = self.N[still_alive_idxes]\n self.pool = self.pool[still_alive_idxes]\n self.utilities = self.utilities[still_alive_idxes]\n self.last_proposal = self.last_proposal[still_alive_idxes]\n self.m_prev = self.m_prev[still_alive_idxes]\n\n\ndef run_episode(\n batch,\n agent_models,\n batch_size,\n testing,\n render=False,\n initial_agent=0):\n \"\"\"\n turning testing on means, we disable stochasticity: always pick the argmax\n \"\"\"\n\n s = State(**batch)\n\n sieve = AliveSieve(batch_size=batch_size)\n actions_by_timestep = []\n alive_masks = []\n\n # next two tensors wont be sieved, they will stay same size throughout\n # entire batch, we will update them using sieve.out_idxes[...]\n rewards = torch.zeros(batch_size, 3, device=FLAGS.device)\n num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device)\n term_matches_argmax_count = 0\n utt_matches_argmax_count = 0\n utt_stochastic_draws = 0\n num_policy_runs = 0\n prop_matches_argmax_count = 0\n prop_stochastic_draws = 0\n utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n 
prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n\n entropy_loss_by_agent = [\n torch.zeros(1, device=FLAGS.device),\n torch.zeros(1, device=FLAGS.device)\n ]\n if render:\n print(' ')\n print(' ',\n '{} {} {}'.format(*s.utilities[0][0].tolist()),\n ' ',\n '{} {} {}'.format(*s.pool[0].tolist()),\n ' ',\n '{} {} {}'.format(*s.utilities[0][1].tolist()))\n\n current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)\n prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)\n current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8)\n\n for t in range(FLAGS.max_timesteps):\n if FLAGS.linguistic:\n if FLAGS.normal_form and t % 2 == 1:\n _prev_message = prev_A_message\n else:\n _prev_message = s.m_prev\n else:\n _prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device)\n\n if FLAGS.proposal:\n if FLAGS.normal_form and t % 2 == 1:\n _prev_proposal = prev_A_proposal\n else:\n _prev_proposal = s.last_proposal\n else:\n _prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)\n\n # agent = t % 2\n agent = (initial_agent + t) % 2\n agent_model = agent_models[agent]\n (nodes, term_a, s.m_prev, this_proposal, _entropy_loss,\n _term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws,\n _prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(\n pool=s.pool,\n utility=s.utilities[:, agent],\n m_prev=_prev_message,\n prev_proposal=_prev_proposal,\n testing=testing,\n )\n\n entropy_loss_by_agent[agent] += _entropy_loss\n actions_by_timestep.append(nodes)\n term_matches_argmax_count += _term_matches_argmax_count\n num_policy_runs += sieve.batch_size\n utt_matches_argmax_count += _utt_matches_argmax_count\n utt_stochastic_draws += _utt_stochastic_draws\n prop_matches_argmax_count += _prop_matches_argmax_count\n prop_stochastic_draws += _prop_stochastic_draws\n\n if FLAGS.force_masking_comm:\n utt_mask[agent][sieve.out_idxes] |= _utt_mask\n prop_mask[agent][sieve.out_idxes] |= _prop_mask\n\n if FLAGS.proposal_termination and not FLAGS.normal_form:\n term_a = torch.prod(this_proposal == _prev_proposal,\n dim=1,\n keepdim=True)\n elif not FLAGS.proposal_termination and FLAGS.normal_form:\n #TODO which proposal to use here?\n if t % 2 == 1:\n term_a = (term_a * current_A_term)\n else:\n current_A_term = term_a\n term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)\n\n elif FLAGS.proposal_termination and FLAGS.normal_form:\n if t % 2 == 1:\n term_a = torch.prod(this_proposal == current_A_proposal,\n dim=1,\n keepdim=True)\n else:\n term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)\n\n\n if render and sieve.out_idxes[0] == 0:\n render_action(\n t=t,\n s=s,\n term=term_a,\n prop=this_proposal\n )\n\n new_rewards = calc_rewards(\n t=t,\n s=s,\n term=term_a,\n agent=agent,\n )\n rewards[sieve.out_idxes] = new_rewards\n s.last_proposal = this_proposal\n\n if FLAGS.normal_form and t % 2 == 0:\n prev_A_proposal = current_A_proposal\n current_A_proposal = this_proposal\n prev_A_message = current_A_message\n current_A_message = s.m_prev\n\n\n sieve.mark_dead(term_a)\n sieve.mark_dead(t + 1 >= s.N)\n 
alive_masks.append(sieve.alive_mask.clone())\n sieve.set_dead_global(num_steps, t + 1)\n if sieve.all_dead():\n break\n\n s.sieve_(sieve.alive_idxes)\n\n if FLAGS.normal_form:\n current_A_proposal = current_A_proposal[sieve.alive_idxes]\n prev_A_proposal = prev_A_proposal[sieve.alive_idxes]\n current_A_message = current_A_message[sieve.alive_idxes]\n prev_A_message = prev_A_message[sieve.alive_idxes]\n\n\n sieve.self_sieve_()\n\n if render:\n print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist()))\n print(' ')\n\n utt_mask_count = utt_mask.sum(dim=[1,2]).cpu().numpy()\n prop_mask_count = prop_mask.sum(dim=[1,2]).cpu().numpy()\n\n return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent,\n term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws,\n prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)\n\n\ndef safe_div(a, b):\n \"\"\"\n returns a / b, unless b is zero, in which case returns 0\n this is primarily for usage in cases where b might be systemtically zero, eg because comms are disabled or similar\n also accounts for a or b being tensors\n \"\"\"\n if isinstance(a, torch.Tensor):\n a = a.item()\n if isinstance(b, torch.Tensor):\n b = b.item()\n return 0 if b == 0 else a / b\n\n\ndef run(args):\n \"\"\"\n testing option will:\n - use argmax, ie disable stochastic draws\n - not run optimizers\n - not save model\n \"\"\"\n if args.wandb:\n if args.wandb_offline:\n os.environ[\"WANDB_MODE\"] = \"dryrun\"\n\n wandb.init(project='ecn',\n name=args.name,\n dir=f'{args.savedir}',\n group=args.wandb_group)\n wandb.config.update(args)\n wandb.config.update(FLAGS)\n flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']}\n args_dict = args.__dict__\n pprint(args_dict)\n pprint(flags_dict)\n\n os.makedirs(args.model_dir, exist_ok=True)\n os.makedirs(args.logdir, exist_ok=True)\n\n if args.seed is not None:\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_r = np.random.RandomState(args.seed)\n else:\n train_r = np.random\n\n test_r = np.random.RandomState(args.test_seed)\n test_batches = generate_test_batches(batch_size=args.batch_size,\n num_batches=5,\n random_state=test_r)\n test_hashes = hash_batches(test_batches)\n\n episode = 0\n start_time = time.time()\n agent_models = []\n agent_opts = []\n agent_name = ['A', 'B']\n for i in range(2):\n model = AgentModel(\n name=agent_name[i],\n term_entropy_reg=args.term_entropy_reg,\n utterance_entropy_reg=args.utterance_entropy_reg,\n proposal_entropy_reg=args.proposal_entropy_reg\n ).to(FLAGS.device)\n agent_models.append(model)\n agent_opts.append(optim.Adam(params=agent_models[i].parameters()))\n if args.wandb:\n wandb.watch(agent_models)\n if path.isfile(args.model_file) and not args.no_load:\n episode, start_time = load_model(\n model_file=args.model_file,\n agent_models=agent_models,\n agent_opts=agent_opts)\n print('loaded model')\n elif args.testing:\n print('')\n print('ERROR: must have loadable model to use --testing option')\n print('')\n return\n last_print = time.time()\n rewards_sum = torch.zeros(3, device=FLAGS.device)\n steps_sum = 0\n count_sum = 0\n f_log = open(args.log_file, 'w')\n all_args = {**args_dict, **flags_dict}\n f_log.write('meta: %s\\n' % json.dumps(all_args))\n last_save = time.time()\n baseline = torch.zeros(3, device=FLAGS.device)\n term_matches_argmax_count = 0\n num_policy_runs = 0\n utt_matches_argmax_count = 0\n utt_stochastic_draws = 0\n 
prop_matches_argmax_count = 0\n prop_stochastic_draws = 0\n utt_mask_count = np.array([0,0])\n prop_mask_count = np.array([0,0])\n while episode < args.episodes:\n render = (episode % args.render_every_episode == 0)\n split = 2 if FLAGS.randomize_first else 1\n agent_losses = [0,0]\n both_rewards = []\n\n for i in range(2):\n agent_opts[i].zero_grad()\n\n for initial_agent in range(split):\n batch = generate_training_batch(batch_size=args.batch_size // split,\n test_hashes=test_hashes,\n random_state=train_r)\n (actions, rewards, steps, alive_masks, entropy_loss_by_agent,\n _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,\n _prop_matches_argmax_count, _prop_stochastic_draws,\n _utt_mask_count, _prop_mask_count) = run_episode(\n batch=batch,\n agent_models=agent_models,\n batch_size=args.batch_size // split,\n render=render,\n initial_agent=initial_agent,\n testing=args.testing)\n term_matches_argmax_count += _term_matches_argmax_count\n utt_matches_argmax_count += _utt_matches_argmax_count\n utt_stochastic_draws += _utt_stochastic_draws\n num_policy_runs += _num_policy_runs\n prop_matches_argmax_count += _prop_matches_argmax_count\n prop_stochastic_draws += _prop_stochastic_draws\n utt_mask_count += _utt_mask_count\n prop_mask_count += _prop_mask_count\n\n if not args.testing:\n reward_loss_by_agent = [0, 0]\n baselined_rewards = rewards - baseline\n rewards_by_agent = []\n for i in range(2):\n if FLAGS.prosocial:\n rewards_by_agent.append(baselined_rewards[:, 2])\n else:\n rewards_by_agent.append(baselined_rewards[:, i])\n sieve_playback = SievePlayback(alive_masks)\n for t, global_idxes in sieve_playback:\n agent = (initial_agent + t) % 2\n if len(actions[t]) > 0:\n for action in actions[t]:\n _rewards = rewards_by_agent[agent]\n _reward = _rewards[global_idxes].float().contiguous().view(\n sieve_playback.batch_size, 1)\n _reward_loss = - (action * _reward)\n _reward_loss = _reward_loss.sum()\n reward_loss_by_agent[agent] += _reward_loss\n\n for i in range(2):\n loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]\n loss.backward()\n\n rewards_sum += rewards.detach().sum(0)\n steps_sum += steps.sum()\n count_sum += args.batch_size // split\n both_rewards.append(rewards)\n\n\n for i in range(2):\n agent_opts[i].step()\n\n rewards = torch.cat(both_rewards).detach()\n baseline = 0.7 * baseline + 0.3 * rewards.mean(0).detach()\n\n if render:\n \"\"\"\n run the test batches, print the results\n \"\"\"\n test_rewards_sum = np.zeros(3)\n test_count_sum = len(test_batches) * args.batch_size\n test_num_policy_runs = 0\n test_utt_mask_count = [0,0]\n test_prop_mask_count = [0,0]\n test_utt_mask_count = np.array([0,0])\n test_prop_mask_count = np.array([0,0])\n for test_batch in test_batches:\n (actions, test_rewards, steps, alive_masks, entropy_loss_by_agent,\n _term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,\n _prop_matches_argmax_count, _prop_stochastic_draws,\n _test_utt_mask_count, _test_prop_mask_count) = run_episode(\n batch=test_batch,\n agent_models=agent_models,\n batch_size=args.batch_size,\n render=True,\n testing=True)\n test_rewards_sum += test_rewards.sum(0).cpu().numpy()\n test_num_policy_runs += _test_num_policy_runs\n test_utt_mask_count += _test_utt_mask_count\n test_prop_mask_count += _test_prop_mask_count\n\n time_since_last = time.time() - last_print\n rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum,\n rewards_sum[1] / count_sum,\n rewards_sum[2] / count_sum)\n 
test_rewards_str = '%.2f,%.2f,%.2f' % (test_rewards_sum[0] / test_count_sum,\n test_rewards_sum[1] / test_count_sum,\n test_rewards_sum[2] / test_count_sum)\n baseline_str = '%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])\n utt_mask_pct = utt_mask_count / (3 * count_sum)\n test_utt_mask_pct = test_utt_mask_count / (3 * test_count_sum)\n prop_mask_pct = prop_mask_count / (3 * count_sum)\n test_prop_mask_pct = test_prop_mask_count / (3 * test_count_sum)\n print('test {}'.format(test_rewards_str))\n print('train {}'.format(rewards_str))\n print('base {}'.format(baseline_str))\n print('ep {}, {} games/sec, {:2.2f} avg steps'.format(\n episode,\n int(count_sum / time_since_last),\n steps_sum.item() / count_sum\n ))\n print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format(\n term_matches_argmax_count / num_policy_runs,\n safe_div(utt_matches_argmax_count, utt_stochastic_draws),\n prop_matches_argmax_count / prop_stochastic_draws\n ))\n if FLAGS.force_masking_comm:\n print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(\n *utt_mask_pct, *test_utt_mask_pct,\n ))\n print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(\n *prop_mask_pct, *test_prop_mask_pct,\n ))\n\n episode_log = {\n 'episode': episode,\n 'avg_reward_A': (rewards_sum[0] / count_sum).item(),\n 'avg_reward_B': (rewards_sum[1] / count_sum).item(),\n 'avg_reward_0': (rewards_sum[2] / count_sum).item(),\n 'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(),\n 'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(),\n 'test_reward': (test_rewards_sum[2] / test_count_sum).item(),\n 'avg_steps': torch.true_divide(steps_sum, count_sum).item(),\n 'games_sec': (count_sum / time_since_last),\n 'elapsed': time.time() - start_time,\n 'argmaxp_term': term_matches_argmax_count / num_policy_runs,\n 'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),\n 'argmaxp_prop': prop_matches_argmax_count / prop_stochastic_draws,\n 'utt_unmasked_A': utt_mask_pct[0],\n 'utt_unmasked_B': utt_mask_pct[1],\n 'prop_unmasked_A': prop_mask_pct[0],\n 'prop_unmasked_B': prop_mask_pct[1],\n 'test_utt_unmasked_A': test_utt_mask_pct[0],\n 'test_utt_unmasked_B': test_utt_mask_pct[1],\n 'test_prop_unmasked_A': test_prop_mask_pct[0],\n 'test_prop_unmasked_B': test_prop_mask_pct[1],\n }\n f_log.write(json.dumps(episode_log) + '\\n')\n f_log.flush()\n if args.wandb:\n wandb.log(episode_log)\n last_print = time.time()\n steps_sum = 0\n rewards_sum.fill_(0)\n term_matches_argmax_count = 0\n num_policy_runs = 0\n utt_matches_argmax_count = 0\n utt_stochastic_draws = 0\n prop_matches_argmax_count = 0\n prop_stochastic_draws = 0\n count_sum = 0\n utt_mask_count.fill(0)\n prop_mask_count.fill(0)\n\n if (not args.testing\n and not args.no_save\n and episode > 0\n and episode % args.save_every_episode == 0):\n save_model(model_file=args.model_file,\n agent_models=agent_models,\n agent_opts=agent_opts,\n start_time=start_time,\n episode=episode)\n print('saved model')\n\n episode += 1\n\n if (not args.no_save and\n not args.testing):\n save_model(\n model_file=args.model_file,\n agent_models=agent_models,\n agent_opts=agent_opts,\n start_time=start_time,\n episode=episode)\n print('saved model')\n f_log.close()\n"
] |
[
[
"torch.zeros",
"numpy.array",
"torch.prod",
"torch.cat",
"torch.true_divide",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.random.seed",
"torch.save",
"torch.manual_seed",
"torch.full",
"torch.load"
]
] |
0xflotus/robogym
|
[
"5ec2fcbda9828941fe3072792dd25fb5a915bbbb",
"5ec2fcbda9828941fe3072792dd25fb5a915bbbb"
] |
[
"robogym/randomization/tests/test_randomization.py",
"robogym/envs/rearrange/tests/test_mesh.py"
] |
[
"import unittest\n\nimport attr\nimport numpy as np\n\nfrom robogym.randomization.env import (\n EnvActionRandomizer,\n EnvObservationRandomizer,\n EnvParameterRandomizer,\n EnvRandomization,\n EnvSimulationRandomizer,\n build_randomizable_param,\n)\nfrom robogym.randomization.observation import ObservationRandomizer\nfrom robogym.randomization.parameters import FloatRandomizerParameter\n\n\nclass DummyRandomizerParameter(FloatRandomizerParameter):\n def __init__(self, name, val):\n super().__init__(\n name, val, value_range=(-1.0, 1.0), delta=1.0,\n )\n\n\n@attr.s(auto_attribs=True)\nclass DummyNestedEnvParameter:\n c: int = build_randomizable_param(1, low=-3, high=3)\n\n\n@attr.s(auto_attribs=True)\nclass DummyEnvParameter:\n a: int = build_randomizable_param(0, low=-5, high=5)\n b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)\n\n x: int = 0 # Non randomizable parameter.\n\n nested: DummyNestedEnvParameter = DummyNestedEnvParameter()\n\n\nclass DummyObservationRandomizer(ObservationRandomizer):\n def __init__(self, name, val):\n super().__init__(name)\n self.val = self.register_parameter(val)\n\n def _randomize(self, target, random_state):\n target[self.val.name] = self.val.get_value()\n return target\n\n\nclass TestRandomization(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self.random_state = np.random.RandomState()\n\n def test_randomizer_parameters(self):\n parameter = DummyRandomizerParameter(\"foo\", 0.0)\n\n assert parameter.get_value() == 0.0\n assert parameter.get_range() == (-1.0, 1.0)\n assert parameter.get_delta() == 1.0\n\n parameter.set_value(1.0)\n assert parameter.get_value() == 1.0\n\n def test_randomizer_basic(self):\n \"\"\"\n Test functionality of basic randomizer.\n \"\"\"\n randomizer = EnvParameterRandomizer(DummyEnvParameter())\n\n assert len(randomizer.get_parameters()) == 3\n\n # Make sure register duplicate parameter is not allowed.\n with self.assertRaises(AssertionError):\n randomizer.register_parameter(DummyRandomizerParameter(\"a\", 1))\n\n randomizer.register_parameter(DummyRandomizerParameter(\"d\", 1))\n\n assert len(randomizer.get_parameters()) == 4\n\n randomizer.get_parameter(\"a\").set_value(1)\n randomizer.get_parameter(\"b\").set_value(0.5)\n randomizer.get_parameter(\"c\").set_value(2)\n\n parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)\n assert parameters.a == 1\n assert parameters.b == 0.5\n assert parameters.nested.c == 2\n\n randomizer.disable()\n\n parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)\n randomizer.get_parameter(\"a\").set_value(1)\n assert parameters.a == 0\n\n def test_observation_randomizer(self):\n randomizer = EnvObservationRandomizer(\n [\n DummyObservationRandomizer(\"r1\", DummyRandomizerParameter(\"foo\", 0.0)),\n DummyObservationRandomizer(\"r2\", DummyRandomizerParameter(\"bar\", 1.0)),\n ]\n )\n\n assert len(randomizer.get_randomizers()) == 2\n assert len(randomizer.get_parameters()) == 2\n obs = randomizer.randomize({}, self.random_state)\n assert obs[\"foo\"] == 0.0\n assert obs[\"bar\"] == 1.0\n\n def test_env_randomization(self):\n randomization = EnvRandomization(\n parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),\n observation_randomizer=EnvObservationRandomizer(\n [\n DummyObservationRandomizer(\n \"r1\", DummyRandomizerParameter(\"foo\", 0.0)\n ),\n ]\n ),\n action_randomizer=EnvActionRandomizer([]),\n simulation_randomizer=EnvSimulationRandomizer([]),\n )\n\n randomization.update_parameter(\"observation.r1:foo\", 
0.5)\n\n parameter = randomization.get_parameter(\"observation.r1:foo\")\n assert parameter.get_value() == 0.5\n",
"import numpy as np\nimport pytest\n\nfrom robogym.envs.rearrange.ycb import make_env\n\n\n@pytest.mark.parametrize(\"mesh_scale\", [0.5, 1.0, 1.5])\ndef test_mesh_centering(mesh_scale):\n # We know these meshe stls. are not center properly.\n for mesh_name in [\"005_tomato_soup_can\", \"073-b_lego_duplo\", \"062_dice\"]:\n env = make_env(\n parameters={\n \"mesh_names\": mesh_name,\n \"simulation_params\": {\"mesh_scale\": mesh_scale},\n }\n ).unwrapped\n obj_pos = env.mujoco_simulation.get_object_pos()\n bounding_pos = env.mujoco_simulation.get_object_bounding_boxes()[:, 0, :]\n\n assert np.allclose(obj_pos, bounding_pos, atol=5e-3)\n"
] |
[
[
"numpy.random.RandomState"
],
[
"numpy.allclose"
]
] |
csmiler/ProbeGAN
|
[
"6155f5ff33b0673df20b9dbbcbc3e63b75228ef0"
] |
[
"classifiers/robust_classifier.py"
] |
[
"#################################################\n# Retrieve robust classifier from:\n# https://github.com/MadryLab/robustness\n#################################################\n\nfrom robustness.datasets import CIFAR, RestrictedImageNet, ImageNet\nfrom robustness.model_utils import make_and_restore_model\n\ndef get_robust_classifier(dataset, model_path, parallel=True):\n if dataset == \"cifar10\":\n model, _ = make_and_restore_model(arch='resnet50', dataset=CIFAR(), \\\n resume_path=model_path, parallel=parallel)\n elif dataset == \"RestrictedImageNet\":\n model, _ = make_and_restore_model(arch='resnet50', dataset=RestrictedImageNet(''), \\\n resume_path=model_path, parallel=parallel)\n elif dataset == \"ImageNet\":\n model, _ = make_and_restore_model(arch='resnet50', dataset=ImageNet(''), \\\n resume_path=model_path, parallel=parallel)\n else:\n raise NotImplementedError(\"Model for {} is not implemented!\".format(dataset))\n\n model.eval()\n return model\n\nif __name__ == \"__main__\":\n netC = get_robust_classifier(\"cifar10\", \"pretrained/cifar_l2_0_5.pt\")\n import torch, torchvision\n import numpy as np\n import torchvision.transforms as transforms\n from torch.nn import functional as F\n\n with torch.no_grad():\n test_dir = \"../output_imgs/cifar10_new9_cLoss10.0\"\n transform=transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor()#,\n# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n dataset = torchvision.datasets.ImageFolder(test_dir, transform=transform)\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=16, num_workers=16, shuffle=False)\n for item, data in enumerate(data_loader):\n print(data[0].shape)\n output, _ = netC(data[0])\n output = F.softmax(output).data.cpu().numpy()\n print(output.shape)\n argmax = np.argmax(output, axis=-1)\n print(argmax.squeeze())\n maxp = np.amax(output, axis=-1)\n print(maxp.squeeze())\n"
] |
[
[
"torch.no_grad",
"numpy.argmax",
"torch.utils.data.DataLoader",
"numpy.amax",
"torch.nn.functional.softmax"
]
] |
jms7446/PRML
|
[
"39325e085597cb48623f331d63726eea0dc9a714"
] |
[
"prmlmy/plot_util.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom prmlmy.util import cv_, norm2s, calc_range\n\n\ndef plot_decision_boundary(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,\n title=None, pad_ratio=0.2, ax=None):\n ax = ax or plt\n x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)\n x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)\n if y_train is None:\n y_train = np.zeros(X_train.shape[0])\n\n x1s = np.linspace(x1_range[0], x1_range[1], num=points)\n x2s = np.linspace(x2_range[0], x2_range[1], num=points)\n x1, x2 = np.meshgrid(x1s, x2s)\n x = np.array([x1, x2]).reshape(2, -1).T\n y = model.predict(x).reshape(points, points)\n\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)\n ax.contourf(x1, x2, y, alpha=0.2)\n if title:\n ax.set_title(title)\n\n\ndef plot_decision_proba(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,\n title=None, pad_ratio=0.2, ax=None):\n ax = ax or plt\n x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)\n x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)\n if y_train is None:\n y_train = np.zeros(X_train.shape[0])\n\n x1s = np.linspace(x1_range[0], x1_range[1], num=points)\n x2s = np.linspace(x2_range[0], x2_range[1], num=points)\n x1, x2 = np.meshgrid(x1s, x2s)\n x = np.array([x1, x2]).reshape(2, -1).T\n y = model.proba(x).reshape(points, points)\n\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)\n ax.contourf(x1, x2, y, np.linspace(0, 1, 5), alpha=0.2)\n if title:\n ax.set_title(title)\n\n\ndef get_figsize_default(ncols, nrows):\n width = ncols * 5 + 1\n height = nrows * 4 + 1\n return width, height\n\n\ndef grid_plot(rows, cols, plot_func, row_names=None, col_names=None, figsize=None, *args, **kwargs):\n row_names = row_names or [str(row) for row in rows]\n col_names = col_names or [str(col) for col in cols]\n figsize = figsize or get_figsize_default(len(cols), len(rows))\n fig, axs = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=figsize)\n axs = axs.reshape(len(rows), len(cols))\n for row_axs, row, row_name in zip(axs, rows, row_names):\n for ax, col, col_name in zip(row_axs, cols, col_names):\n title = \":\".join([row_name, col_name])\n plot_func(row, col, title, ax=ax, *args, **kwargs)\n"
] |
[
[
"numpy.array",
"numpy.linspace",
"numpy.meshgrid",
"numpy.zeros"
]
] |
richielo/Medical_Localization_RL
|
[
"58653170824ee087f10b6c8650ee9bc8e05b64e9"
] |
[
"pneumoRL/image_util.py"
] |
[
"import os \nimport sys\nimport numpy as np\nfrom PIL import Image\nimport torch\n\n#TODO - add save function, these functions can be used to check movement \ndef crop_image(image_array, bb):\n image_array_copy = image_array.clone()\n y_min = int(bb[0])\n x_min = int(bb[1])\n height = int(bb[2])\n width = int(bb[3])\n y_max = y_min + height\n x_max = x_min + width\n return image_array[y_min:y_max, x_min:x_max]\n\n#Keep image size, set pixel value outside of bounding box as 0\ndef crop_pad_image(image_array, bb):\n image_array_copy = image_array.clone()\n y_min = int(bb[0])\n x_min = int(bb[1])\n height = int(bb[2])\n width = int(bb[3])\n y_max = y_min + height\n x_max = x_min + width\n mask_array = np.zeros(image_array.shape, dtype=int)\n mask_array[y_min:y_max, x_min:x_max] = 1\n zero_array = np.where(mask_array==0)\n image_array_copy[zero_array[0],zero_array[1]] = 0\n return image_array_copy\n \ndef set_bb_to_black(image_array, bb):\n image_array_copy = image_array.clone()\n y_min = int(bb[0])\n x_min = int(bb[1])\n height = int(bb[2])\n width = int(bb[3])\n y_max = y_min + height\n x_max = x_min + width\n mask_array = np.zeros(image_array.shape, dtype=int)\n mask_array[y_min:y_max, x_min:x_max] = 1\n zero_array = np.where(mask_array==1)\n image_array_copy[zero_array[0],zero_array[1]] = 0\n return image_array_copy\n\ndef transform_img_for_model(image_array, transforms=None):\n image_array_copy = np.copy(image_array)\n #image_array_copy.unsqueeze_(0)\n image_array_copy = np.expand_dims(image_array_copy, axis=2)\n if(transforms is None):\n image_array_copy = torch.from_numpy(image_array_copy).repeat(3, 1, 1)\n else:\n image_array_copy = transforms(image_array_copy).repeat(3, 1, 1)\n return image_array_copy\n\ndef save_image_from_tensor(image_array, path):\n og = Image.fromarray(image_array.numpy())\n og = og.convert('RGB')\n og.save(path)\n \ndef resize_image(image_array, width, height):\n og = Image.fromarray(image_array.numpy())\n og = og.convert('RGB')\n og = og.resize((width, height))\n og = og.convert('L')\n return np.array(og)\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.copy",
"torch.from_numpy",
"numpy.where",
"numpy.expand_dims"
]
] |
mhhm2005eg/CarND-Advanced-Lane-Lines
|
[
"1f571e4714df0dcca21fbf2b09b5af73caddb8f4"
] |
[
"color.py"
] |
[
"import numpy as np\nimport cv2\nfrom PIL import Image\n\nimg_form = \"jpg\"\nimg_out_dir = \"./output_images\"\nvid_form = \"mp4\"\nvid_out_dir = \"./test_videos_output\"\n\nclass array_image:\n def __init__(self):\n self.image = None\n self.binary_image = None\n\n def store(self, name):\n name = img_out_dir + \"/\" + name + \".\" + img_form\n print(\"Saving image: \" + name)\n im = Image.fromarray(self.binary_image)\n im.save(name)\n\nclass color(array_image):\n def __init__(self, caller=None, color = \"Gr\"):\n threshold = {'R':(200, 255), 'G':(200, 255), 'B':(200, 255), 'H':(15, 100), 'L':(0,255), 'S':(90, 255), 'Gr':(200, 255)}\n self.available = False\n self.binary_available = False\n self.image = None\n self.binary_image = None\n self.caller = caller\n self.color = color\n self.threshold = threshold[self.color]\n\n def get(self, binary=False, masked=False, thresh=None):\n ret = 0\n if (self.available) & (thresh==None):\n if binary:\n if self.binary_available:\n ret = self.binary_image\n else:\n self.binary_image = self.color_select(color=self.color, binary=True)\n self.binary_available = True\n ret = self.binary_image\n else:\n ret = self.image\n else:\n self.image = self.color_select(color=self.color, binary=False)\n self.available = True\n if binary:\n self.binary_image = self.color_select(color=self.color, binary=True, thresh=None)\n self.binary_available = True\n ret = self.binary_image\n else:\n ret = self.image\n\n if masked:\n ret = self.caller.region_of_interest(ret)\n\n return ret\n\n def grayscale(self):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n def color_select(self, color='R', binary = True, thresh=None):\n #image received is RGB mpimg.imread\n img = np.copy(self.caller.image)\n RGB_colors = {'R':0, 'G':1, 'B':2}\n HLS_colors = {'H':0, 'L':1, 'S':2}\n if color in RGB_colors:\n channel = img[:,:,RGB_colors[color]]\n elif color in HLS_colors:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n channel = img[:, :, HLS_colors[color]]\n else:\n channel = self.grayscale()\n if binary:\n if not thresh:\n thresh = self.threshold\n\n binary_output = np.zeros_like(img[:,:,0])\n binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n return binary_output\n else:\n return channel\n\n"
] |
[
[
"numpy.zeros_like",
"numpy.copy"
]
] |
ankitshah009/dcase_util
|
[
"738571ce78faf60b0fdfa1d59fd42f42c8944f3d",
"738571ce78faf60b0fdfa1d59fd42f42c8944f3d"
] |
[
"dcase_util/keras/data.py",
"dcase_util/datasets/tut.py"
] |
[
"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\nimport numpy\nimport copy\n\nfrom dcase_util.ui import FancyStringifier, FancyLogger\nfrom dcase_util.containers import ContainerMixin\nfrom dcase_util.data import DataBuffer\n\n\ndef get_keras_data_sequence_class():\n # Use getter method to avoid importing Keras when importing dcase_util. This allows user to decide when import\n # Keras, so that user can set random seeds before Keras import.\n\n from keras.utils import Sequence\n\n class KerasDataSequence(Sequence, ContainerMixin):\n def __init__(self, item_list=None, batch_size=64,\n buffer_size=None,\n data_processing_chain=None, meta_processing_chain=None,\n data_processing_chain_callback_on_epoch_end=None, meta_processing_chain_callback_on_epoch_end=None,\n transformer_callbacks=None,\n refresh_buffer_on_epoch=False,\n data_format='channels_last',\n target_format='single_target_per_sequence',\n **kwargs):\n \"\"\"Constructor\n\n Parameters\n ----------\n item_list : list or dict\n Items in the data sequence. List containing multi-level dictionary with first level key\n 'data' and 'meta'. Second level should contain parameters for process method in the processing chain.\n Default value None\n\n batch_size : int\n Batch size (item count).\n Default value 64\n\n buffer_size : int\n Internal buffer size (item count). By setting this sufficiently high, data sequence generator can\n possibly fit all sequence items into internal buffer and can fetch without loading from disk.\n Set to None, if no internal buffer used.\n Default value None\n\n data_processing_chain : ProcessingChain\n Data processing chain.\n Default value None\n\n meta_processing_chain : ProcessingChain\n Meta processing chain.\n Default value None\n\n data_processing_chain_callback_on_epoch_end : list of dict\n Can be used to call methods with parameters for processing chain at the end of epoch. This can be\n used to control processing chain's internal status (e.g. roll the data).\n Default value None\n\n meta_processing_chain_callback_on_epoch_end : list of dict\n Can be used to call methods with parameters for processing chain at the end of epoch. This can be\n used to control processing chain's internal status (e.g. roll the data).\n Default value None\n\n transformer_callbacks : list of func\n Transformer callbacks to jointly process data and meta. This can be used for local data modification and\n data augmentation.\n Default value None\n\n refresh_buffer_on_epoch : bool\n In case internal data buffer is used, force data and meta refresh at the end of each epoch. 
Use this if\n data is modified/augmented differently for each epoch.\n In case data_processing_chain_callback_on_epoch_end or meta_processing_chain_callback_on_epoch_end is\n used, this parameter is automatically set to True.\n Default value False\n\n data_format : str\n Keras like data format, controls where channel should be added.\n Possible values ['channels_first', 'channels_last']\n Default value 'channels_last'\n\n target_format : str\n Meta data interpretation in the relation to the data items.\n Default value 'single_target_per_segment'\n\n \"\"\"\n\n # Run ContainerMixin init\n ContainerMixin.__init__(self, **kwargs)\n\n self._data_shape = None\n self._data_axis = None\n\n self.item_list = copy.copy(item_list)\n\n self.batch_size = batch_size\n\n self.buffer_size = buffer_size\n self.data_refresh_on_epoch = refresh_buffer_on_epoch\n\n if data_format is None:\n data_format = 'channels_last'\n\n self.data_format = data_format\n if self.data_format not in ['channels_first', 'channels_last']:\n message = '{name}: Unknown data_format [{data_format}].'.format(\n name=self.__class__.__name__,\n data_format=self.data_format\n )\n self.logger.exception(message)\n raise NotImplementedError(message)\n\n if target_format is None:\n target_format = 'single_target_per_sequence'\n\n self.target_format = target_format\n if self.target_format not in ['same', 'single_target_per_sequence']:\n message = '{name}: Unknown target_format [{target_format}].'.format(\n name=self.__class__.__name__,\n target_format=self.target_format\n )\n self.logger.exception(message)\n raise NotImplementedError(message)\n\n if data_processing_chain_callback_on_epoch_end is None:\n data_processing_chain_callback_on_epoch_end = []\n\n self.data_processing_chain_callback_on_epoch_end = data_processing_chain_callback_on_epoch_end\n\n if self.data_processing_chain_callback_on_epoch_end:\n self.data_refresh_on_epoch = True\n\n if meta_processing_chain_callback_on_epoch_end is None:\n meta_processing_chain_callback_on_epoch_end = []\n\n self.meta_processing_chain_callback_on_epoch_end = meta_processing_chain_callback_on_epoch_end\n\n if transformer_callbacks is None:\n transformer_callbacks = []\n\n self.transformer_callbacks = transformer_callbacks\n\n # Processing chains\n self.data_processing_chain = data_processing_chain\n self.meta_processing_chain = meta_processing_chain\n\n if self.buffer_size is not None:\n # Initialize data buffer\n self.data_buffer = DataBuffer(\n size=self.buffer_size\n )\n\n else:\n self.data_buffer = None\n\n def __str__(self):\n ui = FancyStringifier()\n output = ''\n output += ui.class_name(self.__class__.__name__) + '\\n'\n\n output += ui.data(\n indent=2,\n field='Batch size',\n value=self.batch_size\n ) + '\\n'\n\n output += ui.data(\n indent=2,\n field='Epoch size',\n value=len(self), unit='batches'\n ) + '\\n'\n\n shape = self.data_shape\n axis = self.data_axis\n\n output += ui.data(field='Data item shape', value=shape) + '\\n'\n\n output += ui.data(\n indent=4,\n field='Time',\n value=shape[axis['time_axis']]\n ) + '\\n'\n\n output += ui.data(\n indent=4,\n field='Data',\n value=shape[axis['data_axis']]\n ) + '\\n'\n\n if 'sequence_axis' in axis:\n output += ui.data(\n indent=4,\n field='Sequence',\n value=shape[axis['sequence_axis']]\n ) + '\\n'\n\n output += ui.data(\n indent=4,\n field='Axis',\n value=axis\n ) + '\\n'\n\n if self.buffer_size is not None:\n output += ui.line(field='Buffer') + '\\n'\n output += ui.data(\n indent=4,\n field='buffer_size',\n value=self.buffer_size,\n 
unit='items'\n ) + '\\n'\n output += ui.data(\n indent=4,\n field='buffer usage',\n value=self.data_buffer.count,\n unit='items'\n ) + '\\n'\n output += ui.data(\n indent=4,\n field='buffer usage',\n value=(self.data_buffer.count / float(self.buffer_size)) * 100,\n unit='%'\n ) + '\\n'\n\n return output\n\n def __getitem__(self, index):\n start_index = index * self.batch_size\n stop_index = (index + 1) * self.batch_size\n\n batch_buffer_data = []\n batch_buffer_meta = []\n\n for item_index in range(start_index, stop_index):\n if item_index < len(self.item_list):\n item = self.item_list[item_index]\n\n # Load item data\n data, meta = self.process_item(item=item)\n\n if self.transformer_callbacks:\n # Apply transformer callbacks\n for callback in self.transformer_callbacks:\n data, meta = callback(\n data=data,\n meta=meta\n )\n\n # Collect data\n batch_buffer_data.append(data.data)\n\n # Collect meta\n if self.target_format == 'single_target_per_sequence':\n # Collect single target per sequence\n for i in range(0, data.shape[data.sequence_axis]):\n batch_buffer_meta.append(meta.data[:, 0])\n\n elif self.target_format == 'same':\n # Collect single target per sequence\n batch_buffer_meta.append(\n numpy.repeat(\n a=meta.data,\n repeats=data.length,\n axis=1\n )\n )\n\n if len(data.shape) == 2:\n # Prepare 2D data, stack along time_axis\n if data.time_axis == 0:\n batch_buffer_data = numpy.vstack(batch_buffer_data)\n\n elif data.time_axis == 1:\n batch_buffer_data = numpy.hstack(batch_buffer_data)\n\n elif len(data.shape) == 3:\n # Prepare 3D data, stack along sequence_axis\n if data.sequence_axis == 0:\n batch_buffer_data = numpy.vstack(batch_buffer_data)\n\n elif data.sequence_axis == 1:\n batch_buffer_data = numpy.hstack(batch_buffer_data)\n\n elif data.sequence_axis == 2:\n batch_buffer_data = numpy.dstack(batch_buffer_data)\n\n # Add channel dimension to the data\n if self.data_format == 'channels_first':\n batch_buffer_data = numpy.expand_dims(\n batch_buffer_data,\n axis=0\n )\n\n elif self.data_format == 'channels_last':\n batch_buffer_data = numpy.expand_dims(\n batch_buffer_data,\n axis=3\n )\n\n # Prepare meta\n if self.target_format == 'single_target_per_sequence':\n batch_buffer_meta = numpy.vstack(batch_buffer_meta)\n\n elif self.target_format == 'same':\n batch_buffer_meta = numpy.hstack(batch_buffer_meta).T\n\n return batch_buffer_data, batch_buffer_meta\n\n def __len__(self):\n num_batches = int(numpy.ceil(len(self.item_list) / float(self.batch_size)))\n\n if num_batches > 0:\n return num_batches\n else:\n return 1\n\n @property\n def data_shape(self):\n if self._data_shape is None:\n # Load first item and get data length\n data = self.process_item(\n item=self.item_list[0]\n )[0]\n\n self._data_shape = data.shape\n\n self._data_axis = {\n 'time_axis': data.time_axis,\n 'data_axis': data.data_axis\n }\n\n if hasattr(data,'sequence_axis'):\n self._data_axis['sequence_axis']= data.sequence_axis\n\n return self._data_shape\n\n @property\n def data_axis(self):\n if self._data_axis is None:\n # Load first item and get data length\n data = self.process_item(\n item=self.item_list[0]\n )[0]\n\n self._data_shape = data.shape\n self._data_axis = {\n 'time_axis': data.time_axis,\n 'data_axis': data.data_axis\n }\n\n if hasattr(data, 'sequence_axis'):\n self._data_axis['sequence_axis'] = data.sequence_axis\n\n return self._data_axis\n\n @property\n def data_size(self):\n shape = self.data_shape\n axis = self.data_axis\n size = {\n 'time': shape[axis['time_axis']],\n 'data': 
shape[axis['data_axis']],\n }\n\n if 'sequence_axis' in axis:\n size['sequence'] = shape[axis['sequence_axis']]\n\n return size\n\n def process_item(self, item):\n if self.data_buffer is not None:\n # Fetch data and meta through internal buffer\n if not self.data_buffer.key_exists(key=item):\n data = self.data_processing_chain.process(**item['data'])\n meta = self.meta_processing_chain.process(**item['meta'])\n\n self.data_buffer.set(\n key=item,\n data=data,\n meta=meta\n )\n\n else:\n data, meta = self.data_buffer.get(key=item)\n\n else:\n # Fetch data and meta directly.\n data = self.data_processing_chain.process(**item['data'])\n meta = self.meta_processing_chain.process(**item['meta'])\n\n return data, meta\n\n def on_epoch_end(self):\n if self.data_processing_chain_callback_on_epoch_end:\n for callback_parameters in self.data_processing_chain_callback_on_epoch_end:\n if 'method_name' in callback_parameters:\n self.data_processing_chain.call_method(\n method_name=callback_parameters['method_name'],\n parameters=callback_parameters.get('parameters', {})\n )\n\n if self.meta_processing_chain_callback_on_epoch_end:\n for callback_parameters in self.meta_processing_chain_callback_on_epoch_end:\n if 'method_name' in callback_parameters:\n self.data_processing_chain.call_method(\n method_name=callback_parameters['method_name'],\n parameters=callback_parameters.get('parameters', {})\n )\n\n if self.data_buffer is not None and self.data_refresh_on_epoch:\n # Force reload of data\n self.data_buffer.clear()\n\n return KerasDataSequence\n\n\ndef data_collector(item_list=None,\n data_processing_chain=None, meta_processing_chain=None,\n target_format='single_target_per_sequence',\n channel_dimension='channels_last',\n verbose=True,\n print_indent=2\n ):\n \"\"\"Data collector\n\n Collects data and meta into matrices while processing them through processing chains.\n\n Parameters\n ----------\n item_list : list or dict\n Items in the data sequence. List containing multi-level dictionary with first level key\n 'data' and 'meta'. Second level should contain parameters for process method in the processing chain.\n Default value None\n\n data_processing_chain : ProcessingChain\n Data processing chain.\n Default value None\n\n meta_processing_chain : ProcessingChain\n Meta processing chain.\n Default value None\n\n channel_dimension : str\n Controls where channel dimension should be added. 
Similar to Keras data format parameter.\n If None given, no channel dimension is added.\n Possible values [None, 'channels_first', 'channels_last']\n Default value None\n\n target_format : str\n Meta data interpretation in the relation to the data items.\n Default value 'single_target_per_segment'\n\n verbose : bool\n Print information about the data\n Default value True\n\n print_indent : int\n Default value 2\n\n Returns\n -------\n numpy.ndarray\n data\n\n numpy.ndarray\n meta\n\n dict\n data size information\n\n \"\"\"\n\n if item_list:\n # Collect all data and meta\n X = []\n Y = []\n\n for item in item_list:\n data = data_processing_chain.process(**item['data'])\n meta = meta_processing_chain.process(**item['meta'])\n\n X.append(data.data)\n\n # Collect meta\n if target_format == 'single_target_per_sequence':\n # Collect single target per sequence\n for i in range(0, data.shape[data.sequence_axis]):\n Y.append(meta.data[:, 0])\n\n elif target_format == 'same':\n # Collect single target per sequence\n Y.append(\n numpy.repeat(\n a=meta.data,\n repeats=data.length,\n axis=1\n ).T\n )\n\n data_size = {}\n\n if len(data.shape) == 2:\n # Stack collected data and meta correct way\n if data.time_axis == 0:\n X = numpy.vstack(X)\n Y = numpy.vstack(Y)\n\n else:\n X = numpy.hstack(X)\n Y = numpy.hstack(Y)\n\n # Get data item size\n data_size = {\n 'data': X.shape[data.data_axis],\n 'time': X.shape[data.time_axis],\n }\n\n elif len(data.shape) == 3:\n # Stack collected data and meta correct way\n if data.sequence_axis == 0:\n X = numpy.vstack(X)\n Y = numpy.vstack(Y)\n\n elif data.sequence_axis == 1:\n X = numpy.hstack(X)\n Y = numpy.hstack(Y)\n\n elif data.sequence_axis == 2:\n X = numpy.dstack(X)\n Y = numpy.dstack(Y)\n\n if channel_dimension:\n # Add channel dimension to the data\n if channel_dimension == 'channels_first':\n X = numpy.expand_dims(X, axis=1)\n\n elif channel_dimension == 'channels_last':\n X = numpy.expand_dims(X, axis=3)\n\n # Get data item size\n data_size = {\n 'data': X.shape[data.data_axis],\n 'time': X.shape[data.time_axis],\n 'sequence': X.shape[data.sequence_axis],\n }\n\n if verbose:\n data_shape = data.shape\n data_axis = {\n 'time_axis': data.time_axis,\n 'data_axis': data.data_axis\n }\n\n if hasattr(data, 'sequence_axis'):\n data_axis['sequence_axis'] = data.sequence_axis\n\n meta_shape = meta.shape\n meta_axis = {\n 'time_axis': meta.time_axis,\n 'data_axis': meta.data_axis\n }\n\n if hasattr(meta, 'sequence_axis'):\n meta_axis['sequence_axis'] = meta.sequence_axis\n\n logger = FancyLogger()\n\n # Data information\n logger.line('Data', indent=print_indent)\n\n # Matrix\n logger.data(\n field='Matrix shape',\n value=X.shape,\n indent=print_indent + 2\n )\n\n # Item\n logger.data(\n field='Item shape',\n value=data_shape,\n indent=print_indent + 2\n )\n\n logger.data(\n field='Time',\n value=data_shape[data_axis['time_axis']],\n indent=print_indent + 4\n )\n\n logger.data(\n field='Data',\n value=data_shape[data_axis['data_axis']],\n indent=print_indent + 4\n )\n\n if 'sequence_axis' in data_axis:\n logger.data(\n field='Sequence',\n value=data_shape[data_axis['sequence_axis']],\n indent=print_indent + 4\n )\n\n # Meta information\n logger.line('Meta', indent=print_indent)\n\n # Matrix\n logger.data(\n field='Matrix shape',\n value=Y.shape,\n indent=print_indent + 2\n )\n\n # Item\n logger.data(\n field='Item shape',\n value=meta_shape,\n indent=print_indent + 2\n )\n logger.data(\n field='Time',\n value=meta_shape[meta_axis['time_axis']],\n indent=print_indent + 
4\n )\n\n logger.data(\n field='Data',\n value=meta_shape[meta_axis['data_axis']],\n indent=print_indent + 4\n )\n\n if 'sequence_axis' in meta_axis:\n logger.data(\n field='Sequence',\n value=meta_shape[meta_axis['sequence_axis']],\n indent=print_indent + 4\n )\n\n return X, Y, data_size\n",
"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import\n\nimport collections\nimport hashlib\nimport os\nimport pickle\nimport sys\n\nimport numpy\nimport yaml\nfrom six import iteritems\nfrom tqdm import tqdm\n\n\nfrom dcase_util.datasets import AcousticSceneDataset, SyntheticSoundEventDataset, SoundEventDataset\nfrom dcase_util.containers import MetaDataContainer, MetaDataItem, OneToOneMappingContainer, \\\n DictContainer, ParameterContainer\nfrom dcase_util.utils import Path\n\n# =====================================================\n# DCASE 2018\n# =====================================================\n\n\nclass TUTUrbanAcousticScenes_2018_DevelopmentSet(AcousticSceneDataset):\n \"\"\"TUT Urban Acoustic Scenes 2018 Development dataset\n\n This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask A\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-urban-acoustic-scenes-2018-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',\n 'title': 'TUT Urban Acoustic Scenes 2018, development dataset',\n 'url': None,\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Zoom F8',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 1\n kwargs['meta_filename'] ='meta.csv'\n\n filename_base = 'TUT-urban-acoustic-scenes-2018-development'\n source_url = 'https://zenodo.org/record/1228142/files/'\n\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + filename_base + '.doc.zip',\n 'remote_bytes': 10517,\n 'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4',\n 'filename': filename_base + '.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + filename_base + '.meta.zip',\n 'remote_bytes': 69272,\n 'remote_md5': 'e196065ee83c07af03a11a310364377d',\n 'filename': filename_base + '.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.1.zip',\n 'remote_bytes': 1657811579,\n 'remote_md5': '62f97087c447e29def8716204469bf89',\n 'filename': filename_base + '.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.2.zip',\n 'remote_bytes': 1783489370,\n 'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9',\n 'filename': filename_base + '.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.3.zip',\n 'remote_bytes': 1809675304,\n 'remote_md5': '00d2020582a4535af5e65322fb2bad56',\n 'filename': 
filename_base + '.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.4.zip',\n 'remote_bytes': 1756582525,\n 'remote_md5': 'd691eb4271f83ba6ba9a28797accc497',\n 'filename': filename_base + '.audio.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.5.zip',\n 'remote_bytes': 1724002546,\n 'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be',\n 'filename': filename_base + '.audio.5.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.6.zip',\n 'remote_bytes': 1645753049,\n 'remote_md5': '2f0feee78f216697eb19497714d97642',\n 'filename': filename_base + '.audio.6.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.7.zip',\n 'remote_bytes': 1671903917,\n 'remote_md5': '07cfefe80a0731de6819181841239f3a',\n 'filename': filename_base + '.audio.7.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.8.zip',\n 'remote_bytes': 1673304843,\n 'remote_md5': '213f3c012859c2e9dcb74aacc8558458',\n 'filename': filename_base + '.audio.8.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.9.zip',\n 'remote_bytes': 1674839259,\n 'remote_md5': 'b724442b09abcb3bd095ebff497cef85',\n 'filename': filename_base + '.audio.9.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.10.zip',\n 'remote_bytes': 1662932947,\n 'remote_md5': 'a27a32fa52e283ed8013375b8a16f269',\n 'filename': filename_base + '.audio.10.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.11.zip',\n 'remote_bytes': 1751473843,\n 'remote_md5': '7073a121e825ffef99832507f30d6644',\n 'filename': filename_base + '.audio.11.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.12.zip',\n 'remote_bytes': 1742332198,\n 'remote_md5': '6567aa61db12776568b6267ce122fb18',\n 'filename': filename_base + '.audio.12.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.13.zip',\n 'remote_bytes': 798990513,\n 'remote_md5': 'd00eeb2db0e093d8975521323a96c519',\n 'filename': filename_base + '.audio.13.zip'\n }\n ]\n kwargs['audio_paths'] = [\n 'audio'\n ]\n super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n if not item.identifier:\n item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item 
in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet(AcousticSceneDataset):\n \"\"\"TUT Urban Acoustic Scenes 2018 Mobile Development dataset\n\n This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask B\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-urban-acoustic-scenes-2018-mobile-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-mobile-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',\n 'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset',\n 'url': None,\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Various',\n 'microphone_model': 'Various',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 1\n kwargs['meta_filename'] = 'meta.csv'\n\n filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development'\n source_url = 'https://zenodo.org/record/1228235/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + filename_base + '.doc.zip',\n 'remote_bytes': 12144,\n 'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0',\n 'filename': filename_base + '.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + filename_base + '.meta.zip',\n 'remote_bytes': 88425,\n 'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4',\n 'filename': filename_base + '.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.1.zip',\n 'remote_bytes': 1692337547,\n 'remote_md5': 'd6f2671af84032b97f393354c124517d',\n 'filename': filename_base + '.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.2.zip',\n 'remote_bytes': 1769203601,\n 'remote_md5': 'db8b3603af5d4e559869a592930a7620',\n 'filename': filename_base + '.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.3.zip',\n 'remote_bytes': 1674610746,\n 'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0',\n 'filename': filename_base + '.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.4.zip',\n 'remote_bytes': 1634599587,\n 'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07',\n 'filename': filename_base + '.audio.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.5.zip',\n 
'remote_bytes': 1640894390,\n 'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca',\n 'filename': filename_base + '.audio.5.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.6.zip',\n 'remote_bytes': 1693974078,\n 'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f',\n 'filename': filename_base + '.audio.6.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + filename_base + '.audio.7.zip',\n 'remote_bytes': 1165383562,\n 'remote_md5': 'e182e5300867f4ed4b580389cc5b931e',\n 'filename': filename_base + '.audio.7.zip'\n }\n ]\n kwargs['audio_paths'] = [\n 'audio'\n ]\n super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n if not item.identifier:\n item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])\n\n if not item.source_label:\n item.source_label = os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[-1]\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='train',\n fold=fold\n )\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=fold\n )\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(\n filename=self.meta_file\n )\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\n# =====================================================\n# DCASE 2017\n# =====================================================\n\n\nclass TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset):\n \"\"\"TUT Acoustic scenes 2017 development dataset\n\n This dataset is used in DCASE2017 - Task 1, Acoustic scene classification\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-acoustic-scenes-2017-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2017-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. 
Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Acoustic Scenes 2017, development dataset',\n 'url': None,\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 4\n\n source_url = 'https://zenodo.org/record/400515/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.doc.zip',\n 'remote_bytes': 54796,\n 'remote_md5': '2065495aaf3f1103e795c9899e2af1df',\n 'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.meta.zip',\n 'remote_bytes': 104321,\n 'remote_md5': '9007fd4772d816590c5db5f5e9568f5d',\n 'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.error.zip',\n 'remote_bytes': 1432,\n 'remote_md5': '802c700b021769e52a2c1e3b9c117a1b',\n 'filename': 'TUT-acoustic-scenes-2017-development.error.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip',\n 'remote_bytes': 1071445248,\n 'remote_md5': '251325a9afaaad0326ad1c57f57d514a',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip',\n 'remote_bytes': 1073453613,\n 'remote_md5': 'c26861e05147dc319b4250eb103d9d99',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip',\n 'remote_bytes': 1073077819,\n 'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip',\n 'remote_bytes': 1072822038,\n 'remote_md5': '1732b03afe8c53ef8bba80ba14766e57',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip',\n 'remote_bytes': 1072644652,\n 'remote_md5': '611be754a0c951185c6ae4b7643c19a0',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip',\n 'remote_bytes': 1072667888,\n 'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip',\n 'remote_bytes': 1073417661,\n 'remote_md5': 'c7d79db84264401c0f8680dcc36013ad',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip',\n 'remote_bytes': 1072381222,\n 
'remote_md5': '35043f25123439392338c790494c7a19',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip',\n 'remote_bytes': 1072087738,\n 'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip',\n 'remote_bytes': 1046262120,\n 'remote_md5': '5df83a191295a04e290b125c634e13e7',\n 'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'\n }\n ]\n kwargs['audio_paths'] = [\n 'audio'\n ]\n super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n raw_path, raw_filename = os.path.split(item.filename)\n item.identifier = raw_filename.split('_')[0]\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = collections.OrderedDict()\n for fold in self.folds():\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='train', fold=fold)\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)\n ).load()\n\n # Process, make sure each file is included only once.\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass TUTAcousticScenes_2017_EvaluationSet(AcousticSceneDataset):\n \"\"\"TUT Acoustic scenes 2017 evaluation dataset\n\n This dataset is used in DCASE2017 - Task 1, Acoustic scene classification\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-acoustic-scenes-2017-evaluation',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. 
Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Acoustic Scenes 2017, development dataset',\n 'url': None,\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = None\n\n source_url = 'https://zenodo.org/record/1040168/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip',\n 'remote_bytes': 53687,\n 'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip',\n 'remote_bytes': 4473,\n 'remote_md5': '200eee9493e8044403e1326e3d05cfde',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip',\n 'remote_bytes': 1071856687,\n 'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip',\n 'remote_bytes': 1073362972,\n 'remote_md5': '4085ef5fa286f2169074993a4e405953',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip',\n 'remote_bytes': 1071521152,\n 'remote_md5': 'cac432579e7cf2dff0aec7aaed248956',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip',\n 'remote_bytes': 382756463,\n 'remote_md5': '664bf09c3d24bd26c6b587f1d709de36',\n 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'\n },\n ]\n kwargs['audio_paths'] = ['audio']\n\n super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n filename_map : OneToOneMappingContainer\n Filename map\n Default value None\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n if filename_map and item.filename in filename_map:\n filename_mapped = filename_map.map(item.filename)\n item.identifier = os.path.split(filename_mapped)[1].split('_')[0]\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')):\n meta_data = collections.OrderedDict()\n\n # Read files in\n data = MetaDataContainer(\n filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')\n ).load()\n\n # 
Load filename mapping\n map_filename = os.path.join(self.evaluation_setup_path, 'map.txt')\n if os.path.exists(map_filename):\n filename_map = OneToOneMappingContainer(filename=map_filename).load()\n else:\n filename_map = {}\n\n for item in data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False,\n filename_map=filename_map\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass TUTRareSoundEvents_2017_DevelopmentSet(SyntheticSoundEventDataset):\n \"\"\"TUT Acoustic scenes 2017 development dataset\n\n This dataset is used in DCASE2017 - Task 2, Rare sound event detection\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-rare-sound-events-2017-development',\n data_path=None,\n included_content_types=None,\n synth_parameters=None,\n dcase_compatibility=True,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-rare-sound-events-2017-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n synth_parameters : dict\n Data synthesis parameters.\n Default value None\n\n dcase_compatibility : bool\n Ensure that dataset is generated same way than in DCASE2017 Challenge setup\n Default value True\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['filelisthash_exclude_dirs'] = kwargs.get(\n 'filelisthash_exclude_dirs',\n [os.path.join('data', 'mixture_data')]\n )\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Rare Sound Events 2017, development dataset',\n 'url': None,\n 'audio_source': 'Synthetic',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Unknown',\n 'microphone_model': 'Unknown',\n }\n kwargs['crossvalidation_folds'] = 1\n\n source_url = 'https://zenodo.org/record/401395/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.doc.zip',\n 'remote_bytes': 21042,\n 'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783',\n 'filename': 'TUT-rare-sound-events-2017-development.doc.zip'\n },\n {\n 'content_type': 'code',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.code.zip',\n 'remote_bytes': 81518,\n 'remote_md5': '4cacdf0803daf924a60bf9daa573beb7',\n 'filename': 'TUT-rare-sound-events-2017-development.code.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip',\n 'remote_bytes': 1072175672,\n 'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip',\n 
'remote_bytes': 1073378284,\n 'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip',\n 'remote_bytes': 1069766123,\n 'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip',\n 'remote_bytes': 1070042681,\n 'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip',\n 'remote_bytes': 1073380909,\n 'remote_md5': '84e70d855457a18115108e42ec04501a',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip',\n 'remote_bytes': 1073021941,\n 'remote_md5': '048ce898bd434097dd489027f7ba361d',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip',\n 'remote_bytes': 1069890239,\n 'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip',\n 'remote_bytes': 180860904,\n 'remote_md5': '69dcb81e70f4e6605e178693afcd7722',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip',\n 'remote_bytes': 639119477,\n 'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473',\n 'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'\n }\n ]\n kwargs['audio_paths'] = ['audio']\n\n default_synth_parameters = DictContainer({\n 'train': {\n 'seed': 42,\n 'event_presence_prob': 0.5,\n 'mixtures_per_class': 500,\n 'ebr_list': [-6, 0, 6],\n },\n 'test': {\n 'seed': 42,\n 'event_presence_prob': 0.5,\n 'mixtures_per_class': 500,\n 'ebr_list': [-6, 0, 6],\n }\n })\n if synth_parameters is None:\n synth_parameters = {}\n\n # Override synth parameters\n synth_parameters = default_synth_parameters.merge(synth_parameters)\n\n # Meta filename depends on synth_parameters\n kwargs['meta_filename'] = 'meta_'+synth_parameters.get_hash_for_path()+'.txt'\n\n self.synth_parameters = synth_parameters\n\n # Add parameter hash\n self.synth_parameters['train']['param_hash'] = hashlib.md5(\n yaml.dump(\n {\n 'event_presence_prob': self.synth_parameters['train']['event_presence_prob'],\n 'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'],\n 'ebrs': self.synth_parameters['train']['ebr_list'],\n 'seed': self.synth_parameters['train']['seed']\n }\n ).encode('utf-8')).hexdigest()\n\n self.synth_parameters['test']['param_hash'] = hashlib.md5(\n yaml.dump(\n {\n 'event_presence_prob': 
self.synth_parameters['test']['event_presence_prob'],\n 'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'],\n 'ebrs': self.synth_parameters['test']['ebr_list'],\n 'seed': self.synth_parameters['test']['seed']\n }\n ).encode('utf-8')).hexdigest()\n\n self.dcase_compatibility = dcase_compatibility\n\n # Initialize baseclass\n super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)\n\n # Add code package to be downloaded always\n if 'code' not in self.included_content_types or 'all' not in self.included_content_types:\n self.included_content_types.append('code')\n\n def event_labels(self, scene_label=None):\n \"\"\"List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n \"\"\"\n\n labels = ['babycry', 'glassbreak', 'gunshot']\n labels.sort()\n return labels\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n # Make sure evaluation_setup directory exists\n Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))\n\n return self\n\n def synthesize(self):\n # Create init so we can call functions\n if os.path.exists(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py')):\n open(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py'), 'a').close()\n\n # Add synth code to the search path\n sys.path.append(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer'))\n from core import generate_mixture_recipes\n from core import do_mixing\n\n scene_label = 'synthetic'\n subset_map = {'train': 'devtrain',\n 'test': 'devtest'}\n\n data_path = os.path.join(os.path.abspath(self.local_path), 'data')\n\n set_progress = tqdm(['train', 'test'],\n desc=\"{0: <25s}\".format('Set'),\n file=sys.stdout,\n leave=False,\n disable=self.disable_progress_bar,\n ascii=self.use_ascii_progress_bar)\n\n for subset_label in set_progress:\n if self.log_system_progress:\n self.logger.info(' {title:<15s} [{subset_label:<30s}]'.format(\n title='Set ',\n subset_label=subset_label)\n )\n\n # Translated subset name\n subset_name_on_disk = subset_map[subset_label]\n\n # Get parameters\n mixing_params = {\n 'event_presence_prob': self.synth_parameters[subset_label]['event_presence_prob'],\n 'mixtures_per_class': self.synth_parameters[subset_label]['mixtures_per_class'],\n 'ebrs': self.synth_parameters[subset_label]['ebr_list'],\n 'seed': self.synth_parameters[subset_label]['seed']\n }\n\n # Get parameter hash\n param_hash = self.synth_parameters[subset_label]['param_hash']\n\n # Save parameters\n mixture_parameters = os.path.join(\n self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'parameters.yaml'\n )\n if not os.path.isfile(mixture_parameters):\n # Make sure directory exists\n Path().makedirs(\n path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash)\n )\n\n # Save\n ParameterContainer(mixing_params).save(filename=mixture_parameters)\n\n # Check do we need to generate recipes\n recipes_exists = True\n for event_label in self.event_labels():\n recipe_filename = 'mixture_recipes_' + subset_name_on_disk + '_' + event_label + '.yaml'\n if not os.path.isfile(os.path.join(self.local_path, 'data', 'mixture_data',\n subset_name_on_disk, param_hash, 'meta', recipe_filename)):\n recipes_exists = False\n\n if not recipes_exists:\n # Generate mixture recipes\n 
generate_mixture_recipes(\n data_path=data_path,\n current_subsets=numpy.array([subset_name_on_disk]),\n mixing_params=mixing_params\n )\n\n # Check do we need to generate mixtures\n mixture_audio_exists = True\n audio_files = Path().file_list(\n path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'audio'))\n\n for event_label in self.event_labels():\n event_audio = []\n for f in audio_files:\n if event_label in f:\n event_audio.append(f)\n if len(event_audio) != self.synth_parameters[subset_label]['mixtures_per_class']:\n mixture_audio_exists = False\n\n if not mixture_audio_exists:\n # Generate mixture audio based on recipes\n do_mixing(\n data_path=data_path,\n current_subsets=numpy.array([subset_name_on_disk]),\n magic_anticlipping_factor=0.2,\n param_hash=param_hash,\n dcase_compatibility_mode=True\n )\n\n if not self.meta_container.exists():\n # Collect meta data\n meta_data = MetaDataContainer()\n for class_label in self.event_labels():\n for subset_label, subset_name_on_disk in iteritems(subset_map):\n subset_name_on_disk = subset_map[subset_label]\n\n # Get parameter hash\n param_hash = self.synth_parameters[subset_label]['param_hash']\n\n mixture_path = os.path.join(\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'audio'\n )\n\n mixture_meta_path = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'meta'\n )\n\n event_list_filename = os.path.join(\n mixture_meta_path,\n 'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'\n )\n\n if os.path.isfile(event_list_filename):\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n\n for item in current_meta:\n item.filename = os.path.join(mixture_path, item.filename)\n item.scene_label = scene_label\n\n meta_data += current_meta\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n # Evaluation setup filenames\n train_filename = self.evaluation_setup_filename(\n setup_part='train',\n fold=1,\n file_extension='txt'\n )\n\n test_filename = self.evaluation_setup_filename(\n setup_part='test',\n fold=1,\n file_extension='txt'\n )\n\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=1,\n file_extension='txt'\n )\n\n # Check that evaluation setup exists\n evaluation_setup_exists = True\n if not os.path.isfile(train_filename) or not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):\n evaluation_setup_exists = False\n\n if not evaluation_setup_exists:\n # Get parameter hash\n param_hash_train = self.synth_parameters['train']['param_hash']\n\n mixture_meta_path_train = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_map['train'],\n param_hash_train,\n 'meta'\n )\n mixture_path_train = os.path.join(\n 'data',\n 'mixture_data',\n subset_map['train'],\n param_hash_train,\n 'audio'\n )\n\n # Get parameter hash\n param_hash_test = self.synth_parameters['test']['param_hash']\n\n mixture_meta_path_test = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash_test,\n 'meta'\n )\n mixture_path_test = os.path.join(\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash_test,\n 'audio'\n )\n\n train_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_train,\n 'event_list_' + subset_map['train'] + 
'_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n for item in current_meta:\n item.filename = os.path.join(mixture_path_train, item.filename)\n item.scene_label = scene_label\n\n train_meta += current_meta\n train_meta.save(filename=train_filename)\n\n test_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n current_meta_ = MetaDataContainer()\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n current_meta_.append(MetaDataItem(\n {\n 'filename': item.filename,\n 'scene_label': scene_label\n }\n ))\n test_meta += current_meta_\n test_meta.save(filename=test_filename)\n\n eval_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n item.scene_label = scene_label\n\n eval_meta += current_meta\n eval_meta.save(filename=evaluate_filename)\n\n # Load meta and cross validation\n self.load()\n\n def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):\n parts = []\n\n if setup_part == 'test' or setup_part == 'evaluate':\n subset_label = 'test'\n else:\n subset_label = 'train'\n\n param_hash = self.synth_parameters[subset_label]['param_hash']\n\n if setup_part == 'train':\n parts.append('train')\n\n elif setup_part == 'test':\n parts.append('test')\n\n elif setup_part == 'evaluate':\n parts.append('evaluate')\n\n else:\n message = '{name}: Unknown setup_part [{setup_part}]'.format(\n name=self.__class__.__name__,\n setup_part=setup_part\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '_' + param_hash + '.' 
+ file_extension)\n\n def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of training items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value \"None\"\n scene_label : str\n Scene label\n Default value \"None\"\n event_label : str\n Event label\n Default value \"None\"\n filename_contains : str:\n String found in filename\n Default value \"None\"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to training set for given fold.\n\n \"\"\"\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['train'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of testing items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value \"None\"\n scene_label : str\n Scene label\n Default value \"None\"\n event_label : str\n Event label\n Default value \"None\"\n filename_contains : str:\n String found in filename\n Default value \"None\"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n \"\"\"\n\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['test'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of evaluation items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value \"None\"\n scene_label : str\n Scene label\n Default value \"None\"\n event_label : str\n Event label\n Default value \"None\"\n filename_contains : str:\n String found in filename\n Default value \"None\"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n \"\"\"\n\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['evaluate'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n\nclass TUTRareSoundEvents_2017_EvaluationSet(SyntheticSoundEventDataset):\n \"\"\"TUT Acoustic scenes 2017 evaluation dataset\n\n This dataset is used in DCASE2017 - Task 2, Rare sound event detection\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-rare-sound-events-2017-evaluation',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-rare-sound-events-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. 
If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['reference_data_present'] = True\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Rare Sound Events 2017, evaluation dataset',\n 'url': None,\n 'audio_source': 'Synthetic',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Unknown',\n 'microphone_model': 'Unknown',\n }\n kwargs['crossvalidation_folds'] = None\n\n source_url = 'https://zenodo.org/record/1160455/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip',\n 'remote_bytes': 11701,\n 'remote_md5': '36db98a94ce871c6bdc5bd5238383114',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'\n },\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'LICENSE.txt',\n 'remote_bytes': 0,\n 'remote_md5': '0707857098fc74d17beb824416fb74b1',\n 'filename': 'LICENSE.txt'\n },\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'FREESOUNDCREDITS.txt',\n 'remote_bytes': 0,\n 'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d',\n 'filename': 'FREESOUNDCREDITS.txt'\n },\n {\n 'content_type': ['audio', 'meta'],\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip',\n 'remote_bytes': 1071143794,\n 'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip',\n 'remote_bytes': 1071773516,\n 'remote_md5': 'e97d5842c46805cdb94e6d4017870cde',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip',\n 'remote_bytes': 1073505512,\n 'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip',\n 'remote_bytes': 1071132551,\n 'remote_md5': '5042cd00aed9af6b37a253e24f88554f',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip',\n 'remote_bytes': 308314939,\n 'remote_md5': '72180597ed5bfaa73491755f74b84738',\n 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'\n }\n ]\n kwargs['audio_paths'] = ['audio']\n\n # Initialize base class\n super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)\n\n def scene_labels(self):\n return ['synthetic']\n\n def event_labels(self, scene_label=None):\n \"\"\"List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n \"\"\"\n\n labels = 
['babycry', 'glassbreak', 'gunshot']\n labels.sort()\n return labels\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n scene_label = 'synthetic'\n subset_map = {'test': 'evaltest'}\n param_hash = 'bbb81504db15a03680a0044474633b67'\n # Make sure evaluation_setup directory exists\n Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))\n\n if not self.meta_container.exists() and self.reference_data_present:\n # Collect meta data\n meta_data = MetaDataContainer()\n for class_label in self.event_labels():\n for subset_label, subset_name_on_disk in iteritems(subset_map):\n subset_name_on_disk = subset_map[subset_label]\n\n mixture_path = os.path.join(\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'audio'\n )\n\n mixture_meta_path = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_name_on_disk,\n param_hash,\n 'meta'\n )\n\n event_list_filename = os.path.join(\n mixture_meta_path,\n 'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'\n )\n\n if os.path.isfile(event_list_filename):\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n\n for item in current_meta:\n item.filename = os.path.join(mixture_path, item.filename)\n item.scene_label = scene_label\n\n meta_data += current_meta\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n\n test_filename = self.evaluation_setup_filename(\n setup_part='test',\n fold=None,\n file_extension='txt'\n )\n\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate',\n fold=None,\n file_extension='txt'\n )\n\n # Check that evaluation setup exists\n evaluation_setup_exists = True\n if not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):\n evaluation_setup_exists = False\n\n if not evaluation_setup_exists:\n # Get parameter hash\n mixture_meta_path_test = os.path.join(\n self.local_path,\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash,\n 'meta'\n )\n mixture_path_test = os.path.join(\n 'data',\n 'mixture_data',\n subset_map['test'],\n param_hash,\n 'audio'\n )\n\n test_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n current_meta_ = MetaDataContainer()\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n current_meta_.append(MetaDataItem(\n {\n 'filename': item.filename,\n 'scene_label': scene_label\n }\n ))\n test_meta += current_meta_\n test_meta.save(filename=test_filename)\n\n eval_meta = MetaDataContainer()\n for class_label in self.event_labels():\n event_list_filename = os.path.join(\n mixture_meta_path_test,\n 'event_list_' + subset_map['test'] + '_' + class_label + '.csv'\n )\n current_meta = MetaDataContainer(\n filename=event_list_filename\n ).load(\n fields=['filename', 'onset', 'offset', 'event_label']\n )\n for item in current_meta:\n item.filename = os.path.join(mixture_path_test, item.filename)\n item.scene_label = scene_label\n\n eval_meta += current_meta\n eval_meta.save(filename=evaluate_filename)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n def evaluation_setup_filename(self, setup_part='train', 
fold=None, scene_label=None, file_extension='txt'):\n parts = []\n\n if setup_part == 'test' or setup_part == 'evaluate':\n subset_label = 'test'\n else:\n subset_label = 'train'\n\n if setup_part == 'train':\n parts.append('train')\n\n elif setup_part == 'test':\n parts.append('test')\n\n elif setup_part == 'evaluate':\n parts.append('evaluate')\n\n else:\n message = '{name}: Unknown setup_part [{setup_part}]'.format(\n name=self.__class__.__name__,\n setup_part=setup_part\n )\n\n self.logger.exception(message)\n raise ValueError(message)\n\n return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '.' + file_extension)\n\n def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of training items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None\"\n\n event_label : str\n Event label\n Default value None\"\n\n filename_contains : str:\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to training set for given fold.\n\n \"\"\"\n\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['train'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of testing items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None\n\n event_label : str\n Event label\n Default value None\n\n filename_contains : str:\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to testing set for given fold.\n\n \"\"\"\n\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['test'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):\n \"\"\"List of evaluation items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None\n\n event_label : str\n Event label\n Default value None\n\n filename_contains : str:\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to testing set for given fold.\n\n \"\"\"\n\n if fold is None or fold == 0:\n fold = 'all_data'\n\n data = self.crossvalidation_data['evaluate'][fold]\n\n if scene_label:\n data = data.filter(scene_label=scene_label)\n\n if event_label:\n data = data.filter(event_label=event_label)\n\n if filename_contains:\n data_ = MetaDataContainer()\n for item in data:\n if filename_contains in item.filename:\n data_.append(item)\n data = data_\n\n return data\n\n\nclass 
TUTSoundEvents_2017_DevelopmentSet(SoundEventDataset):\n \"\"\"TUT Sound events 2017 development dataset\n\n This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-sound-events-2017-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2017-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Sound Events 2016, development dataset',\n 'url': 'https://zenodo.org/record/45759',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 4\n\n source_url = 'https://zenodo.org/record/814831/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-sound-events-2017-development.doc.zip',\n 'remote_bytes': 56150,\n 'remote_md': 'aa6024e70f5bff3fe15d962b01753e23',\n 'filename': 'TUT-sound-events-2017-development.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-sound-events-2017-development.meta.zip',\n 'remote_bytes': 140684,\n 'remote_md': '50e870b3a89ed3452e2a35b508840929',\n 'filename': 'TUT-sound-events-2017-development.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-sound-events-2017-development.audio.1.zip',\n 'remote_bytes': 1062653169,\n 'remote_md': '6f1cd31592b8240a14be3ee513db6a23',\n 'filename': 'TUT-sound-events-2017-development.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-sound-events-2017-development.audio.2.zip',\n 'remote_bytes': 213232458,\n 'remote_md': 'adcff03341b84dc8d35f035b93c1efa0',\n 'filename': 'TUT-sound-events-2017-development.audio.2.zip'\n }\n ]\n kwargs['audio_paths'] = [os.path.join('audio', 'street')]\n super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n raw_path, raw_filename = os.path.split(item.filename)\n item.identifier = raw_filename.split('_')[0]\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = MetaDataContainer()\n 
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n for annotation_filename in annotation_files:\n data = MetaDataContainer(filename=annotation_filename).load()\n for item in data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass TUTSoundEvents_2017_EvaluationSet(SoundEventDataset):\n \"\"\"TUT Sound events 2017 evaluation dataset\n\n This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-sound-events-2017-evaluation',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Sound Events 2016, development dataset',\n 'url': 'https://zenodo.org/record/45759',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = None\n\n source_url = 'https://zenodo.org/record/1040179/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-sound-events-2017-evaluation.doc.zip',\n 'remote_bytes': 54606,\n 'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9',\n 'filename': 'TUT-sound-events-2017-evaluation.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-sound-events-2017-evaluation.meta.zip',\n 'remote_bytes': 762,\n 'remote_md5': 'a951598abaea87296ca409e30fb0b379',\n 'filename': 'TUT-sound-events-2017-evaluation.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-sound-events-2017-evaluation.audio.zip',\n 'remote_bytes': 388173790,\n 'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871',\n 'filename': 'TUT-sound-events-2017-evaluation.audio.zip'\n }\n ]\n kwargs['audio_paths'] = ['audio']\n super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)\n\n def scene_labels(self):\n labels = ['street']\n labels.sort()\n return labels\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n raw_path, raw_filename = 
os.path.split(item.filename)\n item.identifier = os.path.splitext(raw_filename)[0]\n item.source_label = 'mixture'\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate',\n scene_label=self.scene_labels()[0]\n )\n eval_file = MetaDataContainer(filename=evaluate_filename)\n\n if eval_file.exists():\n # Get meta data from evaluation file\n meta_data = MetaDataContainer()\n eval_file.load()\n for item in eval_file:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += eval_file\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n elif os.path.isdir(os.path.join(self.local_path, 'meta')):\n annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n\n # Get meta data from annotation files\n meta_data = MetaDataContainer()\n\n for annotation_filename in annotation_files:\n data = MetaDataContainer(filename=annotation_filename).load()\n for item in data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\n# =====================================================\n# DCASE 2016\n# =====================================================\n\n\nclass TUTAcousticScenes_2016_DevelopmentSet(AcousticSceneDataset):\n \"\"\"TUT Acoustic scenes 2016 development dataset\n\n This dataset is used in DCASE2016 - Task 1, Acoustic scene classification\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-acoustic-scenes-2016-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2016-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. 
Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Acoustic Scenes 2016, development dataset',\n 'url': 'https://zenodo.org/record/45739',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 4\n\n source_url = 'https://zenodo.org/record/45739/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.doc.zip',\n 'remote_bytes': 69671,\n 'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926',\n 'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.meta.zip',\n 'remote_bytes': 28815,\n 'remote_md5': '779b33da2ebbf8bde494b3c981827251',\n 'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.error.zip',\n 'remote_bytes': 1283,\n 'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386',\n 'filename': 'TUT-acoustic-scenes-2016-development.error.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip',\n 'remote_bytes': 1070981236,\n 'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip',\n 'remote_bytes': 1067186166,\n 'remote_md5': 'd36cf3253e2c041f68e937a3fe804807',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip',\n 'remote_bytes': 1073644405,\n 'remote_md5': '0393a9620ab882b1c26d884eccdcffdd',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip',\n 'remote_bytes': 1072111347,\n 'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip',\n 'remote_bytes': 1069681513,\n 'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip',\n 'remote_bytes': 1072890150,\n 'remote_md5': '591aad3219d1155342572cc1f6af5680',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip',\n 'remote_bytes': 1069265197,\n 'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip',\n 
'remote_bytes': 528461098,\n 'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8',\n 'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'\n }\n ]\n kwargs['audio_paths'] = [\n 'audio'\n ]\n super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = {}\n for fold in range(1, self.crossvalidation_folds):\n # Read train files in\n fold_data = MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='train', fold=fold)\n ).load()\n\n # Read eval files in\n fold_data += MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)\n ).load()\n\n for item in fold_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n return self\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n raw_path, raw_filename = os.path.split(item.filename)\n item.identifier = raw_filename.split('_')[0]\n\n\nclass TUTAcousticScenes_2016_EvaluationSet(AcousticSceneDataset):\n \"\"\"TUT Acoustic scenes 2016 evaluation dataset\n\n This dataset is used in DCASE2016 - Task 1, Acoustic scene classification\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-acoustic-scenes-2016-evaluation',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2016-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. 
Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'scene'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Acoustic Scenes 2016, evaluation dataset',\n 'url': 'https://zenodo.org/record/165995',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = None\n\n source_url = 'https://zenodo.org/record/165995/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip',\n 'remote_bytes': 69217,\n 'remote_md5': 'ef315bf912d1124050646888cc3ceba2',\n 'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip',\n 'remote_bytes': 5962,\n 'remote_md5': '0d5c131fc3f50c682de62e0e648aceba',\n 'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip',\n 'remote_bytes': 1067685684,\n 'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe',\n 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip',\n 'remote_bytes': 1068308900,\n 'remote_md5': '7930f1dc26707ab3ba9526073af87333',\n 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip',\n 'remote_bytes': 538894804,\n 'remote_md5': '17187d633d6402aee4b481122a1b28f0',\n 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'\n }\n ]\n kwargs['audio_paths'] = ['audio']\n super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n if item.filename_original is not None:\n raw_path, raw_filename = os.path.split(item.filename_original)\n item.identifier = raw_filename.split('_')[0]\n del item['filename_original']\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n evaluate_filename = self.evaluation_setup_filename(\n setup_part='evaluate'\n )\n\n eval_file = MetaDataContainer(filename=evaluate_filename)\n if eval_file.exists():\n eval_data = eval_file.load()\n meta_data = {}\n for item in eval_data:\n if item.filename not in meta_data:\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data[item.filename] = item\n\n # Save meta\n MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass 
TUTSoundEvents_2016_DevelopmentSet(SoundEventDataset):\n \"\"\"TUT Sound events 2016 development dataset\n\n This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-acoustic-scenes-2016-development',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2016-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Sound Events 2016, development dataset',\n 'url': 'https://zenodo.org/record/45759',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = 4\n\n source_url = 'https://zenodo.org/record/45759/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-sound-events-2016-development.doc.zip',\n 'remote_bytes': 70918,\n 'remote_md5': '33fd26a895530aef607a07b08704eacd',\n 'filename': 'TUT-sound-events-2016-development.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-sound-events-2016-development.meta.zip',\n 'remote_bytes': 122321,\n 'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d',\n 'filename': 'TUT-sound-events-2016-development.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-sound-events-2016-development.audio.zip',\n 'remote_bytes': 1014040667,\n 'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8',\n 'filename': 'TUT-sound-events-2016-development.audio.zip'\n }\n ]\n kwargs['audio_paths'] = [\n os.path.join('audio', 'home'),\n os.path.join('audio', 'residential_area')\n ]\n super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)\n\n def process_meta_item(self, item, absolute_path=True, **kwargs):\n \"\"\"Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n \"\"\"\n\n if absolute_path:\n item.filename = self.relative_to_absolute_path(item.filename)\n\n else:\n item.filename = self.absolute_to_relative_path(item.filename)\n\n raw_path, raw_filename = os.path.split(item.filename)\n item.identifier = os.path.splitext(raw_filename)[0]\n item.source_label = 'mixture'\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_data = MetaDataContainer()\n annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n for annotation_filename in annotation_files:\n scene_label = 
os.path.split(os.path.split(annotation_filename)[0])[1]\n identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]\n audio_filename = os.path.join('audio', scene_label, identifier + '.wav')\n\n data = MetaDataContainer(filename=annotation_filename).load()\n for item in data:\n item.filename = audio_filename\n item.scene_label = scene_label\n\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n\nclass TUTSoundEvents_2016_EvaluationSet(SoundEventDataset):\n \"\"\"TUT Sound events 2016 evaluation dataset\n\n This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio\n\n \"\"\"\n def __init__(self,\n storage_name='TUT-sound-events-2016-evaluation',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2016-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',\n 'title': 'TUT Sound Events 2016, evaluation dataset',\n 'url': 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/',\n 'audio_source': 'Field recording',\n 'audio_type': 'Natural',\n 'recording_device_model': 'Roland Edirol R-09',\n 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',\n 'licence': 'free non-commercial'\n }\n kwargs['crossvalidation_folds'] = None\n\n source_url = 'https://zenodo.org/record/996424/files/'\n kwargs['package_list'] = [\n {\n 'content_type': 'documentation',\n 'remote_file': source_url + 'TUT-sound-events-2016-evaluation.doc.zip',\n 'remote_bytes': 69834,\n 'remote_md5': '0644b54d96f4cefd0ecb2c7ea9161aa9',\n 'filename': 'TUT-sound-events-2016-evaluation.doc.zip'\n },\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-sound-events-2016-evaluation.meta.zip',\n 'remote_bytes': 41608,\n 'remote_md5': '91c266b0780ac619a0d74298a3805e9e',\n 'filename': 'TUT-sound-events-2016-evaluation.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-sound-events-2016-evaluation.audio.zip',\n 'remote_bytes': 471072452,\n 'remote_md5': '29434e8c53bd51206df0234e6cf2238c',\n 'filename': 'TUT-sound-events-2016-evaluation.audio.zip'\n }\n ]\n kwargs['audio_paths'] = [\n os.path.join('audio', 'home'),\n os.path.join('audio', 'residential_area')\n ]\n super(TUTSoundEvents_2016_EvaluationSet, self).__init__(**kwargs)\n\n def scene_labels(self):\n labels = ['home', 'residential_area']\n labels.sort()\n return labels\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists() and os.path.isdir(os.path.join(self.local_path, 'meta')):\n meta_data = MetaDataContainer()\n 
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])\n for annotation_filename in annotation_files:\n scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]\n identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]\n audio_filename = os.path.join('audio', scene_label, identifier + '.wav')\n\n data = MetaDataContainer(filename=annotation_filename).load(decimal='comma')\n for item in data:\n item.filename = audio_filename\n item.scene_label = scene_label\n\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n# =====================================================\n# Others\n# =====================================================\n\n\nclass TUT_SED_Synthetic_2016(SoundEventDataset):\n \"\"\"TUT SED Synthetic 2016\n\n \"\"\"\n\n def __init__(self,\n storage_name='TUT-SED-synthetic-2016',\n data_path=None,\n included_content_types=None,\n **kwargs):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-SED-synthetic-2016'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n \"\"\"\n\n kwargs['included_content_types'] = included_content_types\n kwargs['data_path'] = data_path\n kwargs['storage_name'] = storage_name\n kwargs['dataset_group'] = 'event'\n kwargs['dataset_meta'] = {\n 'authors': 'Emre Cakir',\n 'title': 'TUT-SED Synthetic 2016',\n 'url': 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/tut-sed-synthetic-2016',\n 'audio_source': 'Field recording',\n 'audio_type': 'Synthetic',\n 'recording_device_model': 'Unknown',\n 'microphone_model': 'Unknown',\n }\n kwargs['crossvalidation_folds'] = 1\n\n source_url = 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/datasets/TUT-SED-synthetic-2016/'\n kwargs['package_list'] = [\n {\n 'content_type': 'meta',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.meta.zip',\n 'remote_bytes': 973618,\n 'remote_md5': 'e2ae895bdf39f2a359a97bb0bcf76101',\n 'filename': 'TUT-SED-synthetic-2016.meta.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.1.zip',\n 'remote_bytes': 1026369647,\n 'remote_md5': 'ede8b9c6d1b0d1d64bfc5791404f58fb',\n 'filename': 'TUT-SED-synthetic-2016.audio.1.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.2.zip',\n 'remote_bytes': 1018650039,\n 'remote_md5': 'cde647a377a58fc74e3012139d65c447',\n 'filename': 'TUT-SED-synthetic-2016.audio.2.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.3.zip',\n 'remote_bytes': 1070239392,\n 'remote_md5': '5fc2824dcce442f441f4c6a975881789',\n 'filename': 'TUT-SED-synthetic-2016.audio.3.zip'\n },\n {\n 'content_type': 'audio',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.4.zip',\n 'remote_bytes': 1040622610,\n 'remote_md5': '4ba016d949171ccc8493d3d274009825',\n 'filename': 'TUT-SED-synthetic-2016.audio.4.zip'\n },\n {\n 
'content_type': 'audio',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.5.zip',\n 'remote_bytes': 264812997,\n 'remote_md5': '6a44578dd7738bd4ba044d5d2b9a5448',\n 'filename': 'TUT-SED-synthetic-2016.audio.5.zip'\n },\n {\n 'content_type': 'features',\n 'remote_file': source_url + 'TUT-SED-synthetic-2016.features.zip',\n 'remote_bytes': 480894082,\n 'remote_md5': '66bc0abc19a276986964a6d4a2d2f6bc',\n 'filename': 'TUT-SED-synthetic-2016.features.zip'\n }\n ]\n kwargs['audio_paths'] = ['audio']\n super(TUT_SED_Synthetic_2016, self).__init__(**kwargs)\n\n def prepare(self):\n \"\"\"Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n \"\"\"\n\n if not self.meta_container.exists():\n meta_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['txt'])\n meta_data = MetaDataContainer()\n for meta_filename in meta_files:\n audio_filename = os.path.join('audio', os.path.split(meta_filename)[1].replace('.txt', '.wav'))\n data = MetaDataContainer(filename=meta_filename).load()\n for item in data:\n item.filename = audio_filename\n item.scene_label = 'synthetic'\n item.source_label = 'm'\n self.process_meta_item(\n item=item,\n absolute_path=False\n )\n\n meta_data += data\n\n # Save meta\n meta_data.save(filename=self.meta_file)\n\n # Load meta and cross validation\n self.load()\n\n return self\n\n def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):\n parts = []\n if scene_label:\n parts.append(scene_label)\n\n if fold:\n parts.append('fold' + str(fold))\n\n if setup_part == 'train':\n return os.path.join(self.evaluation_setup_path, 'train+validate' + '.' + file_extension)\n\n elif setup_part == 'test':\n return os.path.join(self.evaluation_setup_path, 'test' + '.' + file_extension)\n\n elif setup_part == 'validate':\n return os.path.join(self.evaluation_setup_path, 'validate' + '.' + file_extension)\n\n elif setup_part == 'evaluate':\n return os.path.join(self.evaluation_setup_path, 'evaluate' + '.' + file_extension)\n\n def validation_split(self, fold=None, scene_label=None, **kwargs):\n validation_files = MetaDataContainer(\n filename=self.evaluation_setup_filename(setup_part='validate', fold=fold)\n ).load().unique_files\n\n for index, filename in enumerate(validation_files):\n validation_files[index] = self.relative_to_absolute_path(filename)\n\n return validation_files\n\n def file_features(self, filename):\n \"\"\"Pre-calculated acoustic features for given file\n\n Parameters\n ----------\n filename : str\n File name\n\n Returns\n -------\n data : numpy.ndarray\n Matrix containing acoustic features\n\n \"\"\"\n\n filename_ = self.absolute_to_relative_path(filename).replace('audio/', 'features/')\n filename_ = os.path.splitext(filename_)[0] + '.cpickle'\n if os.path.isfile(os.path.join(self.local_path, filename_)):\n feature_data = pickle.load(open(os.path.join(self.local_path, filename_), \"rb\"))\n return feature_data['feat']\n\n else:\n return None\n"
] |
[
[
"numpy.dstack",
"numpy.vstack",
"numpy.repeat",
"numpy.hstack",
"numpy.expand_dims"
],
[
"numpy.array"
]
] |
jaimeaguilera/Investing-projects
|
[
"8b598a6ce9fee626964008fa65d0c3e551091564"
] |
[
"utilities/kit.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom numpy.linalg import inv\n\ndef get_ffme_returns():\n \"\"\"\n Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap\n \"\"\"\n me_m = pd.read_csv(\"data/Portfolios_Formed_on_ME_monthly_EW.csv\",\n header=0, index_col=0, na_values=-99.99)\n rets = me_m[['Lo 10', 'Hi 10']]\n rets.columns = ['SmallCap', 'LargeCap']\n rets = rets/100\n rets.index = pd.to_datetime(rets.index, format=\"%Y%m\").to_period('M')\n return rets\n\ndef get_fff_returns():\n \"\"\"\n Load the Fama-French Research Factor Monthly Dataset\n \"\"\"\n rets = pd.read_csv(\"data/F-F_Research_Data_Factors_m.csv\",\n header=0, index_col=0, na_values=-99.99)/100\n rets.index = pd.to_datetime(rets.index, format=\"%Y%m\").to_period('M')\n return rets\n\n\ndef get_hfi_returns():\n \"\"\"\n Load and format the EDHEC Hedge Fund Index Returns\n \"\"\"\n hfi = pd.read_csv(\"data/edhec-hedgefundindices.csv\",\n header=0, index_col=0, parse_dates=True)\n hfi = hfi/100\n hfi.index = hfi.index.to_period('M')\n return hfi\n\ndef get_ind_file(filetype, weighting=\"vw\", n_inds=30):\n \"\"\"\n Load and format the Ken French Industry Portfolios files\n Variant is a tuple of (weighting, size) where:\n weighting is one of \"ew\", \"vw\"\n number of inds is 30 or 49\n \"\"\" \n if filetype is \"returns\":\n name = f\"{weighting}_rets\" \n divisor = 100\n elif filetype is \"nfirms\":\n name = \"nfirms\"\n divisor = 1\n elif filetype is \"size\":\n name = \"size\"\n divisor = 1\n else:\n raise ValueError(f\"filetype must be one of: returns, nfirms, size\")\n \n ind = pd.read_csv(f\"data/ind{n_inds}_m_{name}.csv\", header=0, index_col=0, na_values=-99.99)/divisor\n ind.index = pd.to_datetime(ind.index, format=\"%Y%m\").to_period('M')\n ind.columns = ind.columns.str.strip()\n return ind\n\ndef get_ind_returns(weighting=\"vw\", n_inds=30):\n \"\"\"\n Load and format the Ken French Industry Portfolios Monthly Returns\n \"\"\"\n return get_ind_file(\"returns\", weighting=weighting, n_inds=n_inds)\n\ndef get_ind_nfirms(n_inds=30):\n \"\"\"\n Load and format the Ken French 30 Industry Portfolios Average number of Firms\n \"\"\"\n return get_ind_file(\"nfirms\", n_inds=n_inds)\n\ndef get_ind_size(n_inds=30):\n \"\"\"\n Load and format the Ken French 30 Industry Portfolios Average size (market cap)\n \"\"\"\n return get_ind_file(\"size\", n_inds=n_inds)\n\n\ndef get_ind_market_caps(n_inds=30, weights=False):\n \"\"\"\n Load the industry portfolio data and derive the market caps\n \"\"\"\n ind_nfirms = get_ind_nfirms(n_inds=n_inds)\n ind_size = get_ind_size(n_inds=n_inds)\n ind_mktcap = ind_nfirms * ind_size\n if weights:\n total_mktcap = ind_mktcap.sum(axis=1)\n ind_capweight = ind_mktcap.divide(total_mktcap, axis=\"rows\")\n return ind_capweight\n #else\n return ind_mktcap\n\ndef get_total_market_index_returns(n_inds=30):\n \"\"\"\n Load the 30 industry portfolio data and derive the returns of a capweighted total market index\n \"\"\"\n ind_capweight = get_ind_market_caps(n_inds=n_inds)\n ind_return = get_ind_returns(weighting=\"vw\", n_inds=n_inds)\n total_market_return = (ind_capweight * ind_return).sum(axis=\"columns\")\n return total_market_return\n \ndef skewness(r):\n \"\"\"\n Alternative to scipy.stats.skew()\n Computes the skewness of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n r = r[(r!=0) & (r.notnull())]\n \n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = 
(demeaned_r**3).mean()\n return exp/sigma_r**3\n\n\ndef kurtosis(r):\n \"\"\"\n Alternative to scipy.stats.kurtosis()\n Computes the kurtosis of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n r = r[(r!=0) & (r.notnull())]\n \n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**4).mean()\n return exp/sigma_r**4\n\n\ndef compound(r):\n \"\"\"\n returns the result of compounding the set of returns in r\n \"\"\"\n return np.expm1(np.log1p(r).sum())\n\n \ndef annualize_rets(r):\n \"\"\"\n Annualizes a set of returns\n We should infer the periods per year\n but that is currently left as an exercise\n to the reader :-)\n \"\"\"\n r_valid = r[(r!=0) & (r.notnull())]\n\n date_beg = r_valid.agg(lambda x: x.first_valid_index())\n date_end = r_valid.agg(lambda x: x.last_valid_index())\n \n try:\n years_fraction = (date_end-date_beg).dt.days/365.2425\n except:\n years_fraction = (date_end-date_beg).days/365.2425\n\n compounded_growth = (1+r_valid).prod()\n\n return compounded_growth**(1/years_fraction)-1\n\ndef annualize_vol(r):\n \"\"\"\n Annualizes the vol of a set of returns\n We should infer the periods per year\n but that is currently left as an exercise\n to the reader :-)\n \"\"\"\n r_valid = r[(r!=0) & (r.notnull())]\n\n total_num_periods = r_valid.count()\n\n date_beg = r_valid.agg(lambda x: x.first_valid_index())\n date_end = r_valid.agg(lambda x: x.last_valid_index())\n \n try:\n years_fraction = (date_end-date_beg).dt.days/365.2425\n except:\n years_fraction = (date_end-date_beg).days/365.2425\n \n periods_per_year = total_num_periods/years_fraction\n\n return r_valid.std()*((periods_per_year)**0.5)\n \ndef sharpe_ratio(r, riskfree_rate):\n \"\"\"\n Computes the annualized sharpe ratio of a set of returns\n \"\"\"\n # convert the annual riskfree rate to per period\n r_valid = r[(r!=0) & (r.notnull())]\n\n total_num_periods = r_valid.count()\n\n date_beg = r_valid.agg(lambda x: x.first_valid_index())\n date_end = r_valid.agg(lambda x: x.last_valid_index())\n \n try:\n years_fraction = (date_end-date_beg).dt.days/365.2425\n except:\n years_fraction = (date_end-date_beg).days/365.2425\n \n periods_per_year = total_num_periods/years_fraction\n \n rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1\n excess_ret = r - rf_per_period\n ann_ex_ret = annualize_rets(excess_ret)\n ann_vol = annualize_vol(r)\n \n return ann_ex_ret/ann_vol\n\n\nimport scipy.stats\ndef is_normal(r, level=0.01):\n \"\"\"\n Applies the Jarque-Bera test to determine if a Series is normal or not\n Test is applied at the 1% level by default\n Returns True if the hypothesis of normality is accepted, False otherwise\n \"\"\"\n if isinstance(r, pd.DataFrame):\n return r.aggregate(is_normal)\n else:\n statistic, p_value = scipy.stats.jarque_bera(r)\n return p_value > level\n\n\ndef drawdown(return_series: pd.Series):\n \"\"\"Takes a time series of asset returns.\n returns a DataFrame with columns for\n the wealth index, \n the previous peaks, and \n the percentage drawdown\n \"\"\"\n wealth_index = 1000*(1+return_series).cumprod()\n previous_peaks = wealth_index.cummax()\n drawdowns = (wealth_index - previous_peaks)/previous_peaks\n return pd.DataFrame({\"Wealth\": wealth_index, \n \"Previous Peak\": previous_peaks, \n \"Drawdown\": drawdowns})\n\n\ndef semideviation(r):\n \"\"\"\n Returns the semideviation aka negative semideviation of r\n r must be a Series or a DataFrame, else raises a TypeError\n \"\"\"\n if 
isinstance(r, pd.Series):\n is_negative = r < 0\n return r[is_negative].std(ddof=0)\n elif isinstance(r, pd.DataFrame):\n return r.aggregate(semideviation)\n else:\n raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\ndef var_historic(r, level=5):\n \"\"\"\n Returns the historic Value at Risk at a specified level\n i.e. returns the number such that \"level\" percent of the returns\n fall below that number, and the (100-level) percent are above\n \"\"\"\n r = r[(r!=0) & (r.notnull())]\n \n if isinstance(r, pd.DataFrame):\n return r.aggregate(var_historic, level=level)\n elif isinstance(r, pd.Series):\n return -np.percentile(r, level)\n else:\n raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\ndef cvar_historic(r, level=5):\n \"\"\"\n Computes the Conditional VaR of Series or DataFrame\n \"\"\"\n r = r[(r!=0) & (r.notnull())]\n \n if isinstance(r, pd.Series):\n is_beyond = r <= -var_historic(r, level=level)\n return -r[is_beyond].mean()\n elif isinstance(r, pd.DataFrame):\n return r.aggregate(cvar_historic, level=level)\n else:\n raise TypeError(\"Expected r to be a Series or DataFrame\")\n\n\nfrom scipy.stats import norm\ndef var_gaussian(r, level=5, modified=False):\n \"\"\"\n Returns the Parametric Gauusian VaR of a Series or DataFrame\n If \"modified\" is True, then the modified VaR is returned,\n using the Cornish-Fisher modification\n \"\"\"\n # compute the Z score assuming it was Gaussian\n r = r[(r!=0) & (r.notnull())]\n \n z = norm.ppf(level/100)\n if modified:\n # modify the Z score based on observed skewness and kurtosis\n s = skewness(r)\n k = kurtosis(r)\n z = (z +\n (z**2 - 1)*s/6 +\n (z**3 -3*z)*(k-3)/24 -\n (2*z**3 - 5*z)*(s**2)/36\n )\n return -(r.mean() + z*r.std(ddof=0))\n\n\ndef portfolio_return(weights, returns):\n \"\"\"\n Computes the return on a portfolio from constituent returns and weights\n weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix\n \"\"\"\n return weights.T @ returns\n\n\ndef portfolio_vol(weights, covmat):\n \"\"\"\n Computes the vol of a portfolio from a covariance matrix and constituent weights\n weights are a numpy array or N x 1 maxtrix and covmat is an N x N matrix\n \"\"\"\n vol = (weights.T @ covmat @ weights)**0.5\n return vol \n\n\ndef plot_ef2(n_points, er, cov):\n \"\"\"\n Plots the 2-asset efficient frontier\n \"\"\"\n if er.shape[0] != 2 or er.shape[0] != 2:\n raise ValueError(\"plot_ef2 can only plot 2-asset frontiers\")\n weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]\n rets = [portfolio_return(w, er) for w in weights]\n vols = [portfolio_vol(w, cov) for w in weights]\n ef = pd.DataFrame({\n \"Returns\": rets, \n \"Volatility\": vols\n })\n return ef.plot.line(x=\"Volatility\", y=\"Returns\", style=\".-\")\n\n\nfrom scipy.optimize import minimize\n\ndef minimize_vol(target_return, er, cov):\n \"\"\"\n Returns the optimal weights that achieve the target return\n given a set of expected returns and a covariance matrix\n \"\"\"\n n = er.shape[0]\n init_guess = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!\n # construct the constraints\n weights_sum_to_1 = {'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n return_is_target = {'type': 'eq',\n 'args': (er,),\n 'fun': lambda weights, er: target_return - portfolio_return(weights,er)\n }\n weights = minimize(portfolio_vol, init_guess,\n args=(cov,), method='SLSQP',\n options={'disp': False},\n constraints=(weights_sum_to_1,return_is_target),\n bounds=bounds)\n return 
weights.x\n\n\ndef tracking_error(r_a, r_b):\n \"\"\"\n Returns the Tracking Error between the two return series\n \"\"\"\n return np.sqrt(((r_a - r_b)**2).sum())\n\n \ndef msr(riskfree_rate, er, cov):\n \"\"\"\n Returns the weights of the portfolio that gives you the maximum sharpe ratio\n given the riskfree rate and expected returns and a covariance matrix\n \"\"\"\n n = er.shape[0]\n init_guess = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!\n # construct the constraints\n weights_sum_to_1 = {'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n def neg_sharpe(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the negative of the sharpe ratio\n of the given portfolio\n \"\"\"\n r = portfolio_return(weights, er)\n vol = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/vol\n \n weights = minimize(neg_sharpe, init_guess,\n args=(riskfree_rate, er, cov), method='SLSQP',\n options={'disp': False},\n constraints=(weights_sum_to_1,),\n bounds=bounds)\n return weights.x\n\n\ndef gmv(cov):\n \"\"\"\n Returns the weights of the Global Minimum Volatility portfolio\n given a covariance matrix\n \"\"\"\n n = cov.shape[0]\n return msr(0, np.repeat(1, n), cov)\n\n\ndef optimal_weights(n_points, er, cov):\n \"\"\"\n Returns a list of weights that represent a grid of n_points on the efficient frontier\n \"\"\"\n target_rs = np.linspace(er.min(), er.max(), n_points)\n weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]\n return weights\n\n\ndef plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):\n \"\"\"\n Plots the multi-asset efficient frontier\n \"\"\"\n weights = optimal_weights(n_points, er, cov)\n rets = [portfolio_return(w, er) for w in weights]\n vols = [portfolio_vol(w, cov) for w in weights]\n ef = pd.DataFrame({\n \"Returns\": rets, \n \"Volatility\": vols\n })\n ax = ef.plot.line(x=\"Volatility\", y=\"Returns\", style=style, legend=legend)\n if show_cml:\n ax.set_xlim(left = 0)\n # get MSR\n w_msr = msr(riskfree_rate, er, cov)\n r_msr = portfolio_return(w_msr, er)\n vol_msr = portfolio_vol(w_msr, cov)\n # add CML\n cml_x = [0, vol_msr]\n cml_y = [riskfree_rate, r_msr]\n ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)\n if show_ew:\n n = er.shape[0]\n w_ew = np.repeat(1/n, n)\n r_ew = portfolio_return(w_ew, er)\n vol_ew = portfolio_vol(w_ew, cov)\n # add EW\n ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)\n if show_gmv:\n w_gmv = gmv(cov)\n r_gmv = portfolio_return(w_gmv, er)\n vol_gmv = portfolio_vol(w_gmv, cov)\n # add EW\n ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)\n \n return ax\n\n \ndef run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):\n \"\"\"\n Run a backtest of the CPPI strategy, given a set of returns for the risky asset\n Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History\n \"\"\"\n # set up the CPPI parameters\n dates = risky_r.index\n n_steps = len(dates)\n account_value = start\n floor_value = start*floor\n peak = account_value\n if isinstance(risky_r, pd.Series): \n risky_r = pd.DataFrame(risky_r, columns=[\"R\"])\n\n if safe_r is None:\n safe_r = pd.DataFrame().reindex_like(risky_r)\n safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number\n # set up some DataFrames for saving intermediate values\n account_history = 
pd.DataFrame().reindex_like(risky_r)\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\n cushion_history = pd.DataFrame().reindex_like(risky_r)\n floorval_history = pd.DataFrame().reindex_like(risky_r)\n peak_history = pd.DataFrame().reindex_like(risky_r)\n\n for step in range(n_steps):\n if drawdown is not None:\n peak = np.maximum(peak, account_value)\n floor_value = peak*(1-drawdown)\n cushion = (account_value - floor_value)/account_value\n risky_w = m*cushion\n risky_w = np.minimum(risky_w, 1)\n risky_w = np.maximum(risky_w, 0)\n safe_w = 1-risky_w\n risky_alloc = account_value*risky_w\n safe_alloc = account_value*safe_w\n # recompute the new account value at the end of this step\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\n # save the histories for analysis and plotting\n cushion_history.iloc[step] = cushion\n risky_w_history.iloc[step] = risky_w\n account_history.iloc[step] = account_value\n floorval_history.iloc[step] = floor_value\n peak_history.iloc[step] = peak\n risky_wealth = start*(1+risky_r).cumprod()\n backtest_result = {\n \"Wealth\": account_history,\n \"Risky Wealth\": risky_wealth, \n \"Risk Budget\": cushion_history,\n \"Risky Allocation\": risky_w_history,\n \"m\": m,\n \"start\": start,\n \"floor\": floor,\n \"risky_r\":risky_r,\n \"safe_r\": safe_r,\n \"drawdown\": drawdown,\n \"peak\": peak_history,\n \"floor\": floorval_history\n }\n return backtest_result\n\n\ndef summary_stats(r, riskfree_rate=0.03):\n \"\"\"\n Return a DataFrame that contains aggregated summary stats for the returns in the columns of r\n \"\"\"\n ann_r = annualize_rets(r)\n ann_vol = annualize_vol(r)\n ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)\n dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())\n skew = r.aggregate(skewness)\n kurt = r.aggregate(kurtosis)\n cf_var5 = r.aggregate(var_gaussian, modified=True)\n hist_cvar5 = r.aggregate(cvar_historic)\n return pd.DataFrame({\n \"Annualized Return\": ann_r,\n \"Annualized Vol\": ann_vol,\n \"Skewness\": skew,\n \"Kurtosis\": kurt,\n \"Cornish-Fisher VaR (5%)\": cf_var5,\n \"Historic CVaR (5%)\": hist_cvar5,\n \"Sharpe Ratio\": ann_sr,\n \"Max Drawdown\": dd\n })\n\n \ndef gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):\n \"\"\"\n Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo\n :param n_years: The number of years to generate data for\n :param n_paths: The number of scenarios/trajectories\n :param mu: Annualized Drift, e.g. 
Market Return\n :param sigma: Annualized Volatility\n :param steps_per_year: granularity of the simulation\n :param s_0: initial value\n :return: a numpy array of n_paths columns and n_years*steps_per_year rows\n \"\"\"\n # Derive per-step Model Parameters from User Specifications\n dt = 1/steps_per_year\n n_steps = int(n_years*steps_per_year) + 1\n # the standard way ...\n # rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))\n # without discretization error ...\n rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))\n rets_plus_1[0] = 1\n ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1\n return ret_val\n\n \nimport statsmodels.api as sm\ndef regress(dependent_variable, explanatory_variables, alpha=True):\n \"\"\"\n Runs a linear regression to decompose the dependent variable into the explanatory variables\n returns an object of type statsmodel's RegressionResults on which you can call\n .summary() to print a full summary\n .params for the coefficients\n .tvalues and .pvalues for the significance levels\n .rsquared_adj and .rsquared for quality of fit\n \"\"\"\n if alpha:\n explanatory_variables = explanatory_variables.copy()\n explanatory_variables[\"Alpha\"] = 1\n \n lm = sm.OLS(dependent_variable, explanatory_variables).fit()\n return lm\n\ndef portfolio_tracking_error(weights, ref_r, bb_r):\n \"\"\"\n returns the tracking error between the reference returns\n and a portfolio of building block returns held with given weights\n \"\"\"\n return tracking_error(ref_r, (weights*bb_r).sum(axis=1))\n \ndef style_analysis(dependent_variable, explanatory_variables):\n \"\"\"\n Returns the optimal weights that minimizes the Tracking error between\n a portfolio of the explanatory variables and the dependent variable\n \"\"\"\n n = explanatory_variables.shape[1]\n init_guess = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!\n # construct the constraints\n weights_sum_to_1 = {'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n solution = minimize(portfolio_tracking_error, init_guess,\n args=(dependent_variable, explanatory_variables,), method='SLSQP',\n options={'disp': False},\n constraints=(weights_sum_to_1,),\n bounds=bounds)\n weights = pd.Series(solution.x, index=explanatory_variables.columns)\n return weights\n\n\ndef ff_analysis(r, factors):\n \"\"\"\n Returns the loadings of r on the Fama French Factors\n which can be read in using get_fff_returns()\n the index of r must be a (not necessarily proper) subset of the index of factors\n r is either a Series or a DataFrame\n \"\"\"\n if isinstance(r, pd.Series):\n dependent_variable = r\n explanatory_variables = factors.loc[r.index]\n tilts = regress(dependent_variable, explanatory_variables).params\n elif isinstance(r, pd.DataFrame):\n tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})\n else:\n raise TypeError(\"r must be a Series or a DataFrame\")\n return tilts\n\ndef weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):\n \"\"\"\n Returns the weights of the EW portfolio based on the asset returns \"r\" as a DataFrame\n If supplied a set of capweights and a capweight tether, it is applied and reweighted \n \"\"\"\n n = len(r.columns)\n ew = pd.Series(1/n, index=r.columns)\n if cap_weights is not None:\n cw = cap_weights.loc[r.index[0]] # starting cap weight\n ## exclude microcaps\n if microcap_threshold is not None and 
microcap_threshold > 0:\n microcap = cw < microcap_threshold\n ew[microcap] = 0\n ew = ew/ew.sum()\n #limit weight to a multiple of capweight\n if max_cw_mult is not None and max_cw_mult > 0:\n ew = np.minimum(ew, cw*max_cw_mult)\n ew = ew/ew.sum() #reweight\n return ew\n\ndef weight_cw(r, cap_weights, **kwargs):\n \"\"\"\n Returns the weights of the CW portfolio based on the time series of capweights\n \"\"\"\n w = cap_weights.loc[r.index[1]]\n return w/w.sum()\n\ndef backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):\n \"\"\"\n Backtests a given weighting scheme, given some parameters:\n r : asset returns to use to build the portfolio\n estimation_window: the window to use to estimate parameters\n weighting: the weighting scheme to use, must be a function that takes \"r\", and a variable number of keyword-value arguments\n \"\"\"\n n_periods = r.shape[0]\n # return windows\n windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]\n weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]\n # convert List of weights to DataFrame\n weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)\n returns = (weights * r).sum(axis=\"columns\", min_count=1) #mincount is to generate NAs if all inputs are NAs\n return returns\n\ndef sample_cov(r, **kwargs):\n \"\"\"\n Returns the sample covariance of the supplied returns\n \"\"\"\n return r.cov()\n\ndef weight_gmv(r, cov_estimator=sample_cov, **kwargs):\n \"\"\"\n Produces the weights of the GMV portfolio given a covariance matrix of the returns \n \"\"\"\n est_cov = cov_estimator(r, **kwargs)\n return gmv(est_cov)\n\ndef cc_cov(r, **kwargs):\n \"\"\"\n Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model\n \"\"\"\n rhos = r.corr()\n n = rhos.shape[0]\n # this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...\n rho_bar = (rhos.values.sum()-n)/(n*(n-1))\n ccor = np.full_like(rhos, rho_bar)\n np.fill_diagonal(ccor, 1.)\n sd = r.std()\n return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)\n\ndef shrinkage_cov(r, delta=0.5, **kwargs):\n \"\"\"\n Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators\n \"\"\"\n prior = cc_cov(r, **kwargs)\n sample = sample_cov(r, **kwargs)\n return delta*prior + (1-delta)*sample\n\ndef risk_contribution(w,cov):\n \"\"\"\n Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix\n \"\"\"\n total_portfolio_var = portfolio_vol(w,cov)**2\n # Marginal contribution of each constituent\n marginal_contrib = cov@w\n risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var\n return risk_contrib\n\ndef target_risk_contributions(target_risk, cov):\n \"\"\"\n Returns the weights of the portfolio that gives you the weights such\n that the contributions to portfolio risk are as close as possible to\n the target_risk, given the covariance matrix\n \"\"\"\n n = cov.shape[0]\n init_guess = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!\n # construct the constraints\n weights_sum_to_1 = {'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n def msd_risk(weights, target_risk, cov):\n \"\"\"\n Returns the Mean Squared Difference in risk contributions\n between weights and target_risk\n \"\"\"\n w_contribs = risk_contribution(weights, cov)\n return 
((w_contribs-target_risk)**2).sum()\n \n weights = minimize(msd_risk, init_guess,\n args=(target_risk, cov), method='SLSQP',\n options={'disp': False},\n constraints=(weights_sum_to_1,),\n bounds=bounds)\n return weights.x\n\ndef equal_risk_contributions(cov):\n \"\"\"\n Returns the weights of the portfolio that equalizes the contributions\n of the constituents based on the given covariance matrix\n \"\"\"\n n = cov.shape[0]\n return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)\n\ndef weight_erc(r, cov_estimator=sample_cov, **kwargs):\n \"\"\"\n Produces the weights of the ERC portfolio given a covariance matrix of the returns \n \"\"\"\n est_cov = cov_estimator(r, **kwargs)\n return equal_risk_contributions(est_cov)\n\ndef implied_returns(delta, sigma, w):\n \"\"\"\nObtain the implied expected returns by reverse engineering the weights\nInputs:\ndelta: Risk Aversion Coefficient (scalar)\nsigma: Variance-Covariance Matrix (N x N) as DataFrame\n w: Portfolio weights (N x 1) as Series\nReturns an N x 1 vector of Returns as Series\n \"\"\"\n ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe\n ir.name = 'Implied Returns'\n return ir\n\n# Assumes that Omega is proportional to the variance of the prior\ndef proportional_prior(sigma, tau, p):\n \"\"\"\n Returns the He-Litterman simplified Omega\n Inputs:\n sigma: N x N Covariance Matrix as DataFrame\n tau: a scalar\n p: a K x N DataFrame linking Q and Assets\n returns a P x P DataFrame, a Matrix representing Prior Uncertainties\n \"\"\"\n helit_omega = p.dot(tau * sigma).dot(p.T)\n # Make a diag matrix from the diag elements of Omega\n return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)\n\ndef bl(w_prior, sigma_prior, p, q,\n omega=None,\n delta=2.5, tau=.02):\n \"\"\"\n# Computes the posterior expected returns based on \n# the original black litterman reference model\n#\n# W.prior must be an N x 1 vector of weights, a Series\n# Sigma.prior is an N x N covariance matrix, a DataFrame\n# P must be a K x N matrix linking Q and the Assets, a DataFrame\n# Q must be an K x 1 vector of views, a Series\n# Omega must be a K x K matrix a DataFrame, or None\n# if Omega is None, we assume it is\n# proportional to variance of the prior\n# delta and tau are scalars\n \"\"\"\n if omega is None:\n omega = proportional_prior(sigma_prior, tau, p)\n # Force w.prior and Q to be column vectors\n # How many assets do we have?\n N = w_prior.shape[0]\n # And how many views?\n K = q.shape[0]\n # First, reverse-engineer the weights to get pi\n pi = implied_returns(delta, sigma_prior, w_prior)\n # Adjust (scale) Sigma by the uncertainty scaling factor\n sigma_prior_scaled = tau * sigma_prior \n # posterior estimate of the mean, use the \"Master Formula\"\n # we use the versions that do not require\n # Omega to be inverted (see previous section)\n # this is easier to read if we use '@' for matrixmult instead of .dot()\n # mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)\n mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))\n # posterior estimate of uncertainty of mu.bl\n# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled\n sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)\n return 
(mu_bl, sigma_bl)\n\n# for convenience and readability, define the inverse of a dataframe\ndef inverse(d):\n \"\"\"\n Invert the dataframe by inverting the underlying matrix\n \"\"\"\n return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)\n\ndef weight_msr(sigma, mu, scale=True):\n \"\"\"\n Optimal (Tangent/Max Sharpe Ratio) Portfolio weights\n by using the Markowitz Optimization Procedure\n Mu is the vector of Excess expected Returns\n Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series\n This implements page 188 Equation 5.2.28 of\n \"The econometrics of financial markets\" Campbell, Lo and Mackinlay.\n \"\"\"\n w = inverse(sigma).dot(mu)\n if scale:\n w = w/sum(w) # fix: this assumes all w is +ve\n return w\n"
] |
[
[
"numpy.minimum",
"numpy.multiply",
"numpy.outer",
"pandas.read_csv",
"numpy.full_like",
"pandas.DataFrame",
"numpy.log1p",
"numpy.sqrt",
"numpy.linalg.inv",
"scipy.optimize.minimize",
"pandas.to_datetime",
"numpy.array",
"numpy.percentile",
"scipy.stats.norm.ppf",
"numpy.fill_diagonal",
"numpy.sum",
"pandas.Series",
"numpy.repeat",
"numpy.linspace",
"numpy.diag",
"numpy.maximum"
]
] |