repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
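Each data row below holds one repository, with the hexsha, file_path, code, and apis columns carrying parallel lists (one entry per source file; apis is a list of API-name lists). A minimal sketch of iterating rows with this schema, assuming the data has been exported locally as JSON Lines; the file name `code_apis.jsonl` is hypothetical and not given by the table:

```python
import json

# Read one JSON object per line; each object mirrors the columns in the header above.
with open("code_apis.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # hexsha, file_path, code, and apis are parallel lists: one entry per file in the repo.
        for sha, path, code, apis in zip(row["hexsha"], row["file_path"], row["code"], row["apis"]):
            print(row["repo_name"], path, sha[:8], f"{len(code)} chars, {len(apis)} APIs")
```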
LeeDaeil/PyQt5_study
|
[
"ecdd22ce2809ce6f01c8691a7ca75ef1771b7202"
] |
[
"Study_6/CNS_Fun.py"
] |
[
"import multiprocessing\nimport time\n\n\nclass function1(multiprocessing.Process):\n def __init__(self, mem):\n multiprocessing.Process.__init__(self)\n self.mem = mem[0] # main mem connection\n\n def run(self):\n while True:\n print(self, self.mem['QPROLD'])\n time.sleep(1)\n\n\nclass function2(multiprocessing.Process):\n def __init__(self, mem):\n multiprocessing.Process.__init__(self)\n self.mem = mem\n self.mem2 = mem[2]\n\n def run(self):\n while True:\n # print(self, self.mem[1]['Test'], '->', 1, self.mem2)\n self.mem[1]['Test'] = 1\n self.mem[2].append(1)\n time.sleep(1)\n\n\nclass function3(multiprocessing.Process):\n def __init__(self, mem):\n multiprocessing.Process.__init__(self)\n self.mem = mem\n self.mem2 = mem[2]\n\n def run(self):\n while True:\n # print(self, self.mem[1]['Test'], '->', 2, self.mem2)\n self.mem[1]['Test'] = 2\n self.mem[2].append(2)\n time.sleep(3)\n\n#========================================================================\n\n\nclass t_function1(multiprocessing.Process):\n def __init__(self, mem):\n multiprocessing.Process.__init__(self)\n self.mem = mem[0] # main mem connection\n\n def run(self):\n para = ['KMSISO']\n while True:\n print(self, self.mem['KCNTOMS']['V'])\n time.sleep(1)\n\n\n#========================================================================\n# Interface part\n#========================================================================\nimport sys\nfrom PyQt5.QtWidgets import QDialog, QApplication\nfrom PyQt5 import QtCore\nfrom ui_data.gui_study_6 import Ui_Dialog\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\n\n\nclass MyForm(QDialog):\n def __init__(self, mem):\n super().__init__()\n\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n\n self.mem = mem\n\n self.draw_power_gp()\n self.draw_turbin_gp()\n\n self.blick_switch = True\n\n # x msec마다 업데이트\n timer = QtCore.QTimer(self)\n timer.timeout.connect(self.update_gp)\n timer.timeout.connect(self.update_label)\n timer.timeout.connect(self.update_alarm)\n timer.start(500)\n\n self.show()\n\n def update_alarm(self):\n # rgb(227, 227, 227) : red, rgb(255, 0, 0): gray\n if self.mem['KLAMPO21']['V'] == 1:\n self.ui.arlarm_1.setStyleSheet(\"background-color: rgb(227, 227, 227);\")\n elif self.mem['KLAMPO21']['V'] == 0:\n self.ui.arlarm_1.setStyleSheet(\"background-color: rgb(255, 0, 0);\")\n\n if self.mem['KLAMPO22']['V'] == 1:\n self.ui.arlarm_2.setStyleSheet(\"background-color: rgb(227, 227, 227);\")\n elif self.mem['KLAMPO22']['V'] == 0:\n self.ui.arlarm_2.setStyleSheet(\"background-color: rgb(255, 0, 0);\")\n\n if self.blick_switch:\n self.ui.arlarm_3.setStyleSheet(\"background-color: rgb(255, 0, 0);\")\n self.blick_switch = False\n else:\n self.ui.arlarm_3.setStyleSheet(\"background-color: rgb(227, 227, 227);\")\n self.blick_switch = True\n\n def update_label(self):\n self.ui.power_label_1.setText('Reactor Power : {:0.2f}[%]'.format(self.mem['QPROLD']['V']*100))\n self.ui.turbine_label_1.setText('Turbine Load : {}[Mwe]'.format(self.mem['KBCDO22']['V']))\n self.ui.power_label_2.setText('{:0.2f}[%]'.format(self.mem['QPROLD']['V']*100))\n self.ui.turbine_label_2.setText('{}[Mwe]'.format(self.mem['KBCDO22']['V']))\n\n def update_gp(self):\n # self.ui.label.setText(\"{}\".format(self.mem['QPROLD']['V']))\n self.p_ax.clear()\n self.t_ax.clear()\n tempx = [x for x in range(0, len(self.mem['QPROLD']['L']))]\n self.p_ax.plot(self.mem['QPROLD']['L'])\n self.p_ax.set_ylim(-0.2, 1.2)\n self.p_ax.set_yticks([0, 0.25, 0.5, 0.75, 1.0])\n 
self.p_ax.set_yticklabels([0, 25, 50, 75, 100])\n\n self.t_ax.plot(self.mem['KBCDO22']['L'])\n\n self.p_ax.grid()\n self.t_ax.grid()\n self.p_fig.tight_layout(pad=0.1)\n self.t_fig.tight_layout(pad=0.1)\n self.p_canvas.draw()\n self.t_canvas.draw()\n\n def draw_power_gp(self):\n self.p_fig = plt.figure()\n self.p_ax = self.p_fig.add_subplot(111)\n # self.ax1 = self.fig.add_subplot(122)\n self.p_canvas = FigureCanvasQTAgg(self.p_fig)\n self.ui.power_layout.addWidget(self.p_canvas)\n\n def draw_turbin_gp(self):\n self.t_fig = plt.figure()\n self.t_ax = self.t_fig.add_subplot(111)\n # self.ax1 = self.fig.add_subplot(122)\n self.t_canvas = FigureCanvasQTAgg(self.t_fig)\n self.ui.power_layout_2.addWidget(self.t_canvas)\n\n\nclass interface_function(multiprocessing.Process):\n def __init__(self, mem):\n multiprocessing.Process.__init__(self)\n self.mem = mem[0]\n\n def run(self):\n app = QApplication(sys.argv)\n w = MyForm(self.mem)\n w.exec()\n sys.exit(app.exec_())"
] |
[
[
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.figure"
]
] |
hustlibraco/MockingBird
|
[
"c396792b22613e8ac37b1c51bd38bd462909251f"
] |
[
"synthesizer/models/global_style_token.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as tFunctional\nfrom synthesizer.gst_hyperparameters import GSTHyperparameters as hp\n\n\nclass GlobalStyleToken(nn.Module):\n\n def __init__(self):\n\n super().__init__()\n self.encoder = ReferenceEncoder()\n self.stl = STL()\n\n def forward(self, inputs):\n enc_out = self.encoder(inputs)\n style_embed = self.stl(enc_out)\n\n return style_embed\n\n\nclass ReferenceEncoder(nn.Module):\n '''\n inputs --- [N, Ty/r, n_mels*r] mels\n outputs --- [N, ref_enc_gru_size]\n '''\n\n def __init__(self):\n\n super().__init__()\n K = len(hp.ref_enc_filters)\n filters = [1] + hp.ref_enc_filters\n convs = [nn.Conv2d(in_channels=filters[i],\n out_channels=filters[i + 1],\n kernel_size=(3, 3),\n stride=(2, 2),\n padding=(1, 1)) for i in range(K)]\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList([nn.BatchNorm2d(num_features=hp.ref_enc_filters[i]) for i in range(K)])\n\n out_channels = self.calculate_channels(hp.n_mels, 3, 2, 1, K)\n self.gru = nn.GRU(input_size=hp.ref_enc_filters[-1] * out_channels,\n hidden_size=hp.E // 2,\n batch_first=True)\n\n def forward(self, inputs):\n N = inputs.size(0)\n out = inputs.view(N, 1, -1, hp.n_mels) # [N, 1, Ty, n_mels]\n for conv, bn in zip(self.convs, self.bns):\n out = conv(out)\n out = bn(out)\n out = tFunctional.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]\n\n out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]\n T = out.size(1)\n N = out.size(0)\n out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]\n\n self.gru.flatten_parameters()\n memory, out = self.gru(out) # out --- [1, N, E//2]\n\n return out.squeeze(0)\n\n def calculate_channels(self, L, kernel_size, stride, pad, n_convs):\n for i in range(n_convs):\n L = (L - kernel_size + 2 * pad) // stride + 1\n return L\n\n\nclass STL(nn.Module):\n '''\n inputs --- [N, E//2]\n '''\n\n def __init__(self):\n\n super().__init__()\n self.embed = nn.Parameter(torch.FloatTensor(hp.token_num, hp.E // hp.num_heads))\n d_q = hp.E // 2\n d_k = hp.E // hp.num_heads\n # self.attention = MultiHeadAttention(hp.num_heads, d_model, d_q, d_v)\n self.attention = MultiHeadAttention(query_dim=d_q, key_dim=d_k, num_units=hp.E, num_heads=hp.num_heads)\n\n init.normal_(self.embed, mean=0, std=0.5)\n\n def forward(self, inputs):\n N = inputs.size(0)\n query = inputs.unsqueeze(1) # [N, 1, E//2]\n keys = tFunctional.tanh(self.embed).unsqueeze(0).expand(N, -1, -1) # [N, token_num, E // num_heads]\n style_embed = self.attention(query, keys)\n\n return style_embed\n\n\nclass MultiHeadAttention(nn.Module):\n '''\n input:\n query --- [N, T_q, query_dim]\n key --- [N, T_k, key_dim]\n output:\n out --- [N, T_q, num_units]\n '''\n\n def __init__(self, query_dim, key_dim, num_units, num_heads):\n\n super().__init__()\n self.num_units = num_units\n self.num_heads = num_heads\n self.key_dim = key_dim\n\n self.W_query = nn.Linear(in_features=query_dim, out_features=num_units, bias=False)\n self.W_key = nn.Linear(in_features=key_dim, out_features=num_units, bias=False)\n self.W_value = nn.Linear(in_features=key_dim, out_features=num_units, bias=False)\n\n def forward(self, query, key):\n querys = self.W_query(query) # [N, T_q, num_units]\n keys = self.W_key(key) # [N, T_k, num_units]\n values = self.W_value(key)\n\n split_size = self.num_units // self.num_heads\n querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0) # [h, N, T_q, num_units/h]\n keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0) # [h, N, 
T_k, num_units/h]\n values = torch.stack(torch.split(values, split_size, dim=2), dim=0) # [h, N, T_k, num_units/h]\n\n # score = softmax(QK^T / (d_k ** 0.5))\n scores = torch.matmul(querys, keys.transpose(2, 3)) # [h, N, T_q, T_k]\n scores = scores / (self.key_dim ** 0.5)\n scores = tFunctional.softmax(scores, dim=3)\n\n # out = score * V\n out = torch.matmul(scores, values) # [h, N, T_q, num_units/h]\n out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0) # [N, T_q, num_units]\n\n return out\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.GRU",
"torch.nn.ModuleList",
"torch.FloatTensor",
"torch.split",
"torch.nn.BatchNorm2d",
"torch.nn.init.normal_",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.functional.relu",
"torch.matmul",
"torch.nn.functional.tanh"
]
] |
anewmark/galaxy_dark_matter
|
[
"b5261e4e413d3a18a45a19e92f7545adc408878a",
"b5261e4e413d3a18a45a19e92f7545adc408878a"
] |
[
"lin-log_test.py",
"call_ages.py"
] |
[
"print('Testing Lin v Log')\n\n\ntest=2\nif test==1:\n\timport astropy.table as table \n\timport numpy as np\n\tfrom defcuts import *\n\tfrom defflags import *\n\tfrom halflight_first import *\n\tfrom def_get_mags import *\n\tfrom def_halflight_math import *\n\t\n\tbands=['g', 'r', 'i','z', 'y']\n\tdaperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]\n\taperture=[x*0.5 for x in daperture]\n\n\tty='mean'\n\tstax=True\n\tif stax==False:\n\t\ttag=''\n\telse:\n\t\ttag='uplim'\n\ttxtdist= 'Figure2'\n\ttxtslope='Figure1'\n\n\toutdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/clumps/+LL'+ty+tag\n\tdoutdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/distribution/+LL'+ty+tag\n\tFlags=['flags_pixel_bright_object_center', 'brobj_cen_flag-', 'No Bright Ojbect Centers', 'Only Bright Object Centers', 'brobj_cen_flag']\n\n\tindir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'\n\tbigdata = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs+cmodmag.fits')\n\tdef do_cuts(datatab):\n\t\tparm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']\n\t\tne=[99.99, 199.99, 0.0]\n\t\tmincut=0.1\n\t\tmaxcut=''\n\t\tcutdata=not_cut(datatab, bands, 'mag_forced_cmodel', ne)\n\t\tfor b in range(0, len(bands)-1):\n\t\t\tnewdata=many_flags(cutdata, parm, bands[b])\t#flags not in y?\n\t\t\tcutdata=newdata\n\t\n\t\treturn newdata\n\tdef get_TF(data):\n\t\tbandi=['i']\n\t\tFlag, Not,lab= TFflag(bandi,Flags, data)\n\t\treturn Flag, Not\t\n\t\n\tnewdata=do_cuts(bigdata)\n\n\tFlagdat, Notdat=get_TF(newdata)\n\n\tdef my_halflight2(dat1, sc=''):\n\t\tloglum, lograd, loglumd= get_ind_lums(dat1, bands, aperture, scale='log')\n\t\n\t\tif stax==True:\n\t\t\tloglum, lograd, loglumd= upper_rad_cut(loglum, lograd, loglumd, 4, proof=False)\n\t\t#print('length of radius array is ', len(lograd))\n\t\n\t\tmloglum, mlogdens, mlograd, mlogerr= get_avg_lums(loglum, lograd, loglumd, gr=[1,80,11], type=ty, scale=sc)\n\t\n\t\tlogr12s= get_halflight(loglum, lograd)\n\t\n\t\tlogr12= get_halflight(mloglum, mlograd)\n\t\n\t\tMs, cs, errs= get_slopes(logr12s, lograd, loglumd, error=None, smax=stax)\n\t\tM, c, logrcut, logldcut, sterr, errcut =get_slopes(logr12, mlograd, mlogdens, error=mlogerr, smax=stax)\n\t\tprint(sterr)\n\t\n\t\tcutmlogld = M * logrcut + c\n\t\n\t\tind=[loglum, loglumd, lograd, logr12s]\n\t\tmeans=[mloglum,mlogdens,mlograd,logr12, mlogerr]\n\t\tind_slope=[Ms, cs, errs]\n\t\tmean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]\n\t\t#logrcut and logldcut are for lines of best fit\n\t\n\t\treturn ind, means, ind_slope, mean_slopes\n\t\n\tinds1, means1, ind_slope1, mean_slopes1=my_halflight2(Flagdat, sc='lindata')\n\n\tinds2, means2, ind_slope2, mean_slopes2=my_halflight2(Flagdat, sc='')\n\n\tdef my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2):\n\t\timport matplotlib.pyplot as plt\n\t\timport numpy as np\n\t\timport math\n\t\t#ind=[loglum, loglumd, lograd, logr12s]\n\t\t#means=[mloglum,mlogdens,lograd,logr12, mlogerr]\n\t\t#ind_slope=[Ms, cs, errs]\n\t\t#mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]\n\t\n\t\tdef lum_mult_fit(x1, x2, y1, y2, xcut1, xcut2, yfit1, yfit2, sterr1, sterr2 , m1, m2, error1, error2, outdir=''):\n\t\t\tprint('Make Scatter Plots')\n\t\t\tf=plt.figure()\n\t\t\tplt.scatter(x1, y1, color='r', marker='o',label='Linearly Averaged')\n\t\t\tplt.plot(xcut1, 
yfit1, color='m', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- '+str(np.round(sterr1,2)))\n\t\t\tplt.errorbar(x1, y1, yerr=error1, fmt='.',color='r')\t\n\n\t\t\tplt.scatter(x2, y2, color='b', marker='o',label='Log Averaged ')\n\t\t\tplt.plot(xcut2, yfit2, color='c', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))\n\t\t\tplt.errorbar(x2, y2, yerr=error2, fmt='.',color='b')\n\n\t\t\tplt.xlabel('Log Radii (kpc)')\n\t\t\tplt.ylabel('Luminosity Densities (Lsolar/kpc^2)')\n\t\t\tplt.title('Average Luminosity Densities v Radii')\n\t\t\t#plt.xlim(math.log10(1), math.log10(80))\n\t\t\t#plt.ylim(6,8.6)\n\t\t\tplt.legend(loc=0,prop={'size':6.0})\n\t\t\tf.text(0.05, 0.05, txtslope, color='red', weight='bold')\n\t\t\toutdirs=outdir+tag+'TF.pdf'\n\t\t\t#plt.show()\n\t\t\tf.savefig(outdirs)\n\t\t\tprint(outdirs)\n\n\t\tdef dist_mean(m1s, m2s, m1, m2, sterr1, sterr2, KS=False):\n\n\t\t\tfigs=plt.figure()\n\t\t\tbs=np.linspace(-2.0,-1.4,num=15, endpoint=False)\n\t\t\tn1, b1, p1= plt.hist(m1s, bs, color='red', label='Linearly Averaged ('+str(len(m1s))+')', alpha=0.8)\n\t\t\tn2, b2, p2= plt.hist(m2s,bs, color='blue', label='Log Averaged ('+str(len(m2s))+')', alpha=0.8)\n\t\t\n\t\t\tts=''\n\t\t\tif KS==True:\n\t\t\t\tM=m1s+m2s\n\t\t\t\timport scipy\n\t\t\t\tD, p=scipy.stats.ks_2samp(m1s,m2s)\n\t\t\t\tplt.plot(0,0, c='green', marker='*', label='K-S test is '+str(D))\n\t\t\t\tplt.xlim(np.min(M),-1.4)\n\t\t\t\tts='KS'\n\t\t\n\t\t\t#print('Standard Deviation (Not Flagged): ', str(np.std(m1s)))\n\t\t\t#print('Standard Deviation (Flagged): ', str(np.std(m2s)))\n\t\t\n\t\t\tplt.axvline(x=m1, color='magenta', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- ' +str(np.round(sterr1,2)))\n\t\t\tplt.axvline(x=m2, color='cyan', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))\n\t\t\tplt.xlabel('Slopes', fontsize=10)\n\t\t\tplt.legend(loc=0,prop={'size':6.5})\n\t\t\tplt.ylabel('Frequency', fontsize=10)\n\t\t\tplt.title('With '+ty+' Slopes')\n\t\t\toutdirs=doutdir+'slopedist.pdf'\n\t\t\t#figs.text(0.03, 0.03, txtdist, color='red', weight='bold')\n\t\t\t#plt.show()\n\t\t\tfigs.savefig(outdirs)\n\t\t\tprint(outdirs)\n\t\t\n\t\tdef all_lumprof(lum1s, lum2s, rad1s, rad2s, mrad1, mrad2, mden1, mden2, error1, error2):\n\t\t\tf=plt.figure()\n\t\t\t#print(len(mrad1)) #these are the mean radii\n\t\t\t#print(len(mrad2))\n\t\t\t#print(len(mden1))\n\t\t\t#print(len(mden2))\n\t\t\tfor n in range(len(lum1s)):\n\t\t\t\tplt.plot(rad1s[n], lum1s[n],color='lightgrey', marker='.')\n\t\t\tfor n in range(len(lum2s)):\n\t\t\t\tplt.plot(rad2s[n], lum2s[n],color='lightgrey', marker='.')\n\t\t\tplt.scatter(mrad1, mden1, color='red', marker='o',label='Linearly Averaged ('+str(len(lum1s))+')', zorder=3)\n\t\t\tplt.scatter(mrad2,mden2,color='blue', marker='o',label='Log Averaged ('+str(len(lum1s))+')', zorder=3)\n\t\t\tplt.xlabel('Log Radii (kpc)')\n\t\t\tplt.ylabel('Luminosity Densities (Lsolar/kpc^2)')\n\t\t\tplt.title('Average Luminosity Densities v Radii')\n\t\t\tplt.legend(loc=0,prop={'size':6.0})\n\t\t\toutdirs=outdir+tag+'all_lumprof.pdf'\n\t\t\t#plt.show()\n\t\t\tf.savefig(outdirs)\n\t\t\tprint(outdirs)\n\t\t\n\t\tdist_mean(ind_slope1[0],ind_slope2[0],mean_slopes1[0],mean_slopes2[0],mean_slopes1[5], mean_slopes2[5], KS=False)\n\t\n\t\tall_lumprof(inds1[1], inds2[1], inds1[2], inds2[2], means1[2], means2[2], means1[1], means2[1],means1[4], means2[4])\n\t\n\t\tlum_mult_fit(means1[2], means2[2], means1[1], means2[1], mean_slopes1[2], mean_slopes2[2], 
mean_slopes1[4], mean_slopes2[4], mean_slopes1[5], mean_slopes2[5], mean_slopes1[0], mean_slopes2[0],means1[4], means2[4], outdir=outdir)\n\t\n\tmy_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2)\n\nelse:\n\tfrom halflight_second import meanlum2\n\timport numpy as np\n\timport matplotlib.pyplot as plt\n\tNaps=0\n\tL=np.array([7.5, 8.0, 8.5, 9.0, 8.5,7.0, 8.5])\n\tR=np.array([1,2,3,3,4,0,2.5])\n\t\n\tmL, mR, bb=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='lindata')\n\tmL1, mR1, bb1=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='')\n\t\n\tprint('Lums', mL, mL1)\n\tprint('Rads', mR1, mR1)\n\t\n\tplt.scatter(mR, mL, color='red', label='Averaged Linearly')\n\tplt.scatter(mR1, mL1, color='blue', label='Averaged on Log scale')\n\tplt.xlabel('Log Radii')\n\tplt.ylabel('Log Luminosity')\n\tplt.legend(loc=0,prop={'size':6.0})\n\tplt.show()\n\t",
"print('Plot mass fraction as a function of Age')\nimport astropy.table as table \n#from defcuts import *\nimport math\nimport numpy as np\nfrom def_get_mags import get_zdistmod\nfrom def_ages import *\nfrom def_age_plots import *\nindir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'\n\t\nDATA=table.Table.read(indir+'small_vespa_LOWZ.fits')\n\nprint(np.ndim(DATA))\ndef get_agebin(Data,hm, plots=False):\n\t\n\trunid=Data['RUNID']\n\trunIDs, count=np.unique(runid, return_counts=True)\n\n\trun=5\n\tndata=Data[runid==run] #only looking at first runID\n\tdef age_bin(datas, tag=[]):\n\t\t#print(datas['SPECOBJID','AGESTART','MASS', 'M_STELLAR'])\n\t\tmass=datas['MASS']\n\t\tTmass=datas['M_STELLAR']\n\t\tagestart=datas['AGESTART']\n\t\tageend=datas['AGEEND']\n\n\t\tmass_fraction=mass/Tmass\n\t\n\t\tprint('MF max= ', np.max(mass_fraction), 'MF min= ', np.min(mass_fraction))\n\t\n\t\n\t\tagebins=(ageend+agestart)/2\n\t\tageranges=(ageend+agestart)\n\t\tagebin=no_repeats(agebins)\n\t\tagerange=no_repeats(ageranges)\n\t\n\t\tstart=no_repeats(agestart)\n\t\tend=no_repeats(ageend)\n\n\t\tstack_mf, errors=stack_mass3(agestart, mass, Tmass, start)\n\t\n\t\t\t#errs=divide_error(mass, Tmass, datas['MASS_ERROR'], datas['M_STELLAR_ERROR'])\n\t\n\t\t\t#age_plot(abin, stack_mf, start, end, tag=tag) <-- DO NOT USE\n\t\tif plots==True:\n\t\t\tage_plot1(agebin, stack_mf, start, end, errors, tag=tag)\n\t\n\tnewdata=mass_frac_cut1(ndata, hm, get_opp=False)\n\n\tper=[str(hm*100), '%']\n\tper=''.join(per)\n\n\ttry:\n\t\tuname,ind, inv,count=np.unique(newdata['SPECOBJID'], return_index=True, return_counts=True,return_inverse=True)\n\n\t\ttagn=['+mf_cut80_', 'Number of Galaxies= '+str(len(uname)), 'Only Galaxies with Mass Fractions > '+per,'RunID= '+str(run)]\n\t\t#age_bin(newdata, tag=tagn)\n\t\tndata=newdata\n\texcept:\n\t\tuname,ind, inv,count=np.unique(ndata['SPECOBJID'], return_index=True, return_counts=True,return_inverse=True)\n\t\ttagn=['+all','Number of Galaxies= '+str(len(uname)),'All Galaxies','RunID= '+str(run)]\n\n\tage_bin(ndata, tag=tagn)\n\t\n\tprint(tagn[1])\n\nget_agebin(DATA, 0.585)"
] |
[
[
"numpy.array",
"matplotlib.pyplot.errorbar",
"numpy.round",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"numpy.min",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"scipy.stats.ks_2samp",
"matplotlib.pyplot.scatter",
"numpy.linspace"
],
[
"numpy.max",
"numpy.min",
"numpy.ndim",
"numpy.unique"
]
] |
niksaz/semantic-code-search
|
[
"8b25dbdba43fa9ee6c400a9243b81aa6a7d0c07a"
] |
[
"src/model_restore_helper.py"
] |
[
"from typing import Dict, Any, Optional, Type\n\nimport tensorflow as tf\nfrom dpu_utils.utils import RichPath\n\nfrom encoders import \\\n NBoWEncoder, CodeTokensASTEncoder, TBCNNEncoder, ASTNNEncoder, AstTokensEncoder, ASTPretrainedNBoWEncoder, \\\n GraphPretrainedNBoWEncoder, GraphTokensEncoder, GraphNodesDataPreprocessor, \\\n ASTTypeBagDataPreprocessor, TreeDataPreprocessor, TreeTokenPlusTypeDataPreprocessor\nfrom encoders.graph_encoder import GraphEncoder\nfrom models import Model, NeuralBoWModel, NeuralASTModel, SelfAttentionModel, ConvolutionalModel, ConvSelfAttentionModel\n\n\ndef get_model_class_from_name(model_name: str) -> Type[Model]:\n model_name = model_name.lower()\n initial_model_name = model_name\n is_plain = False\n is_raw = False\n if model_name.endswith('-raw'):\n is_raw = True\n model_name = model_name[:-len('-raw')]\n if model_name.endswith('-plain'):\n is_plain = True\n model_name = model_name[:-len('-plain')]\n\n if model_name in ['ggnn', 'ggnnmodel']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain)\n return NeuralASTModel\n elif model_name in ['rnn-ggnn-sandwich']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain)\n return NeuralASTModel\n elif model_name in ['transformer-ggnn-sandwich']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain)\n return NeuralASTModel\n elif model_name in ['great', 'greatmodel']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain)\n return NeuralASTModel\n elif model_name in ['great10', 'great10model']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain)\n return NeuralASTModel\n elif model_name in ['transformer', 'transformermodel']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain, is_raw)\n return NeuralASTModel\n elif model_name in ['transformer10', 'transformer10model']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain, is_raw)\n return NeuralASTModel\n elif model_name in ['graphnbow', 'graphnbowmodel']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, False, is_raw)\n return NeuralASTModel\n elif model_name == 'nbowtypesast':\n NeuralASTModel.MODEL_NAME = initial_model_name\n CodeTokensASTEncoder.AST_ENCODER_CLASS = NBoWEncoder\n CodeTokensASTEncoder.DATA_PREPROCESSOR = ASTTypeBagDataPreprocessor\n return NeuralASTModel\n elif model_name == 'node2vecast':\n NeuralASTModel.MODEL_NAME = initial_model_name\n CodeTokensASTEncoder.AST_ENCODER_CLASS = ASTPretrainedNBoWEncoder\n CodeTokensASTEncoder.DATA_PREPROCESSOR = ASTTypeBagDataPreprocessor\n return NeuralASTModel\n elif model_name == 'tbcnnast':\n NeuralASTModel.MODEL_NAME = initial_model_name\n CodeTokensASTEncoder.AST_ENCODER_CLASS = TBCNNEncoder\n CodeTokensASTEncoder.DATA_PREPROCESSOR = TreeDataPreprocessor\n return NeuralASTModel\n elif model_name == 
'astnn':\n NeuralASTModel.MODEL_NAME = initial_model_name\n CodeTokensASTEncoder.AST_ENCODER_CLASS = ASTNNEncoder\n CodeTokensASTEncoder.CODE_ENCODER_CLASS = AstTokensEncoder\n CodeTokensASTEncoder.DATA_PREPROCESSOR = TreeTokenPlusTypeDataPreprocessor\n return NeuralASTModel\n elif model_name == 'node2vecgraphs':\n NeuralASTModel.MODEL_NAME = initial_model_name\n CodeTokensASTEncoder.AST_ENCODER_CLASS = GraphPretrainedNBoWEncoder\n CodeTokensASTEncoder.DATA_PREPROCESSOR = GraphNodesDataPreprocessor\n return NeuralASTModel\n elif model_name in ['neuralbow', 'neuralbowmodel']:\n return NeuralBoWModel\n elif model_name in ['rnn', 'rnnmodel']:\n NeuralASTModel.MODEL_NAME = initial_model_name\n NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder\n GraphEncoder.update_config(model_name, is_plain, is_raw)\n return NeuralASTModel\n elif model_name in {'selfatt', 'selfattention', 'selfattentionmodel'}:\n return SelfAttentionModel\n elif model_name in {'1dcnn', 'convolutionalmodel'}:\n return ConvolutionalModel\n elif model_name in {'convselfatt', 'convselfattentionmodel'}:\n return ConvSelfAttentionModel\n else:\n raise Exception(\"Unknown model '%s'!\" % model_name)\n\n\ndef restore(path: RichPath, is_train: bool, hyper_overrides: Optional[Dict[str, Any]] = None) -> Model:\n saved_data = path.read_as_pickle()\n\n if hyper_overrides is not None:\n saved_data['hyperparameters'].update(hyper_overrides)\n\n model_class = get_model_class_from_name(saved_data['model_type'])\n model = model_class(saved_data['hyperparameters'], saved_data.get('run_name'))\n model.query_metadata.update(saved_data['query_metadata'])\n for (language, language_metadata) in saved_data['per_code_language_metadata'].items():\n model.per_code_language_metadata[language] = language_metadata\n model.make_model(is_train=is_train)\n\n variables_to_initialize = []\n with model.sess.graph.as_default():\n with tf.name_scope(\"restore\"):\n restore_ops = []\n used_vars = set()\n for variable in sorted(model.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES),\n key=lambda v: v.name):\n used_vars.add(variable.name)\n if variable.name in saved_data['weights']:\n # print('Initializing %s from saved value.' % variable.name)\n restore_ops.append(variable.assign(saved_data['weights'][variable.name]))\n else:\n print('Freshly initializing %s since no saved value was found.' % variable.name)\n variables_to_initialize.append(variable)\n for var_name in sorted(saved_data['weights']):\n if var_name not in used_vars:\n if var_name.endswith('Adam:0') or var_name.endswith('Adam_1:0') or var_name in ['beta1_power:0',\n 'beta2_power:0']:\n continue\n print('Saved weights for %s not used by model.' % var_name)\n restore_ops.append(tf.variables_initializer(variables_to_initialize))\n model.sess.run(restore_ops)\n return model\n"
] |
[
[
"tensorflow.variables_initializer",
"tensorflow.name_scope"
]
] |
dask/partd
|
[
"efa78b4cb27ec450d9bff360e1bf9451c5ec0b17"
] |
[
"partd/numpy.py"
] |
[
"\"\"\" Store arrays\n\nWe put arrays on disk as raw bytes, extending along the first dimension.\nAlongside each array x we ensure the value x.dtype which stores the string\ndescription of the array's dtype.\n\"\"\"\nfrom contextlib import suppress\nimport pickle\n\nimport numpy as np\nfrom toolz import valmap, identity, partial\nfrom .core import Interface\nfrom .file import File\nfrom .utils import frame, framesplit, suffix\n\n\ndef serialize_dtype(dt):\n \"\"\" Serialize dtype to bytes\n\n >>> serialize_dtype(np.dtype('i4'))\n b'<i4'\n >>> serialize_dtype(np.dtype('M8[us]'))\n b'<M8[us]'\n \"\"\"\n return dt.str.encode()\n\n\ndef parse_dtype(s):\n \"\"\" Parse text as numpy dtype\n\n >>> parse_dtype(b'i4')\n dtype('int32')\n\n >>> parse_dtype(b\"[('a', 'i4')]\")\n dtype([('a', '<i4')])\n \"\"\"\n if s.startswith(b'['):\n return np.dtype(eval(s)) # Dangerous!\n else:\n return np.dtype(s)\n\n\nclass Numpy(Interface):\n def __init__(self, partd=None):\n if not partd or isinstance(partd, str):\n partd = File(partd)\n self.partd = partd\n Interface.__init__(self)\n\n def __getstate__(self):\n return {'partd': self.partd}\n\n def append(self, data, **kwargs):\n for k, v in data.items():\n self.partd.iset(suffix(k, '.dtype'), serialize_dtype(v.dtype))\n self.partd.append(valmap(serialize, data), **kwargs)\n\n def _get(self, keys, **kwargs):\n bytes = self.partd._get(keys, **kwargs)\n dtypes = self.partd._get([suffix(key, '.dtype') for key in keys],\n lock=False)\n dtypes = map(parse_dtype, dtypes)\n return list(map(deserialize, bytes, dtypes))\n\n def delete(self, keys, **kwargs):\n keys2 = [suffix(key, '.dtype') for key in keys]\n self.partd.delete(keys2, **kwargs)\n\n def _iset(self, key, value):\n return self.partd._iset(key, value)\n\n def drop(self):\n return self.partd.drop()\n\n def __del__(self):\n self.partd.__del__()\n\n @property\n def lock(self):\n return self.partd.lock\n\n def __exit__(self, *args):\n self.drop()\n self.partd.__exit__(self, *args)\n\ntry:\n from pandas import msgpack\nexcept ImportError:\n try:\n import msgpack\n except ImportError:\n msgpack = False\n\n\ndef serialize(x):\n if x.dtype == 'O':\n l = x.flatten().tolist()\n with suppress(Exception): # Try msgpack (faster on strings)\n return frame(msgpack.packb(l, use_bin_type=True))\n return frame(pickle.dumps(l, protocol=pickle.HIGHEST_PROTOCOL))\n else:\n return x.tobytes()\n\n\ndef deserialize(bytes, dtype, copy=False):\n if dtype == 'O':\n try:\n if msgpack.version >= (0, 5, 2):\n unpack_kwargs = {'raw': False}\n else:\n unpack_kwargs = {'encoding': 'utf-8'}\n\n blocks = [msgpack.unpackb(f, **unpack_kwargs)\n for f in framesplit(bytes)]\n except Exception:\n blocks = [pickle.loads(f) for f in framesplit(bytes)]\n\n result = np.empty(sum(map(len, blocks)), dtype='O')\n i = 0\n for block in blocks:\n result[i:i + len(block)] = block\n i += len(block)\n return result\n else:\n result = np.frombuffer(bytes, dtype)\n if copy:\n result = result.copy()\n return result\n\n\ncompress_text = identity\ndecompress_text = identity\ncompress_bytes = lambda bytes, itemsize: bytes\ndecompress_bytes = identity\n\nwith suppress(ImportError):\n import blosc\n blosc.set_nthreads(1)\n\n compress_bytes = blosc.compress\n decompress_bytes = blosc.decompress\n\n compress_text = partial(blosc.compress, typesize=1)\n decompress_text = blosc.decompress\n\nwith suppress(ImportError):\n from snappy import compress as compress_text\n from snappy import decompress as decompress_text\n\n\ndef compress(bytes, dtype):\n if dtype == 'O':\n 
return compress_text(bytes)\n else:\n return compress_bytes(bytes, dtype.itemsize)\n\n\ndef decompress(bytes, dtype):\n if dtype == 'O':\n return decompress_text(bytes)\n else:\n return decompress_bytes(bytes)\n"
] |
[
[
"numpy.dtype",
"numpy.frombuffer"
]
] |
panghantian-kavout/DeepRL
|
[
"c144f751d12f17ae5f0fd99d97fd936de39f57c1"
] |
[
"DeepRL/Agent/PGAgent.py"
] |
[
"from Agent import Agent\nimport random\nimport tensorflow as tf\nimport numpy as np\n\n\nclass PGAgent(Agent):\n\n def __init__(self, _model, _env, _is_train=True,\n _optimizer=None, _global_step=None, _replay=None,\n _gpu=False, _gamma=0.99,\n _batch_size=32, _beta_entropy=0.01,\n _grad_clip=None, _epoch_show_log=1e3):\n\n super(PGAgent, self).__init__(_is_train, _gpu)\n\n # set config\n self.config.gpu = _gpu\n self.config.gamma = _gamma\n self.config.batch_size = _batch_size\n self.config.beta_entropy = _beta_entropy\n self.config.grad_clip = _grad_clip\n self.config.epoch_show_log = _epoch_show_log\n\n # set env\n self.env = _env\n\n with tf.device(self.config.device):\n # create p func\n self.p_func, self.vars = _model(self.x_place)\n\n if self.is_train:\n # place for action, value\n self.value_place = tf.placeholder(tf.float32)\n self.action_place = tf.placeholder(tf.float32)\n # get entropy\n entropy = tf.reduce_sum(\n self.p_func * tf.log(self.p_func + 1e-10))\n # get loss\n loss = -tf.reduce_sum(\n tf.log(\n tf.reduce_sum(self.p_func * self.action_place, 1)\n + 1e-10) * self.value_place) + \\\n self.config.beta_entropy * entropy\n\n # compute grads of vars\n self.grads_op = tf.gradients(loss, self.vars)\n\n if _optimizer:\n self.createOpt(_optimizer, _global_step)\n\n self.replay = _replay\n\n # init all vars\n self.sess.run(tf.initialize_all_variables())\n\n def step(self):\n return super(PGAgent, self).stepUntilEnd(self.p_func)\n\n def grad(self, _cur_x, _batch_tuples):\n with tf.device(self.config.device):\n # get action data (one hot)\n action_data = self.getActionData(\n self.p_func.get_shape().as_list()[1], _batch_tuples)\n # get value data\n value_data = self.getNStepVTargetData(None, _batch_tuples)\n if value_data.std() == 0:\n value_data = np.zero_like(value_data)\n else:\n value_data = (value_data - value_data.mean()) / \\\n value_data.std()\n self.grads_data = self.sess.run(\n self.grads_op,\n feed_dict={\n self.x_place: _cur_x,\n self.action_place: action_data,\n self.value_place: value_data,\n }\n )\n\n def doTrain(self, _batch_tuples, _weights):\n # get inputs from batch\n cur_x = self.getCurInputs(_batch_tuples)\n # fill grad\n self.grad(cur_x, _batch_tuples)\n\n return np.ones([len(_batch_tuples)], np.float32)\n\n def chooseAction(self, _model, _state):\n return self.chooseSoftAction(_model, _state)\n"
] |
[
[
"tensorflow.initialize_all_variables",
"numpy.zero_like",
"tensorflow.gradients",
"tensorflow.log",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.device"
]
] |
WingCode/live-project
|
[
"977dfbcaade35d8173dbb6ace102fe8998f1cdf4"
] |
[
"test/funksvd_recommender_test.py"
] |
[
"import os\n\nfrom builder.matrix_factorization_calculator import MatrixFactorization\nfrom recs.funksvd_recommender import FunkSVDRecs\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"prs_project.settings\")\n\nimport django\n\ndjango.setup()\n\nimport unittest\n\nimport pandas as pd\n\nSTAR_WARS = 'star wars'\nWONDER_WOMAN = 'wonder woman'\nAVENGERS = 'avengers'\nWOLVERINE = 'logan'\nPIRATES_OF = 'pirates of the caribbien'\nHARRY = 'harry potter I'\nCAPTAIN_AMERICA = 'captain america'\nALIEN = 'alien'\nDR_STRANGELOVE = 'doctor strangelove'\nJACQUES = 'jacques'\n\n\nclass TestNeighborhoodBasedRecs(unittest.TestCase):\n def setUp(self):\n self.ratings = pd.DataFrame(\n [['1', STAR_WARS, 9, '2013-10-12 23:21:27+00:00'],\n ['1', WONDER_WOMAN, 10, '2014-10-12 23:22:27+00:00'],\n ['1', AVENGERS, 10, '2015-11-12 23:20:27+00:00'],\n ['1', WOLVERINE, 8, '2015-08-12 23:20:27+00:00'],\n ['1', PIRATES_OF, 10, '2015-10-12 22:20:27+00:00'],\n ['1', HARRY, 10, '2015-10-12 23:21:27+00:00'],\n ['1', CAPTAIN_AMERICA, 10, '2014-10-12 23:20:27+00:00'],\n ['1', ALIEN, 6, '2015-10-12 23:22:27+00:00'],\n ['1', JACQUES, 6, '2015-10-12 11:20:27+00:00'],\n\n ['2', STAR_WARS, 10, '2013-10-12 23:20:27+00:00'],\n ['2', WONDER_WOMAN, 10, '2014-10-12 23:20:27+00:00'],\n ['2', AVENGERS, 9, '2016-10-12 23:20:27+00:00'],\n ['2', PIRATES_OF, 6, '2010-10-12 23:20:27+00:00'],\n ['2', CAPTAIN_AMERICA, 10, '2005-10-12 23:20:27+00:00'],\n ['2', DR_STRANGELOVE, 10, '2015-01-12 23:20:27+00:00'],\n\n ['3', STAR_WARS, 9, '2013-10-12 20:20:27+00:00'],\n ['3', AVENGERS, 10, '2015-10-12 10:20:27+00:00'],\n ['3', PIRATES_OF, 9, '2013-03-12 23:20:27+00:00'],\n ['3', HARRY, 8, '2016-10-13 23:20:27+00:00'],\n ['3', DR_STRANGELOVE, 10, '2016-09-12 23:20:27+00:00'],\n\n ['4', STAR_WARS, 8, '2013-10-12 23:20:27+00:00'],\n ['4', WONDER_WOMAN, 8, '2014-10-12 23:20:27+00:00'],\n ['4', AVENGERS, 9, '2015-10-12 23:20:27+00:00'],\n ['4', PIRATES_OF, 5, '2013-10-12 23:20:27+00:00'],\n ['4', HARRY, 6, '2014-10-12 23:20:27+00:00'],\n ['4', ALIEN, 8, '2015-10-12 23:20:27+00:00'],\n ['4', DR_STRANGELOVE, 9, '2015-10-12 23:20:27+00:00'],\n\n ['5', STAR_WARS, 6, '2013-10-12 23:20:27+00:00'],\n ['5', AVENGERS, 1, '2014-10-12 23:20:27+00:00'],\n ['5', WOLVERINE, 2, '2015-10-12 23:20:27+00:00'],\n ['5', PIRATES_OF, 2, '2016-10-12 23:20:27+00:00'],\n ['5', HARRY, 10, '2016-10-12 23:20:27+00:00'],\n ['5', CAPTAIN_AMERICA, 1, '2016-10-12 23:20:27+00:00'],\n ['5', ALIEN, 4, '2016-10-12 23:20:27+00:00'],\n ['5', DR_STRANGELOVE, 3, '2016-10-12 23:20:27+00:00'],\n ['5', JACQUES, 10, '2016-10-12 23:20:27+00:00'],\n\n ['6', STAR_WARS, 8, '2013-10-12 23:20:27+00:00'],\n ['6', WONDER_WOMAN, 8, '2014-10-12 23:20:27+00:00'],\n ['6', AVENGERS, 8, '2014-10-12 23:20:27+00:00'],\n ['6', WOLVERINE, 8, '2015-10-12 23:20:27+00:00'],\n ['6', PIRATES_OF, 6, '2016-10-12 23:20:27+00:00'],\n ['6', HARRY, 10, '2016-10-12 23:20:27+00:00'],\n ['6', JACQUES, 8, '2016-10-12 23:20:27+00:00'],\n\n ['7', AVENGERS, 10, '2014-10-12 23:20:27+00:00'],\n ['7', PIRATES_OF, 3, '2016-10-12 23:20:27+00:00'],\n ['7', HARRY, 1, '2016-10-12 23:20:27+00:00'],\n ['7', ALIEN, 8, '2016-10-12 23:20:27+00:00'],\n ['7', DR_STRANGELOVE, 10, '2016-10-12 23:20:27+00:00'],\n\n ['8', STAR_WARS, 9, '2013-10-12 23:20:27+00:00'],\n ['8', WONDER_WOMAN, 7, '2014-10-12 23:20:27+00:00'],\n ['8', AVENGERS, 7, '2014-10-12 23:20:27+00:00'],\n ['8', WOLVERINE, 7, '2015-10-12 23:20:27+00:00'],\n ['8', PIRATES_OF, 8, '2016-10-12 23:20:27+00:00'],\n ['8', HARRY, 8, '2016-10-12 23:20:27+00:00'],\n ['8', ALIEN, 8, 
'2016-10-12 23:20:27+00:00'],\n ['8', DR_STRANGELOVE, 8, '2016-10-12 23:20:27+00:00'],\n ['8', JACQUES, 10, '2016-10-12 23:20:27+00:00'],\n\n ['9', WONDER_WOMAN, 7, '2014-10-12 23:20:27+00:00'],\n ['9', AVENGERS, 8, '2014-10-12 23:20:27+00:00'],\n ['9', WOLVERINE, 8, '2015-10-12 23:20:27+00:00'],\n ['9', PIRATES_OF, 7, '2016-10-12 23:20:27+00:00'],\n ['9', HARRY, 8, '2016-10-12 23:20:27+00:00'],\n ['9', CAPTAIN_AMERICA, 10, '2016-10-12 23:20:27+00:00'],\n ['9', DR_STRANGELOVE, 10, '2016-10-12 23:20:27+00:00'],\n ['9', JACQUES, 7, '2016-10-12 23:20:27+00:00'],\n\n ['10', AVENGERS, 7, '2014-10-12 23:20:27+00:00'],\n ['10', ALIEN, 10, '2016-10-12 23:20:27+00:00'],\n ['10', CAPTAIN_AMERICA, 6, '2016-10-12 23:20:27+00:00'],\n ['10', DR_STRANGELOVE, 8, '2016-10-12 23:20:27+00:00'],\n\n ], columns=['user_id', 'movie_id', 'rating', 'rating_timestamp'])\n\n self.save_path = './test/'\n self.k=3\n MF = MatrixFactorization(save_path=self.save_path)\n MF.train(self.ratings, k=self.k)\n\n def test_rec(self):\n\n recommender = FunkSVDRecs(self.save_path)\n recs = recommender.recommend_items_by_ratings('1',\n [{'movie_id': AVENGERS, 'rating': 7},\n {'movie_id': ALIEN, 'rating': 10},\n {'movie_id': CAPTAIN_AMERICA, 'rating': 6}], num=2)\n self.assertIsNotNone(recs)\n self.assertEqual(len(recs), 2)\n\n\n def test_rec2(self):\n recommender = FunkSVDRecs(self.save_path)\n recs = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=5)\n self.assertIsNotNone(recs)\n self.assertEqual(len(recs), 5)\n top = [r[0] for r in recs][:2]\n self.assertIn(HARRY, top, '{} was missing from {}'.format(HARRY, top))\n self.assertIn(JACQUES, top, '{} was missing from {}'.format(JACQUES, top))\n\n\n def test_rec_increasing(self):\n recommender = FunkSVDRecs(self.save_path)\n recs1 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=2)\n self.assertIsNotNone(recs1)\n self.assertEqual(len(recs1), 2)\n\n recs2 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=3)\n self.assertIsNotNone(recs2)\n self.assertEqual(len(recs2), 3)\n\n self.assertEqual(recs1[0],recs2[0] )\n self.assertEqual(recs1[1],recs2[1] )\n\n def test_rec_increasing2(self):\n\n recommender = FunkSVDRecs(self.save_path)\n recs4 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=4)\n self.assertIsNotNone(recs4)\n self.assertEqual(len(recs4), 4)\n self.assertAlmostEqual(recs4[1][1]['prediction'], 7.812836963)\n recs6 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=6)\n self.assertIsNotNone(recs6)\n self.assertEqual(len(recs6), 6)\n self.compare_recs(recs4, recs6)\n\n recommender = FunkSVDRecs(self.save_path)\n recs42 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=4)\n self.compare_recs(recs4, recs42)\n\n recs1 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=7)\n recs2 = recommender.recommend_items_by_ratings('5',\n [{'movie_id': AVENGERS, 'rating': 1}], num=9)\n\n self.compare_recs(recs1, recs2)\n\n\n def compare_recs(self, recs1, recs2):\n for i in range(len(recs1)):\n self.assertEqual(recs1[i][0], recs2[i][0])\n\n\nif __name__ == '__main__':\n unittest.main()"
] |
[
[
"pandas.DataFrame"
]
] |
SunAriesCN/face-datasets
|
[
"78d3cbb1badc224cf17091c7b8712ea2fed1eb2b"
] |
[
"LFW/plot.py"
] |
[
"import matplotlib\nmatplotlib.use(\"agg\")\nimport matplotlib.pyplot as plt\nimport random\nimport os\n\ndef getPlotColor(i):\n cnames = [\n '#ED1F24',\n '#B8529F',\n '#3952A3',\n '#69BC45', \n '#FF8400',\n '#A74E44', \n '#7642CC', \n '#000000', \n '#00FF00',\n '#FF0000']\n return cnames[i % 10]\n \ndef draw_chart(log_name, path_to_png, maps, precision, threshold):\n line_width = 1.0 # the line width\n # plot \n figure_1 = plt.figure(log_name,figsize=(12, 6))\n ax= plt.subplot(1,1,1)\n ax.grid(True,color=\"gray\",linestyle=\"-.\" )\n max_size = 0\n for name in maps:\n y = maps[name]\n max_size = max(max_size, len(y))\n idx = 0 \n for name in maps:\n y = maps[name]\n #print(y)\n n = len(y)\n if n < max_size * 0.2:\n continue\n \n x = [i for i in range(0,n)]\n ave = float(sum(y))/n\n label = '%.1f %s' % (ave, name)\n # ema \n c = getPlotColor(idx)\n plt.plot(x , y, color=c, linewidth=line_width,label = label)\n idx += 1\n # threshold line\n label = 'threshold:%.4f' % (threshold)\n plt.plot([0, max_size], [threshold,threshold], color='green', linewidth=line_width,label = label) \n plt.title('%.4f -- %s' % (precision, log_name))\n plt.xlabel('x')\n plt.ylabel('y')\n plt.legend(loc='lower left')\n png_path = os.path.join(path_to_png, '%.4f--%s.png'%(precision, log_name))\n plt.savefig(png_path)\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot"
]
] |
Northengard/torch2tf_converter
|
[
"8d16c5364d97e25ed04c89704d9768b4bffc1c6b"
] |
[
"models/example.py"
] |
[
"from torch import nn\n\n\nclass SimpleTorchModel(nn.Module):\n def __init__(self):\n super(SimpleTorchModel, self).__init__()\n n_chn = 32\n self.conv1 = nn.Conv2d(3, n_chn, (3, 3), padding=1, stride=(1, 1), bias=False)\n self.conv1_bn = nn.BatchNorm2d(n_chn)\n self.conv1_relu = nn.ReLU()\n\n self.adp_avg_pool = nn.AdaptiveAvgPool2d(1)\n self.flatten = nn.Flatten()\n self.d1 = nn.Linear(32, 16, bias=False)\n self.d2 = nn.Linear(16, 10)\n self.d2_relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv1_bn(x)\n x = self.conv1_relu(x)\n x = self.adp_avg_pool(x)\n x = self.flatten(x)\n x = self.d1(x)\n x = self.d2(x)\n x = self.d2_relu(x)\n return x\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Flatten"
]
] |
MargeryLab/BMaskR-CNN
|
[
"41f63d301d6be7fa30ba281a5a0f727fbca6ad2a"
] |
[
"tests/data/test_detection_utils.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\r\n\r\nimport copy\r\nimport numpy as np\r\nimport os\r\nimport unittest\r\nimport pycocotools.mask as mask_util\r\nfrom fvcore.common.file_io import PathManager\r\n\r\nfrom detectron2.data import MetadataCatalog, detection_utils\r\nfrom detectron2.data import transforms as T\r\nfrom detectron2.structures import BitMasks, BoxMode\r\n\r\n\r\nclass TestTransformAnnotations(unittest.TestCase):\r\n def test_transform_simple_annotation(self):\r\n transforms = T.TransformList([T.HFlipTransform(400)])\r\n anno = {\r\n \"bbox\": np.asarray([10, 10, 200, 300]),\r\n \"bbox_mode\": BoxMode.XYXY_ABS,\r\n \"category_id\": 3,\r\n \"segmentation\": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],\r\n }\r\n\r\n output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))\r\n self.assertTrue(np.allclose(output[\"bbox\"], [200, 10, 390, 300]))\r\n self.assertEqual(len(output[\"segmentation\"]), len(anno[\"segmentation\"]))\r\n self.assertTrue(np.allclose(output[\"segmentation\"][0], [390, 10, 300, 100, 300, 10]))\r\n\r\n detection_utils.annotations_to_instances([output, output], (400, 400))\r\n\r\n def test_flip_keypoints(self):\r\n transforms = T.TransformList([T.HFlipTransform(400)])\r\n anno = {\r\n \"bbox\": np.asarray([10, 10, 200, 300]),\r\n \"bbox_mode\": BoxMode.XYXY_ABS,\r\n \"keypoints\": np.random.rand(17, 3) * 50 + 15,\r\n }\r\n\r\n output = detection_utils.transform_instance_annotations(\r\n copy.deepcopy(anno),\r\n transforms,\r\n (400, 400),\r\n keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(\r\n [\"keypoints_coco_2017_train\"]\r\n ),\r\n )\r\n # The first keypoint is nose\r\n self.assertTrue(np.allclose(output[\"keypoints\"][0, 0], 400 - anno[\"keypoints\"][0, 0]))\r\n # The last 16 keypoints are 8 left-right pairs\r\n self.assertTrue(\r\n np.allclose(\r\n output[\"keypoints\"][1:, 0].reshape(-1, 2)[:, ::-1],\r\n 400 - anno[\"keypoints\"][1:, 0].reshape(-1, 2),\r\n )\r\n )\r\n self.assertTrue(\r\n np.allclose(\r\n output[\"keypoints\"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],\r\n anno[\"keypoints\"][1:, 1:].reshape(-1, 2, 2),\r\n )\r\n )\r\n\r\n def test_crop(self):\r\n transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])\r\n keypoints = np.random.rand(17, 3) * 50 + 15\r\n keypoints[:, 2] = 2\r\n anno = {\r\n \"bbox\": np.asarray([10, 10, 200, 400]),\r\n \"bbox_mode\": BoxMode.XYXY_ABS,\r\n \"keypoints\": keypoints,\r\n }\r\n\r\n output = detection_utils.transform_instance_annotations(\r\n copy.deepcopy(anno), transforms, (10, 10)\r\n )\r\n # box is shifted and cropped\r\n self.assertTrue((output[\"bbox\"] == np.asarray([0, 0, 0, 10])).all())\r\n # keypoints are no longer visible\r\n self.assertTrue((output[\"keypoints\"][:, 2] == 0).all())\r\n\r\n def test_transform_RLE(self):\r\n transforms = T.TransformList([T.HFlipTransform(400)])\r\n mask = np.zeros((300, 400), order=\"F\").astype(\"uint8\")\r\n mask[:, :200] = 1\r\n\r\n anno = {\r\n \"bbox\": np.asarray([10, 10, 200, 300]),\r\n \"bbox_mode\": BoxMode.XYXY_ABS,\r\n \"segmentation\": mask_util.encode(mask[:, :, None])[0],\r\n \"category_id\": 3,\r\n }\r\n output = detection_utils.transform_instance_annotations(\r\n copy.deepcopy(anno), transforms, (300, 400)\r\n )\r\n mask = output[\"segmentation\"]\r\n self.assertTrue((mask[:, 200:] == 1).all())\r\n self.assertTrue((mask[:, :200] == 0).all())\r\n\r\n inst = detection_utils.annotations_to_instances(\r\n [output, output], (400, 400), 
mask_format=\"bitmask\"\r\n )\r\n self.assertTrue(isinstance(inst.gt_masks, BitMasks))\r\n\r\n def test_transform_RLE_resize(self):\r\n transforms = T.TransformList(\r\n [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, \"bilinear\")]\r\n )\r\n mask = np.zeros((300, 400), order=\"F\").astype(\"uint8\")\r\n mask[:, :200] = 1\r\n\r\n anno = {\r\n \"bbox\": np.asarray([10, 10, 200, 300]),\r\n \"bbox_mode\": BoxMode.XYXY_ABS,\r\n \"segmentation\": mask_util.encode(mask[:, :, None])[0],\r\n \"category_id\": 3,\r\n }\r\n output = detection_utils.transform_instance_annotations(\r\n copy.deepcopy(anno), transforms, (400, 400)\r\n )\r\n\r\n inst = detection_utils.annotations_to_instances(\r\n [output, output], (400, 400), mask_format=\"bitmask\"\r\n )\r\n self.assertTrue(isinstance(inst.gt_masks, BitMasks))\r\n\r\n def test_gen_crop(self):\r\n instance = {\"bbox\": [10, 10, 100, 100], \"bbox_mode\": BoxMode.XYXY_ABS}\r\n t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)\r\n # the box center must fall into the cropped region\r\n self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)\r\n\r\n def test_gen_crop_outside_boxes(self):\r\n instance = {\"bbox\": [10, 10, 100, 100], \"bbox_mode\": BoxMode.XYXY_ABS}\r\n with self.assertRaises(AssertionError):\r\n detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)\r\n\r\n def test_read_sem_seg(self):\r\n cityscapes_dir = MetadataCatalog.get(\"cityscapes_fine_sem_seg_val\").gt_dir\r\n sem_seg_gt_path = os.path.join(\r\n cityscapes_dir, \"frankfurt\", \"frankfurt_000001_083852_gtFine_labelIds.png\"\r\n )\r\n if not PathManager.exists(sem_seg_gt_path):\r\n raise unittest.SkipTest(\r\n \"Semantic segmentation ground truth {} not found.\".format(sem_seg_gt_path)\r\n )\r\n sem_seg = detection_utils.read_image(sem_seg_gt_path, \"L\")\r\n self.assertEqual(sem_seg.ndim, 3)\r\n self.assertEqual(sem_seg.shape[2], 1)\r\n self.assertEqual(sem_seg.dtype, np.uint8)\r\n self.assertEqual(sem_seg.max(), 32)\r\n self.assertEqual(sem_seg.min(), 1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n"
] |
[
[
"numpy.allclose",
"numpy.random.rand",
"numpy.asarray",
"numpy.zeros"
]
] |
Sabokou/BigData
|
[
"7901451cf3fa748c541ef93cf1578495335165cf"
] |
[
"Spark/Recommendations/py-apps/Spark_Recommendations.py"
] |
[
"import logging\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nimport numpy as np\nimport pandas as pd\nimport pyspark.sql.functions as F\nfrom pyspark.sql.types import DoubleType\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom pyspark import SparkConf\nfrom pyspark import SparkContext\nfrom pyspark.mllib.feature import HashingTF, IDF\n\nimport psycopg2\nimport psycopg2.extras\nimport sqlalchemy\nimport time\n\nclass Legerible:\n\n def __init__(self):\n self.s_user = None\n self.s_user = None\n\n self.alchemy_engine = None\n self.alchemy_connection = None\n self.psycopg2_connection = None\n\n self.b_connected = False\n self.b_initialised = False\n\n # establishes connection to database\n self.connect()\n\n # tests if database has entries\n self.test()\n\n # ###########################################################################################################\n # INIT FUNCTIONS\n\n def connect(self):\n \"\"\"\n makes a sqlalchemy and psycopg2 connection to the db.\n\n :return:\n \"\"\"\n\n while self.b_connected is False:\n try:\n self.alchemy_engine = sqlalchemy.create_engine(\n 'postgres+psycopg2://postgres:1234@database:5432/postgres')\n self.alchemy_connection = self.alchemy_engine.connect()\n self.psycopg2_connection = psycopg2.connect(database=\"postgres\", user=\"postgres\", port=5432,\n password=\"1234\", host=\"database\")\n self.b_connected = True\n print(\"Database Connected\")\n logging.info(\"Connected to DB\")\n except Exception as an_exception:\n logging.error(an_exception)\n logging.error(\"Not connected to DB\")\n time.sleep(5)\n return True\n\n def test(self, b_verbose=True):\n \"\"\"\n tests the connection to the db.\n\n :param b_verbose:\n :return:\n \"\"\"\n # checks if data / tables are present if it fails it initialises the database\n if self.b_connected:\n try:\n df = pd.read_sql_query(\"\"\"SELECT true\n FROM books\n LIMIT 1; \n \"\"\",\n self.alchemy_connection)\n if b_verbose:\n print(df)\n return df\n except Exception as err:\n self.init_db()\n logging.error(\"Tables not initialized\")\n return False\n\n\n # ###########################################################################################################\n # USING FUNCTIONS\n\n\n def get_select(self, s_sql_statement: str) -> object:\n \"\"\"\n This Function needs a Select-Statements and returns the result in a df.\n\n :param s_sql_statement:\n :return df:\n \"\"\"\n try:\n df = pd.read_sql_query(s_sql_statement, self.alchemy_connection)\n except Exception as an_exception:\n logging.error(an_exception)\n logging.error(\"Query couldn't be executed.\")\n return False\n return df\n\n def exec_statement(self, sql: str):\n \"\"\"\n can execute every kind of Sql-statement but does NOT return a response.\n\n Use for:\n - CALL Procedure\n - UPDATE Statement\n :param sql:\n :return:\n \"\"\"\n try:\n db_cursor = self.psycopg2_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n db_cursor.execute(sql)\n self.psycopg2_connection.commit()\n db_cursor.close()\n return True\n except psycopg2.errors.InFailedSqlTransaction:\n self.b_connected = False\n self.connect()\n logging.error(\"Transaction Failed - Review given inputs!\")\n return False\n\n\nleg = Legerible()\n\nall_loans = leg.get_select(\"\"\"SELECT L.n_loan_id AS Loan_ID, L.ts_now as Timestamp, B.s_isbn AS ISBN, B.s_title AS Title, \n B.s_aut_first_name AS Author_first_name, B.s_aut_last_name AS Author_last_name, U.s_user_name AS User\n FROM 
Loan AS L\n LEFT JOIN Books AS B ON (L.n_book_id = B.n_book_id)\n LEFT JOIN Users AS U ON (L.n_user_id = U.n_user_id)\"\"\")\n\nsc = SparkSession.builder.appName(\"recommendations\").getOrCreate()\nsparkContext = sc.sparkContext\n\n\ndef count_books(self):\n return self.distinct().count()\n\n\ntop_x = all_loans.groupby(\"book_id\").count()\ncount_b = 33 # count_books(lines)\ncount_l = count_books(all_loans)\n\n\ndef return_counts():\n a = [count_b, count_l, top_x]\n return a\n\n\nleg = Legerible()\n\n\ndef recommendation(user_id):\n # getting all loaned books by the user\n loans = leg.get_select(f\"\"\"SELECT L.ts_now as Timestamp, B.s_isbn AS ISBN, B.s_title AS Title,\n B.s_aut_first_name AS Author_first_name, B.s_aut_last_name AS Author_last_name\n FROM Loan AS L\n LEFT JOIN Books AS B ON (L.n_book_id = B.n_book_id)\n LEFT JOIN Users AS U ON (L.n_user_id = U.n_user_id)\n WHERE U.s_user_name LIKE '%{user_id}%'\"\"\")\n # getting books and loaned books from db\n books_loans = leg.get_select(\"\"\"SELECT n_book_id, s_isbn AS ISBN, s_title AS Title, n_publishing_year AS Publishing_year, \n s_book_language AS language,s_aut_first_name AS Author_first_name, \n s_aut_last_name AS Author_last_name\n FROM BOOKS\"\"\")\n\n # getting last loaned book\n expr = [F.last(col).alias(col) for col in loans.columns]\n last_loaned = loans.agg(*expr).select(\"book_id\").collect()[0][-1]\n\n # function to get all important attributes\n df = books_loans.withColumn(\"important_features\", concat_ws(\",\", \"isbn\", 'title', 'language'))\n\n # Bringing the important_features into the RDD Form\n rdd = sparkContext.parallelize(\n [[0, '9780575097568, Rivers of London,en'], [1, '9780345524591, Moon Over Soho,None'],\n [2, '9780525516019, A Land of Permanent Goodbyes,en'], [3, 'None,Der Text des Lebens,de'],\n [4, '9783453273351, Später,un'], [5, '9783492070904, Das Geheimnis von Zimmer 622 - Roman,un'],\n [6, '9783257071481, Hard Land,un'], [7, '9783785727416, Der neunte Arm des Oktopus - Thriller,un'],\n [8, '9783455011784, The Hill We Climb: Ein Gedicht zur Inauguration,un'],\n [9, '9783423282789, Vom Aufstehen - Ein Leben in Geschichten,un'],\n [10, '9783423282734, Junge Frau, am Fenster stehend, Abendlicht, blaues Kleid - Roman,un'],\n [11, '9783630876672, Über Menschen - Roman,un'], [12, '9783426282564, Die Mitternachtsbibliothek - Roman,un'],\n [13, '9783446269156, Sprich mit mir - Roman,un'], [14, '9783866124776, Der Buchspazierer - Roman,un'],\n [15, '9783764510473, Der Fall des Präsidenten - Thriller,un'],\n [16, '9783832181536, Der große Sommer - Roman,un'], [17, '9783737101127, Monschau,un'],\n [18, '9783426281550, Der Heimweg,un'], [19, '9783462050837, Eurotrash - Roman,un'],\n [20, '9783462053289, Kim Jiyoung, geboren 1982 - Roman,un'],\n [21, '9783896676931, Klara und die Sonne - Roman,un'],\n [22, '9783442316397, Von der Pflicht - Eine Betrachtung,un'], [23, '9783103973150, Adas Raum - Roman,un'],\n [24, '9783833877179, Genesis - Die Befreiung der Geschlechter,un'],\n [25, '9783462053616, Komplett Gänsehaut,un'], [26, '9783462054767, Der Mann im roten Rock,un'],\n [27, '9783492075008, Und erlöse uns von den Blöden - Vom Menschenverstand in hysterischen Zeiten,un'],\n [28, '9783965840928, Die Ernährungs-Docs - Gesund und schlank durch Intervallfasten,un'],\n [29, '9783861221265, Die fünf Sprachen der Liebe - wie Kommunikation in der Ehe gelingt,un'],\n [30, '9783827501530, Der Wahrheit verpflichtet - Meine Geschichte,un'],\n [31, '9783948319007, ON/ OFF GESUNDHEIT - Den Körper neu erschaffen durch 
Ernährung,un'],\n [32, '9783789129407, Ronja Räubertochter,un'], [33, '9783751200530, Dunkelnacht,un']])\n\n # Compute TF-IDF\n documents = rdd.map(lambda l: l[1].replace(\" \", \"\").split(\",\"))\n\n from pyspark.mllib.feature import HashingTF, IDF\n hashingTF = HashingTF()\n tf = hashingTF.transform(documents)\n\n tf.cache()\n idf = IDF().fit(tf)\n tfidf = idf.transform(tf)\n\n # Compute L2 norm\n from pyspark.mllib.feature import Normalizer\n labels = rdd.map(lambda l: l[0])\n features = tfidf\n\n normalizer = Normalizer()\n data = labels.zip(normalizer.transform(features))\n\n # Compute cosine similarity by multiplying the matrix with itself\n from pyspark.mllib.linalg.distributed import IndexedRowMatrix\n mat = IndexedRowMatrix(data).toBlockMatrix()\n dot = mat.multiply(mat.transpose())\n dot.toLocalMatrix().toArray()\n\n data.cartesian(data) \\\n .map(lambda l: ((l[0][0], l[1][0]), l[0][1].dot(l[1][1]))) \\\n .sortByKey() \\\n .collect()\n\n import pyspark.sql.functions as psf\n # creating model\n df = rdd.toDF([\"ID\", \"Office_Loc\"]) \\\n .withColumn(\"Office_Loc\", psf.split(psf.regexp_replace(\"Office_Loc\", \" \", \"\"), ','))\n\n from pyspark.ml.feature import HashingTF, IDF\n hashingTF = HashingTF(inputCol=\"Office_Loc\", outputCol=\"tf\")\n tf = hashingTF.transform(df)\n\n # fitting data into model\n idf = IDF(inputCol=\"tf\", outputCol=\"feature\").fit(tf)\n tfidf = idf.transform(tf)\n\n from pyspark.ml.feature import Normalizer\n # normalize features\n normalizer = Normalizer(inputCol=\"feature\", outputCol=\"norm\")\n data = normalizer.transform(tfidf)\n\n dot_udf = psf.udf(lambda x, y: float(x.dot(y)), DoubleType())\n\n # creating recommendation dataframe\n rec_df = data.alias(\"loaned_book\").join(data.alias(\"book_id\"), psf.col(\"loaned_book.ID\") < psf.col(\"book_id.ID\"))\n # calculating the score point between last loaned book and all books\n rec_df = rec_df.select(psf.col(\"loaned_book.ID\").alias(\"loaned_book\"), psf.col(\"book_id.ID\").alias(\"book_id\"),\n dot_udf(\"loaned_book.norm\", \"book_id.norm\").alias(\"Score\"))\n rec_df = rec_df.filter(F.col('loaned_book') == last_loaned).sort(col('Score').desc())\n\n result = []\n for i in range(0, 4):\n result.append(rec_df.select('book_id').collect()[i][0])\n\n # returning result in form of array with top4 recommendation book_id\n return result\n\n\ntry:\n for ids in range(3):\n result = recommendation(id)\n print(f\"For user {id} the following recommendations were created: {result}\")\n for recommendations in result:\n leg.exec_statement(f\"INSERT INTO RECOMMENDATIONS (n_user_id, n_book_id) VALUES ({id}, {recommendations})\")\nexcept Exception as exc:\n logging.error(\"Handled exception for recommendation system - no new fields added to database this iteration\")\n"
] |
[
[
"pandas.read_sql_query"
]
] |
kiseyno92/SNU_ML
|
[
"be48a5c570ef59dc2b5a782c828536e100d7f0eb",
"be48a5c570ef59dc2b5a782c828536e100d7f0eb"
] |
[
"Practice1/ML_Linear_Regression.py",
"Practice3/NN_basic.py"
] |
[
"\n# coding: utf-8\n\n# Linear Regression Example\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\n\n\n# In[4]:\n\n#Load the diabetes dataset\ndiabetes = datasets.load_diabetes()\n#diabetes\n\n\n# In[6]:\n\ndiabetes_X = diabetes.data[:,np.newaxis, 2]#모든 행에 대해서 2 index 라인을 사용하겠다\n\n\n# In[10]:\n\n# Split the data into training/testing sets 처음부터 뒤에서 20명까지\ndiabetes_X_train = diabetes_X[:-20]\ndiabetes_X_test = diabetes_X[-20:]\n\n#Split the targets into sets\ndiabetes_Y_train = diabetes.target[:-20]\ndiabetes_Y_test = diabetes.target[-20:]\n\n\n# In[11]:\n\n#Plot train data\nplt.scatter(diabetes_X_train, diabetes_Y_train, color = 'black')\nplt.title('diabetes_train')\nplt.xlabel('BMI')\nplt.ylabel('diabetes')\nplt.show()\n\n\n# In[13]:\n\n#Create linear regression object\nregr = linear_model.LinearRegression()\n\n#Train the model using the training sets\nregr.fit(diabetes_X_train, diabetes_Y_train)\n\n\n# In[14]:\n\nprint('Coefficient: \\n', regr.coef_)\nprint('Intercept: \\n', regr.intercept_)\n\n\n# In[15]:\n\ndef linearFunction(x,a,b):\n y = (a*x) + b\n return y\n\n\n# In[19]:\n\nplt.scatter(diabetes_X_train,diabetes_Y_train, color = 'black')\n\nx = np.arange(-0.1,0.2,0.1) #x값 넣어주기\ny = linearFunction(x,regr.coef_,regr.intercept_)\nplt.plot(x,y,color = 'blue',linewidth =3)\n\nplt.title('diabetes_train')\nplt.xlabel('BMI')\nplt.ylabel('diabetes')\nplt.show()\n\n\n# Test Model\n\n# In[20]:\n\nplt.scatter(diabetes_X_test,diabetes_Y_test, color = 'black')\nplt.plot(diabetes_X_test,regr.predict(diabetes_X_test),color = 'blue',linewidth =3)\n\nplt.title('diabetes_test')\nplt.xlabel('BMI')\nplt.ylabel('diabetes')\nplt.show()\n\n\n# In[24]:\n\n#The mean squared error\nprint (\"Mean squared error : %.2f\" %np.mean((regr.predict(diabetes_X_test) - diabetes_Y_test)**2))\n\nprint(\"Variance score : %.2f\" % regr.score(diabetes_X_test, diabetes_Y_test))\n \n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n",
"\n# coding: utf-8\n\n# In[9]:\n\nimport tensorflow as tf\nimport numpy as np\nx_data = np.array([[0,0],[0,1],[1,0],[1,1]], dtype = np.float32)\n\n\n# In[10]:\n\ny_data = np.array([[0],[1],[1],[0]], dtype = np.float32)\n\n\nX = tf.placeholder(tf.float32, [None,2])\nY = tf.placeholder(tf.float32, [None,1])\n\n\n\n\n\n# In[11]:\n\nW1 = tf.Variable(tf.random_normal([2,2]), name = 'weight1')\nb1 = tf.Variable(tf.random_normal([2]), name = 'bias1')\nlayer1 = tf.sigmoid(tf.matmul(X,W1)+b1)\n\n\n# In[12]:\n\nW2 = tf.Variable(tf.random_normal([2,1]), name = 'weight2')\nb2 = tf.Variable(tf.random_normal([1]), name = 'bias2')\n\n\n# In[13]:\n\nhypothesis = tf.sigmoid(tf.matmul(layer1,W2)+b2)\n\n\n# In[14]:\n\ncost = -tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))\n\n\n# In[18]:\n\ntrain = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)\n\n\n# In[19]:\n\npredicted = tf.cast(hypothesis>0.5, dtype = tf.float32)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predicted,Y),dtype = tf.float32))\n\n\n# In[20]:\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n for step in range(2000):\n sess.run(train, feed_dict = {X:x_data, Y: y_data})\n if step %100 ==0:\n print(step, sess.run(cost,feed_dict = {X:x_data, Y:y_data}), sess.run([W1,W1]))\n h,p,a = sess.run([hypothesis, predicted, accuracy], feed_dict ={X:x_data, Y: y_data})\n print ('\\nHypothsis:',h,'\\npredicted:',p,'\\naccuracy:',a)\n\n\n# In[ ]:\n\n\n\n"
] |
[
[
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.load_diabetes",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter"
],
[
"numpy.array",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.equal",
"tensorflow.log",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.random_normal",
"tensorflow.cast",
"tensorflow.train.GradientDescentOptimizer"
]
] |
dmnpignaud/incubator-airflow
|
[
"84a55f3e546cfbfd5f47302537444c8c8c4d2753"
] |
[
"scripts/perf/scheduler_ops_metrics.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport pandas as pd\nimport sys\n\nfrom airflow import configuration, settings\nfrom airflow.jobs import SchedulerJob\nfrom airflow.models import DagBag, DagModel, DagRun, TaskInstance\nfrom airflow.utils import timezone\nfrom airflow.utils.state import State\n\nSUBDIR = 'scripts/perf/dags'\nDAG_IDS = ['perf_dag_1', 'perf_dag_2']\nMAX_RUNTIME_SECS = 6\n\n\nclass SchedulerMetricsJob(SchedulerJob):\n \"\"\"\n This class extends SchedulerJob to instrument the execution performance of\n task instances contained in each DAG. We want to know if any DAG\n is starved of resources, and this will be reflected in the stats printed\n out at the end of the test run. The following metrics will be instrumented\n for each task instance (dag_id, task_id, execution_date) tuple:\n\n 1. Queuing delay - time taken from starting the executor to the task\n instance to be added to the executor queue.\n 2. Start delay - time taken from starting the executor to the task instance\n to start execution.\n 3. Land time - time taken from starting the executor to task instance\n completion.\n 4. Duration - time taken for executing the task instance.\n\n The DAGs implement bash operators that call the system wait command. This\n is representative of typical operators run on Airflow - queries that are\n run on remote systems and spend the majority of their time on I/O wait.\n\n To Run:\n $ python scripts/perf/scheduler_ops_metrics.py [timeout]\n\n You can specify timeout in seconds as an optional parameter.\n Its default value is 6 seconds.\n \"\"\"\n __mapper_args__ = {\n 'polymorphic_identity': 'SchedulerMetricsJob'\n }\n\n def print_stats(self):\n \"\"\"\n Print operational metrics for the scheduler test.\n \"\"\"\n session = settings.Session()\n TI = TaskInstance\n tis = (\n session\n .query(TI)\n .filter(TI.dag_id.in_(DAG_IDS))\n .all()\n )\n successful_tis = [x for x in tis if x.state == State.SUCCESS]\n ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,\n (ti.queued_dttm - self.start_date).total_seconds(),\n (ti.start_date - self.start_date).total_seconds(),\n (ti.end_date - self.start_date).total_seconds(),\n ti.duration) for ti in successful_tis]\n ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',\n 'execution_date',\n 'queue_delay',\n 'start_delay', 'land_time',\n 'duration'])\n\n print('Performance Results')\n print('###################')\n for dag_id in DAG_IDS:\n print('DAG {}'.format(dag_id))\n print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])\n print('###################')\n if len(tis) > len(successful_tis):\n print(\"WARNING!! 
The following task instances haven't completed\")\n print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)\n for ti in filter(lambda x: x.state != State.SUCCESS, tis)],\n columns=['dag_id', 'task_id', 'execution_date', 'state']))\n\n session.commit()\n\n def heartbeat(self):\n \"\"\"\n Override the scheduler heartbeat to determine when the test is complete\n \"\"\"\n super(SchedulerMetricsJob, self).heartbeat()\n session = settings.Session()\n # Get all the relevant task instances\n TI = TaskInstance\n successful_tis = (\n session\n .query(TI)\n .filter(TI.dag_id.in_(DAG_IDS))\n .filter(TI.state.in_([State.SUCCESS]))\n .all()\n )\n session.commit()\n\n dagbag = DagBag(SUBDIR)\n dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]\n # the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval.\n num_task_instances = sum([(timezone.utcnow() - task.start_date).days\n for dag in dags for task in dag.tasks])\n\n if (len(successful_tis) == num_task_instances or\n (timezone.utcnow() - self.start_date).total_seconds() >\n MAX_RUNTIME_SECS):\n if (len(successful_tis) == num_task_instances):\n self.log.info(\"All tasks processed! Printing stats.\")\n else:\n self.log.info(\"Test timeout reached. \"\n \"Printing available stats.\")\n self.print_stats()\n set_dags_paused_state(True)\n sys.exit()\n\n\ndef clear_dag_runs():\n \"\"\"\n Remove any existing DAG runs for the perf test DAGs.\n \"\"\"\n session = settings.Session()\n drs = session.query(DagRun).filter(\n DagRun.dag_id.in_(DAG_IDS),\n ).all()\n for dr in drs:\n logging.info('Deleting DagRun :: {}'.format(dr))\n session.delete(dr)\n\n\ndef clear_dag_task_instances():\n \"\"\"\n Remove any existing task instances for the perf test DAGs.\n \"\"\"\n session = settings.Session()\n TI = TaskInstance\n tis = (\n session\n .query(TI)\n .filter(TI.dag_id.in_(DAG_IDS))\n .all()\n )\n for ti in tis:\n logging.info('Deleting TaskInstance :: {}'.format(ti))\n session.delete(ti)\n session.commit()\n\n\ndef set_dags_paused_state(is_paused):\n \"\"\"\n Toggle the pause state of the DAGs in the test.\n \"\"\"\n session = settings.Session()\n dms = session.query(DagModel).filter(\n DagModel.dag_id.in_(DAG_IDS))\n for dm in dms:\n logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))\n dm.is_paused = is_paused\n session.commit()\n\n\ndef main():\n global MAX_RUNTIME_SECS\n if len(sys.argv) > 1:\n try:\n max_runtime_secs = int(sys.argv[1])\n if max_runtime_secs < 1:\n raise ValueError\n MAX_RUNTIME_SECS = max_runtime_secs\n except ValueError:\n logging.error('Specify a positive integer for timeout.')\n sys.exit(1)\n\n configuration.load_test_config()\n\n set_dags_paused_state(False)\n clear_dag_runs()\n clear_dag_task_instances()\n\n job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)\n job.run()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.DataFrame"
]
] |
HarshCasper/Ling
|
[
"5171024b440ae455bad4d0972c2046288bd8b159"
] |
[
"app.py"
] |
[
"from flask import Flask,render_template,url_for,request\nfrom flask_bootstrap import Bootstrap \nimport pandas as pd \nimport numpy as np \n\n# ML Packages\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.externals import joblib\n\n\napp = Flask(__name__)\nBootstrap(app)\n\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\tdf= pd.read_csv(\"data/names_dataset.csv\")\n\t# Features and Labels\n\tdf_X = df.name\n\tdf_Y = df.sex\n \n # Vectorization\n\tcorpus = df_X\n\tcv = CountVectorizer()\n\tX = cv.fit_transform(corpus) \n\t\n\t# Loading our ML Model\n\tnaivebayes_model = open(\"models/naivebayesgendermodel.pkl\",\"rb\")\n\tclf = joblib.load(naivebayes_model)\n\n\t# Receives the input query from form\n\tif request.method == 'POST':\n\t\tnamequery = request.form['namequery']\n\t\tdata = [namequery]\n\t\tvect = cv.transform(data).toarray()\n\t\tmy_prediction = clf.predict(vect)\n\treturn render_template('results.html',prediction = my_prediction,name = namequery.upper())\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n"
] |
[
[
"sklearn.externals.joblib.load",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer"
]
] |
khaykingleb/HiFi-GAN
|
[
"6bafd6f8f67d2393e057cb64cd6c1311d59a85f0"
] |
[
"nv/datasets/ljspeech_dataset.py"
] |
[
"import torchaudio\nimport torch\n\n\nclass LJSpeechDataset(torchaudio.datasets.LJSPEECH):\n\n def __init__(self, root: str):\n super().__init__(root=root)\n \n def __getitem__(self, index: int):\n waveform, sr, _, transcript = super().__getitem__(index)\n waveform_length = torch.tensor([waveform.shape[-1]]).int()\n \n return transcript, waveform, waveform_length\n"
] |
[
[
"torch.tensor"
]
] |
shenlong95/mealpy
|
[
"36fd404b8c255699cccc5ea68dbda385836a615b"
] |
[
"mealpy/human_based/ICA.py"
] |
[
"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu\" at 14:07, 02/03/2021 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %\n# Github: https://github.com/thieu1995 %\n# ------------------------------------------------------------------------------------------------------%\n\nimport numpy as np\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseICA(Optimizer):\n \"\"\"\n The original version of: Imperialist Competitive Algorithm (ICA)\n Link:\n https://ieeexplore.ieee.org/document/4425083\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, empire_count=5, selection_pressure=1, assimilation_coeff=1.5,\n revolution_prob=0.05, revolution_rate=0.1, revolution_step_size=0.1, revolution_step_size_damp=0.99, zeta=0.1, **kwargs):\n \"\"\"\n Args:\n problem ():\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size (n: pop_size, m: clusters), default = 100\n empire_count (): Number of Empires (also Imperialists)\n selection_pressure (): Selection Pressure\n assimilation_coeff (): Assimilation Coefficient (beta in the paper)\n revolution_prob (): Revolution Probability\n revolution_rate (): Revolution Rate (mu)\n revolution_step_size (): Revolution Step Size (sigma)\n revolution_step_size_damp (): Revolution Step Size Damp Rate\n zeta (): Colonies Coefficient in Total Objective Value of Empires\n **kwargs ():\n \"\"\"\n super().__init__(problem, kwargs)\n self.nfe_per_epoch = pop_size\n self.sort_flag = True\n\n self.epoch = epoch\n self.pop_size = pop_size\n self.empire_count = empire_count\n self.selection_pressure = selection_pressure\n self.assimilation_coeff = assimilation_coeff\n self.revolution_prob = revolution_prob\n self.revolution_rate = revolution_rate\n self.revolution_step_size = revolution_step_size\n self.revolution_step_size_damp = revolution_step_size_damp\n self.zeta = zeta\n\n self.pop_empires, self.pop_colonies, self.empires = None, None, None\n self.n_revoluted_variables, self.idx_list_variables = None, None\n\n def revolution_country(self, position, idx_list_variables, n_revoluted):\n pos_new = position + self.revolution_step_size * np.random.normal(0, 1, self.problem.n_dims)\n idx_list = np.random.choice(idx_list_variables, n_revoluted, replace=False)\n position[idx_list] = pos_new[idx_list] # Change only those selected index\n return position\n\n def initialization(self):\n pop = self.create_population(self.pop_size)\n self.pop, self.g_best = self.get_global_best_solution(pop)\n\n # Initialization\n self.n_revoluted_variables = int(round(self.revolution_rate * self.problem.n_dims))\n self.idx_list_variables = list(range(0, self.problem.n_dims))\n\n # pop = Empires\n colony_count = self.pop_size - self.empire_count\n self.pop_empires = self.pop[:self.empire_count].copy()\n self.pop_colonies = self.pop[self.empire_count:].copy()\n\n cost_empires_list = np.array([solution[self.ID_FIT][self.ID_TAR] for solution in self.pop_empires])\n cost_empires_list_normalized = cost_empires_list - (np.max(cost_empires_list) + np.min(cost_empires_list))\n prob_empires_list = np.abs(cost_empires_list_normalized / np.sum(cost_empires_list_normalized))\n # Randomly choose colonies to empires\n self.empires = {}\n idx_already_selected = []\n for i in range(0, self.empire_count - 1):\n self.empires[i] = []\n n_colonies = int(round(prob_empires_list[i] * colony_count))\n 
idx_list = np.random.choice(list(set(range(0, colony_count)) - set(idx_already_selected)), n_colonies, replace=False).tolist()\n idx_already_selected += idx_list\n for idx in idx_list:\n self.empires[i].append(self.pop_colonies[idx])\n idx_last = list(set(range(0, colony_count)) - set(idx_already_selected))\n self.empires[self.empire_count - 1] = []\n for idx in idx_last:\n self.empires[self.empire_count - 1].append(self.pop_colonies[idx])\n\n def evolve(self, epoch):\n \"\"\"\n Args:\n epoch (int): The current iteration\n \"\"\"\n # Assimilation\n for idx, colonies in self.empires.items():\n for idx_colony, colony in enumerate(colonies):\n pos_new = colony[self.ID_POS] + self.assimilation_coeff * \\\n np.random.uniform(0, 1, self.problem.n_dims) * (self.pop_empires[idx][self.ID_POS] - colony[self.ID_POS])\n pos_new = self.amend_position_faster(pos_new)\n self.empires[idx][idx_colony][self.ID_POS] = pos_new\n self.empires[idx] = self.update_fitness_population(self.empires[idx])\n # empires[idx], g_best = self.update_global_best_solution(empires[idx], self.ID_MIN_PROB, g_best)\n\n # Revolution\n for idx, colonies in self.empires.items():\n # Apply revolution to Imperialist\n pos_new = self.revolution_country(self.pop_empires[idx][self.ID_POS], self.idx_list_variables, self.n_revoluted_variables)\n self.pop_empires[idx][self.ID_POS] = self.amend_position_faster(pos_new)\n\n # Apply revolution to Colonies\n for idx_colony, colony in enumerate(colonies):\n if np.random.rand() < self.revolution_prob:\n pos_new = self.revolution_country(colony[self.ID_POS], self.idx_list_variables, self.n_revoluted_variables)\n self.empires[idx][idx_colony][self.ID_POS] = self.amend_position_faster(pos_new)\n self.empires[idx] = self.update_fitness_population(self.empires[idx])\n self.pop_empires = self.update_fitness_population(self.pop_empires)\n _, g_best = self.update_global_best_solution(self.pop_empires)\n\n # Intra-Empire Competition\n for idx, colonies in self.empires.items():\n for idx_colony, colony in enumerate(colonies):\n if self.compare_agent(colony, self.pop_empires[idx]):\n self.empires[idx][idx_colony], self.pop_empires[idx] = self.pop_empires[idx], colony.copy()\n\n # Update Total Objective Values of Empires\n cost_empires_list = []\n for idx, colonies in self.empires.items():\n fit_list = np.array([solution[self.ID_FIT][self.ID_TAR] for solution in colonies])\n fit_empire = self.pop_empires[idx][self.ID_FIT][self.ID_TAR] + self.zeta * np.mean(fit_list)\n cost_empires_list.append(fit_empire)\n cost_empires_list = np.array(cost_empires_list)\n\n # Find possession probability of each empire based on its total power\n cost_empires_list_normalized = cost_empires_list - (np.max(cost_empires_list) + np.min(cost_empires_list))\n prob_empires_list = np.abs(cost_empires_list_normalized / np.sum(cost_empires_list_normalized)) # Vector P\n\n uniform_list = np.random.uniform(0, 1, len(prob_empires_list)) # Vector R\n vector_D = prob_empires_list - uniform_list\n idx_empire = np.argmax(vector_D)\n\n # Find the weakest empire and weakest colony inside it\n idx_weakest_empire = np.argmax(cost_empires_list)\n if len(self.empires[idx_weakest_empire]) > 0:\n colonies_sorted, best, worst = self.get_special_solutions(self.empires[idx_weakest_empire])\n self.empires[idx_empire].append(colonies_sorted.pop(-1))\n else:\n self.empires[idx_empire].append(self.pop_empires.pop(idx_weakest_empire))\n\n self.pop = self.pop_empires + self.pop_colonies\n\n"
] |
[
[
"numpy.max",
"numpy.random.normal",
"numpy.array",
"numpy.random.choice",
"numpy.random.rand",
"numpy.sum",
"numpy.min",
"numpy.mean",
"numpy.random.uniform",
"numpy.argmax"
]
] |
negm/scipy-lecture-notes
|
[
"cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6"
] |
[
"advanced/mathematical_optimization/examples/compare_optimizers_plot.py"
] |
[
"\"\"\"\nPlotting the comparison of optimizers\n======================================\n\nPlots the results from the comparison of optimizers.\n\n\"\"\"\n\nimport pickle\n\nimport numpy as np\nimport pylab as pl\n\nresults = pickle.load(file('compare_optimizers.pkl'))\n#results = pickle.load(file('compare_optimizers_gradients.pkl'))\nn_methods = len(results.values()[0]['Rosenbrock '])\nn_dims = len(results)\n\nsymbols = 'o>*Ds'\n\npl.figure(1, figsize=(10, 4))\npl.clf()\n\ncolors = pl.cm.Spectral(np.linspace(0, 1, n_dims))[:, :3]\n\nmethod_names = results.values()[0]['Rosenbrock '].keys()\nmethod_names.sort(key=lambda x: x[::-1], reverse=True)\n\nfor n_dim_index, ((n_dim, n_dim_bench), color) in enumerate(\n zip(sorted(results.items()), colors)):\n for (cost_name, cost_bench), symbol in zip(sorted(n_dim_bench.items()),\n symbols):\n for method_index, method_name, in enumerate(method_names):\n this_bench = cost_bench[method_name]\n bench = np.mean(this_bench)\n pl.semilogy([method_index + .1*n_dim_index, ], [bench, ],\n marker=symbol, color=color)\n\n# Create a legend for the problem type\nfor cost_name, symbol in zip(sorted(n_dim_bench.keys()),\n symbols):\n pl.semilogy([-10, ], [0, ], symbol, color='.5',\n label=cost_name)\n\npl.xticks(np.arange(n_methods), method_names, size=11)\npl.xlim(-.2, n_methods - .5)\npl.legend(loc='best', numpoints=1, handletextpad=0, prop=dict(size=12),\n frameon=False)\npl.ylabel('# function calls (a.u.)')\n\n# Create a second legend for the problem dimensionality\npl.twinx()\n\nfor n_dim, color in zip(sorted(results.keys()), colors):\n pl.plot([-10, ], [0, ], 'o', color=color,\n label='# dim: %i' % n_dim)\npl.legend(loc=(.47, .07), numpoints=1, handletextpad=0, prop=dict(size=12),\n frameon=False, ncol=2)\npl.xlim(-.2, n_methods - .5)\n\npl.xticks(np.arange(n_methods), method_names)\npl.yticks(())\n\npl.tight_layout()\npl.show()\n\n\n"
] |
[
[
"numpy.linspace",
"numpy.arange",
"numpy.mean"
]
] |
hammer/pandas
|
[
"7d7d210cc40206564d63e4b0d1239eef0e987173"
] |
[
"pandas/stats/tests/test_ols.py"
] |
[
"\"\"\"\nUnit test suite for OLS and PanelOLS classes\n\"\"\"\n\n# pylint: disable-msg=W0212\n\nfrom __future__ import division\n\nfrom datetime import datetime\nimport unittest\nimport numpy as np\n\nfrom pandas.core.panel import LongPanel, Panel\nfrom pandas.core.api import DataFrame, Index, Series, notnull\nfrom pandas.stats.api import ols\nfrom pandas.stats.plm import NonPooledPanelOLS, PanelOLS\nfrom pandas.util.testing import (assert_almost_equal, assert_series_equal,\n assert_frame_equal)\nimport pandas.util.testing as tm\n\nfrom common import BaseTest\n\ndef _check_repr(obj):\n repr(obj)\n str(obj)\n\ndef _compare_ols_results(model1, model2):\n assert(type(model1) == type(model2))\n\n if hasattr(model1, '_window_type'):\n _compare_moving_ols(model1, model2)\n else:\n _compare_fullsample_ols(model1, model2)\n\ndef _compare_fullsample_ols(model1, model2):\n assert_series_equal(model1.beta, model2.beta)\n\ndef _compare_moving_ols(model1, model2):\n assert_frame_equal(model1.beta, model2.beta)\n\nclass TestOLS(BaseTest):\n\n # TODO: Add tests for OLS y predict\n # TODO: Right now we just check for consistency between full-sample and\n # rolling/expanding results of the panel OLS. We should also cross-check\n # with trusted implementations of panel OLS (e.g. R).\n # TODO: Add tests for non pooled OLS.\n\n @classmethod\n def setupClass(cls):\n try:\n import scikits.statsmodels.api as _\n except ImportError:\n import nose\n raise nose.SkipTest\n\n def testOLSWithDatasets(self):\n import scikits.statsmodels.datasets as datasets\n\n self.checkDataSet(datasets.ccard.load(), skip_moving=True)\n self.checkDataSet(datasets.cpunish.load(), skip_moving=True)\n self.checkDataSet(datasets.longley.load(), skip_moving=True)\n self.checkDataSet(datasets.stackloss.load(), skip_moving=True)\n self.checkDataSet(datasets.copper.load())\n self.checkDataSet(datasets.scotland.load())\n\n # degenerate case fails on some platforms\n # self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s\n\n def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):\n exog = dataset.exog[start : end]\n endog = dataset.endog[start : end]\n x = DataFrame(exog, index=np.arange(exog.shape[0]),\n columns=np.arange(exog.shape[1]))\n y = Series(endog, index=np.arange(len(endog)))\n\n self.checkOLS(exog, endog, x, y)\n\n if not skip_moving:\n self.checkMovingOLS('rolling', x, y)\n self.checkMovingOLS('rolling', x, y, nw_lags=0)\n self.checkMovingOLS('expanding', x, y, nw_lags=0)\n self.checkMovingOLS('rolling', x, y, nw_lags=1)\n self.checkMovingOLS('expanding', x, y, nw_lags=1)\n self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)\n\n def checkOLS(self, exog, endog, x, y):\n import scikits.statsmodels.api as sm\n reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()\n result = ols(y=y, x=x)\n\n # check that sparse version is the same\n sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())\n _compare_ols_results(result, sparse_result)\n\n assert_almost_equal(reference.params, result._beta_raw)\n assert_almost_equal(reference.df_model, result._df_model_raw)\n assert_almost_equal(reference.df_resid, result._df_resid_raw)\n assert_almost_equal(reference.fvalue, result._f_stat_raw[0])\n assert_almost_equal(reference.pvalues, result._p_value_raw)\n assert_almost_equal(reference.rsquared, result._r2_raw)\n assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)\n assert_almost_equal(reference.resid, result._resid_raw)\n assert_almost_equal(reference.bse, 
result._std_err_raw)\n assert_almost_equal(reference.tvalues, result._t_stat_raw)\n assert_almost_equal(reference.cov_params(), result._var_beta_raw)\n assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)\n\n _check_non_raw_results(result)\n\n def checkMovingOLS(self, window_type, x, y, **kwds):\n from scikits.statsmodels.tools.tools import rank\n window = rank(x.values) * 2\n\n moving = ols(y=y, x=x, window_type=window_type,\n window=window, **kwds)\n\n # check that sparse version is the same\n sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),\n window_type=window_type,\n window=window, **kwds)\n _compare_ols_results(moving, sparse_moving)\n\n index = moving._index\n\n for n, i in enumerate(moving._valid_indices):\n if window_type == 'rolling' and i >= window:\n prior_date = index[i - window + 1]\n else:\n prior_date = index[0]\n\n date = index[i]\n\n x_iter = {}\n for k, v in x.iteritems():\n x_iter[k] = v.truncate(before=prior_date, after=date)\n y_iter = y.truncate(before=prior_date, after=date)\n\n static = ols(y=y_iter, x=x_iter, **kwds)\n\n self.compare(static, moving, event_index=i,\n result_index=n)\n\n _check_non_raw_results(moving)\n\n FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',\n 'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',\n 'var_beta']\n\n def compare(self, static, moving, event_index=None,\n result_index=None):\n\n # Check resid if we have a time index specified\n if event_index is not None:\n ref = static._resid_raw[-1]\n res = moving._resid_raw[event_index]\n\n assert_almost_equal(ref, res)\n\n ref = static._y_fitted_raw[-1]\n res = moving._y_fitted_raw[event_index]\n\n assert_almost_equal(ref, res)\n\n # Check y_fitted\n\n for field in self.FIELDS:\n attr = '_%s_raw' % field\n\n ref = getattr(static, attr)\n res = getattr(moving, attr)\n\n if result_index is not None:\n res = res[result_index]\n\n assert_almost_equal(ref, res)\n\nclass TestOLSMisc(unittest.TestCase):\n '''\n For test coverage with faux data\n '''\n @classmethod\n def setupClass(cls):\n try:\n import scikits.statsmodels.api as _\n except ImportError:\n import nose\n raise nose.SkipTest\n\n def test_f_test(self):\n x = tm.makeTimeDataFrame()\n y = x.pop('A')\n\n model = ols(y=y, x=x)\n\n hyp = '1*B+1*C+1*D=0'\n result = model.f_test(hyp)\n\n hyp = ['1*B=0',\n '1*C=0',\n '1*D=0']\n result = model.f_test(hyp)\n assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])\n\n self.assertRaises(Exception, model.f_test, '1*A=0')\n\n def test_r2_no_intercept(self):\n y = tm.makeTimeSeries()\n x = tm.makeTimeDataFrame()\n\n x_with = x.copy()\n x_with['intercept'] = 1.\n\n model1 = ols(y=y, x=x)\n model2 = ols(y=y, x=x_with, intercept=False)\n assert_series_equal(model1.beta, model2.beta)\n\n # TODO: can we infer whether the intercept is there...\n self.assert_(model1.r2 != model2.r2)\n\n # rolling\n\n model1 = ols(y=y, x=x, window=20)\n model2 = ols(y=y, x=x_with, window=20, intercept=False)\n assert_frame_equal(model1.beta, model2.beta)\n self.assert_((model1.r2 != model2.r2).all())\n\n def test_summary_many_terms(self):\n x = DataFrame(np.random.randn(100, 20))\n y = np.random.randn(100)\n model = ols(y=y, x=x)\n model.summary\n\n def test_y_predict(self):\n y = tm.makeTimeSeries()\n x = tm.makeTimeDataFrame()\n model1 = ols(y=y, x=x)\n assert_series_equal(model1.y_predict, model1.y_fitted)\n assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)\n\n def test_longpanel_series_combo(self):\n wp = tm.makePanel()\n lp = wp.to_long()\n\n y = lp.pop('ItemA')\n 
model = ols(y=y, x=lp, entity_effects=True, window=20)\n self.assert_(notnull(model.beta.values).all())\n self.assert_(isinstance(model, PanelOLS))\n model.summary\n\n def test_series_rhs(self):\n y = tm.makeTimeSeries()\n x = tm.makeTimeSeries()\n model = ols(y=y, x=x)\n expected = ols(y=y, x={'x' : x})\n assert_series_equal(model.beta, expected.beta)\n\n def test_various_attributes(self):\n # just make sure everything \"works\". test correctness elsewhere\n\n x = DataFrame(np.random.randn(100, 5))\n y = np.random.randn(100)\n model = ols(y=y, x=x, window=20)\n\n series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']\n\n for attr in series_attrs:\n value = getattr(model, attr)\n self.assert_(isinstance(value, Series))\n\n # works\n model._results\n\n def test_catch_regressor_overlap(self):\n df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]\n df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]\n y = tm.makeTimeSeries()\n\n data = {'foo' : df1, 'bar' : df2}\n self.assertRaises(Exception, ols, y=y, x=data)\n\n def test_plm_ctor(self):\n y = tm.makeTimeDataFrame()\n x = {'a' : tm.makeTimeDataFrame(),\n 'b' : tm.makeTimeDataFrame()}\n\n model = ols(y=y, x=x, intercept=False)\n model.summary\n\n model = ols(y=y, x=Panel(x))\n model.summary\n\n def test_plm_attrs(self):\n y = tm.makeTimeDataFrame()\n x = {'a' : tm.makeTimeDataFrame(),\n 'b' : tm.makeTimeDataFrame()}\n\n rmodel = ols(y=y, x=x, window=10)\n model = ols(y=y, x=x)\n model.resid\n rmodel.resid\n\n def test_plm_lagged_y_predict(self):\n y = tm.makeTimeDataFrame()\n x = {'a' : tm.makeTimeDataFrame(),\n 'b' : tm.makeTimeDataFrame()}\n\n model = ols(y=y, x=x, window=10)\n result = model.lagged_y_predict(2)\n\n def test_plm_f_test(self):\n y = tm.makeTimeDataFrame()\n x = {'a' : tm.makeTimeDataFrame(),\n 'b' : tm.makeTimeDataFrame()}\n\n model = ols(y=y, x=x)\n\n hyp = '1*a+1*b=0'\n result = model.f_test(hyp)\n\n hyp = ['1*a=0',\n '1*b=0']\n result = model.f_test(hyp)\n assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])\n\n def test_plm_exclude_dummy_corner(self):\n y = tm.makeTimeDataFrame()\n x = {'a' : tm.makeTimeDataFrame(),\n 'b' : tm.makeTimeDataFrame()}\n\n model = ols(y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'D'})\n model.summary\n\n self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,\n dropped_dummies={'entity' : 'E'})\n\nclass TestPanelOLS(BaseTest):\n\n FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',\n 'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',\n 't_stat', 'var_beta']\n\n _other_fields = ['resid', 'y_fitted']\n\n def testFiltering(self):\n result = ols(y=self.panel_y2, x=self.panel_x2)\n\n x = result._x\n index = [x.major_axis[i] for i in x.major_labels]\n index = Index(sorted(set(index)))\n exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])\n self.assertTrue(exp_index.equals(index))\n\n index = [x.minor_axis[i] for i in x.minor_labels]\n index = Index(sorted(set(index)))\n exp_index = Index(['A', 'B'])\n self.assertTrue(exp_index.equals(index))\n\n x = result._x_filtered\n index = [x.major_axis[i] for i in x.major_labels]\n index = Index(sorted(set(index)))\n exp_index = Index([datetime(2000, 1, 1),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4)])\n self.assertTrue(exp_index.equals(index))\n\n assert_almost_equal(result._y.values.flat, [1, 4, 5])\n\n exp_x = [[6, 14, 1],\n [9, 17, 1],\n [30, 48, 1]]\n assert_almost_equal(exp_x, result._x.values)\n\n exp_x_filtered = [[6, 14, 1],\n [9, 17, 1],\n [30, 48, 1],\n [11, 20, 1],\n [12, 21, 1]]\n 
assert_almost_equal(exp_x_filtered, result._x_filtered.values)\n\n self.assertTrue(result._x_filtered.major_axis.equals(\n result.y_fitted.index))\n\n def testWithTimeEffects(self):\n result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)\n\n assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])\n\n exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]\n assert_almost_equal(result._x_trans.values, exp_x)\n\n # _check_non_raw_results(result)\n\n def testWithEntityEffects(self):\n result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)\n\n assert_almost_equal(result._y.values.flat, [1, 4, 5])\n\n exp_x = DataFrame([[0, 6, 14, 1], [0, 9, 17, 1], [1, 30, 48, 1]],\n index=result._x.index, columns=['FE_B', 'x1', 'x2',\n 'intercept'],\n dtype=float)\n tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])\n # _check_non_raw_results(result)\n\n def testWithEntityEffectsAndDroppedDummies(self):\n result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,\n dropped_dummies={'entity' : 'B'})\n\n assert_almost_equal(result._y.values.flat, [1, 4, 5])\n exp_x = DataFrame([[1, 6, 14, 1], [1, 9, 17, 1], [0, 30, 48, 1]],\n index=result._x.index, columns=['FE_A', 'x1', 'x2',\n 'intercept'],\n dtype=float)\n tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])\n # _check_non_raw_results(result)\n\n def testWithXEffects(self):\n result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])\n\n assert_almost_equal(result._y.values.flat, [1, 4, 5])\n exp_x = [[0, 0, 14, 1], [0, 1, 17, 1], [1, 0, 48, 1]]\n assert_almost_equal(result._x.values, exp_x)\n\n exp_index = Index(['x1_30', 'x1_9', 'x2', 'intercept'])\n self.assertTrue(exp_index.equals(result._x.items))\n\n # _check_non_raw_results(result)\n\n def testWithXEffectsAndDroppedDummies(self):\n result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],\n dropped_dummies={'x1' : 30})\n\n assert_almost_equal(result._y.values.flat, [1, 4, 5])\n exp_x = [[1, 0, 14, 1], [0, 1, 17, 1], [0, 0, 48, 1]]\n assert_almost_equal(result._x.values, exp_x)\n\n exp_index = Index(['x1_6', 'x1_9', 'x2', 'intercept'])\n self.assertTrue(exp_index.equals(result._x.items))\n\n # _check_non_raw_results(result)\n\n def testWithXEffectsAndConversion(self):\n result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])\n\n assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])\n exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],\n [0, 0, 0, 1, 1]]\n assert_almost_equal(result._x.values, exp_x)\n\n exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])\n self.assertTrue(exp_index.equals(result._x.items))\n\n # _check_non_raw_results(result)\n\n def testWithXEffectsAndConversionAndDroppedDummies(self):\n result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],\n dropped_dummies={'x2' : 'foo'})\n\n assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])\n exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],\n [0, 0, 0, 0, 1]]\n assert_almost_equal(result._x.values, exp_x)\n\n exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])\n self.assertTrue(exp_index.equals(result._x.items))\n\n # _check_non_raw_results(result)\n\n def testForSeries(self):\n self.checkForSeries(self.series_panel_x, self.series_panel_y,\n self.series_x, self.series_y)\n\n self.checkForSeries(self.series_panel_x, self.series_panel_y,\n self.series_x, self.series_y, nw_lags=0)\n\n self.checkForSeries(self.series_panel_x, self.series_panel_y,\n self.series_x, self.series_y, 
nw_lags=1,\n nw_overlap=True)\n\n\n def testRolling(self):\n self.checkMovingOLS(self.panel_x, self.panel_y)\n\n def testRollingWithFixedEffects(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n entity_effects=True)\n\n def testRollingWithTimeEffects(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n time_effects=True)\n\n def testRollingWithNeweyWest(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n nw_lags=1)\n\n def testRollingWithEntityCluster(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n cluster='entity')\n\n def testRollingWithTimeEffectsAndEntityCluster(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n time_effects=True, cluster='entity')\n\n def testRollingWithTimeCluster(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n cluster='time')\n\n def testRollingWithNeweyWestAndEntityCluster(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n nw_lags=1, cluster='entity')\n\n def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):\n self.checkMovingOLS(self.panel_x, self.panel_y,\n nw_lags=1, cluster='entity',\n time_effects=True)\n\n def testExpanding(self):\n self.checkMovingOLS(self.panel_x, self.panel_y, window_type='expanding')\n\n def testNonPooled(self):\n self.checkNonPooled(y=self.panel_y, x=self.panel_x)\n self.checkNonPooled(y=self.panel_y, x=self.panel_x,\n window_type='rolling', window=25, min_periods=10)\n\n def checkNonPooled(self, x, y, **kwds):\n # For now, just check that it doesn't crash\n result = ols(y=y, x=x, pool=False, **kwds)\n\n _check_repr(result)\n for attr in NonPooledPanelOLS.ATTRIBUTES:\n _check_repr(getattr(result, attr))\n\n def checkMovingOLS(self, x, y, window_type='rolling', **kwds):\n window = 25 # must be larger than rank of x\n\n moving = ols(y=y, x=x, window_type=window_type,\n window=window, **kwds)\n\n index = moving._index\n\n for n, i in enumerate(moving._valid_indices):\n if window_type == 'rolling' and i >= window:\n prior_date = index[i - window + 1]\n else:\n prior_date = index[0]\n\n date = index[i]\n\n x_iter = {}\n for k, v in x.iteritems():\n x_iter[k] = v.truncate(before=prior_date, after=date)\n y_iter = y.truncate(before=prior_date, after=date)\n\n static = ols(y=y_iter, x=x_iter, **kwds)\n\n self.compare(static, moving, event_index=i,\n result_index=n)\n\n _check_non_raw_results(moving)\n\n def checkForSeries(self, x, y, series_x, series_y, **kwds):\n # Consistency check with simple OLS.\n result = ols(y=y, x=x, **kwds)\n reference = ols(y=series_y, x=series_x, **kwds)\n\n self.compare(reference, result)\n\n def compare(self, static, moving, event_index=None,\n result_index=None):\n\n # Check resid if we have a time index specified\n if event_index is not None:\n staticSlice = _period_slice(static, -1)\n movingSlice = _period_slice(moving, event_index)\n\n ref = static._resid_raw[staticSlice]\n res = moving._resid_raw[movingSlice]\n\n assert_almost_equal(ref, res)\n\n ref = static._y_fitted_raw[staticSlice]\n res = moving._y_fitted_raw[movingSlice]\n\n assert_almost_equal(ref, res)\n\n # Check y_fitted\n\n for field in self.FIELDS:\n attr = '_%s_raw' % field\n\n ref = getattr(static, attr)\n res = getattr(moving, attr)\n\n if result_index is not None:\n res = res[result_index]\n\n assert_almost_equal(ref, res)\n\n def test_auto_rolling_window_type(self):\n data = tm.makeTimeDataFrame()\n y = data.pop('A')\n\n window_model = ols(y=y, x=data, window=20, min_periods=10)\n rolling_model = ols(y=y, x=data, window=20, min_periods=10,\n window_type='rolling')\n\n 
assert_frame_equal(window_model.beta, rolling_model.beta)\n\ndef _check_non_raw_results(model):\n _check_repr(model)\n _check_repr(model.resid)\n _check_repr(model.summary_as_matrix)\n _check_repr(model.y_fitted)\n _check_repr(model.y_predict)\n\ndef _period_slice(panelModel, i):\n index = panelModel._x_trans.index\n period = index.levels[0][i]\n\n L, R = index.get_major_bounds(period, period)\n\n return slice(L, R)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n"
] |
[
[
"pandas.core.api.notnull",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.makeTimeDataFrame",
"pandas.core.api.Index",
"pandas.core.panel.Panel",
"pandas.util.testing.assert_almost_equal",
"numpy.random.randn",
"pandas.util.testing.assert_series_equal",
"pandas.stats.api.ols",
"pandas.util.testing.makePanel",
"numpy.arange",
"pandas.util.testing.makeTimeSeries",
"pandas.core.api.DataFrame"
]
] |
lwang89/Stock-Trading-Visualization
|
[
"42f1b775b19e6291a026b6375e362d05660be385"
] |
[
"render/StockTradingGraph.py"
] |
[
"\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib import style\n\n# finance module is no longer part of matplotlib\n# see: https://github.com/matplotlib/mpl_finance\nfrom mpl_finance import candlestick_ochl as candlestick\n\nstyle.use('dark_background')\n\nVOLUME_CHART_HEIGHT = 0.33\n\nUP_COLOR = '#27A59A'\nDOWN_COLOR = '#EF534F'\nUP_TEXT_COLOR = '#73D3CC'\nDOWN_TEXT_COLOR = '#DC2C27'\n\n\ndef date2num(date):\n converter = mdates.strpdate2num('%Y-%m-%d')\n return converter(date)\n\n\nclass StockTradingGraph:\n \"\"\"A stock trading visualization using matplotlib made to render OpenAI gym environments\"\"\"\n\n def __init__(self, df, title=None):\n self.df = df\n self.net_worths = np.zeros(len(df['Date']))\n\n # Create a figure on screen and set the title\n fig = plt.figure()\n fig.suptitle(title)\n\n # Create top subplot for net worth axis\n self.net_worth_ax = plt.subplot2grid(\n (6, 1), (0, 0), rowspan=2, colspan=1)\n\n # Create bottom subplot for shared price/volume axis\n self.price_ax = plt.subplot2grid(\n (6, 1), (2, 0), rowspan=8, colspan=1, sharex=self.net_worth_ax)\n\n # Create a new axis for volume which shares its x-axis with price\n self.volume_ax = self.price_ax.twinx()\n\n # Add padding to make graph easier to view\n plt.subplots_adjust(left=0.11, bottom=0.24,\n right=0.90, top=0.90, wspace=0.2, hspace=0)\n\n # Show the graph without blocking the rest of the program\n plt.show(block=False)\n\n def _render_net_worth(self, current_step, net_worth, step_range, dates):\n # Clear the frame rendered last step\n self.net_worth_ax.clear()\n\n # Plot net worths\n self.net_worth_ax.plot_date(\n dates, self.net_worths[step_range], '-', label='Net Worth')\n\n # Show legend, which uses the label we defined for the plot above\n self.net_worth_ax.legend()\n legend = self.net_worth_ax.legend(loc=2, ncol=2, prop={'size': 8})\n legend.get_frame().set_alpha(0.4)\n\n last_date = date2num(self.df['Date'].values[current_step])\n last_net_worth = self.net_worths[current_step]\n\n # Annotate the current net worth on the net worth graph\n self.net_worth_ax.annotate('{0:.2f}'.format(net_worth), (last_date, last_net_worth),\n xytext=(last_date, last_net_worth),\n bbox=dict(boxstyle='round',\n fc='w', ec='k', lw=1),\n color=\"black\",\n fontsize=\"small\")\n\n # Add space above and below min/max net worth\n self.net_worth_ax.set_ylim(\n min(self.net_worths[np.nonzero(self.net_worths)]) / 1.25, max(self.net_worths) * 1.25)\n\n def _render_price(self, current_step, net_worth, dates, step_range):\n self.price_ax.clear()\n\n # Format data for OHCL candlestick graph\n candlesticks = zip(dates,\n self.df['Open'].values[step_range], self.df['Close'].values[step_range],\n self.df['High'].values[step_range], self.df['Low'].values[step_range])\n\n # Plot price using candlestick graph from mpl_finance\n candlestick(self.price_ax, candlesticks, width=1,\n colorup=UP_COLOR, colordown=DOWN_COLOR)\n\n last_date = date2num(self.df['Date'].values[current_step])\n last_close = self.df['Close'].values[current_step]\n last_high = self.df['High'].values[current_step]\n\n # Print the current price to the price axis\n self.price_ax.annotate('{0:.2f}'.format(last_close), (last_date, last_close),\n xytext=(last_date, last_high),\n bbox=dict(boxstyle='round',\n fc='w', ec='k', lw=1),\n color=\"black\",\n fontsize=\"small\")\n\n # Shift price axis up to give volume chart space\n ylim = self.price_ax.get_ylim()\n self.price_ax.set_ylim(ylim[0] - (ylim[1] 
- ylim[0])\n * VOLUME_CHART_HEIGHT, ylim[1])\n\n def _render_volume(self, current_step, net_worth, dates, step_range):\n self.volume_ax.clear()\n\n volume = np.array(self.df['Volume'].values[step_range])\n\n pos = self.df['Open'].values[step_range] - \\\n self.df['Close'].values[step_range] < 0\n neg = self.df['Open'].values[step_range] - \\\n self.df['Close'].values[step_range] > 0\n\n # Color volume bars based on price direction on that date\n self.volume_ax.bar(dates[pos], volume[pos], color=UP_COLOR,\n alpha=0.4, width=1, align='center')\n self.volume_ax.bar(dates[neg], volume[neg], color=DOWN_COLOR,\n alpha=0.4, width=1, align='center')\n\n # Cap volume axis height below price chart and hide ticks\n self.volume_ax.set_ylim(0, max(volume) / VOLUME_CHART_HEIGHT)\n self.volume_ax.yaxis.set_ticks([])\n\n def _render_trades(self, current_step, trades, step_range):\n for trade in trades:\n if trade['step'] in step_range:\n date = date2num(self.df['Date'].values[trade['step']])\n high = self.df['High'].values[trade['step']]\n low = self.df['Low'].values[trade['step']]\n\n if trade['type'] == 'buy':\n high_low = low\n color = UP_TEXT_COLOR\n else:\n high_low = high\n color = DOWN_TEXT_COLOR\n\n total = '{0:.2f}'.format(trade['total'])\n\n # Print the current price to the price axis\n self.price_ax.annotate(f'${total}', (date, high_low),\n xytext=(date, high_low),\n color=color,\n fontsize=8,\n arrowprops=(dict(color=color)))\n\n def render(self, current_step, net_worth, trades, window_size=40):\n self.net_worths[current_step] = net_worth\n\n window_start = max(current_step - window_size, 0)\n step_range = range(window_start, current_step + 1)\n\n # Format dates as timestamps, necessary for candlestick graph\n dates = np.array([date2num(x)\n for x in self.df['Date'].values[step_range]])\n\n self._render_net_worth(current_step, net_worth, step_range, dates)\n self._render_price(current_step, net_worth, dates, step_range)\n self._render_volume(current_step, net_worth, dates, step_range)\n self._render_trades(current_step, trades, step_range)\n\n # Format the date ticks to be more easily read\n self.price_ax.set_xticklabels(self.df['Date'].values[step_range], rotation=45,\n horizontalalignment='right')\n\n # Hide duplicate net worth date labels\n plt.setp(self.net_worth_ax.get_xticklabels(), visible=False)\n\n # Necessary to view frames before they are unrendered\n plt.pause(0.001)\n\n def close(self):\n plt.close()\n"
] |
[
[
"numpy.array",
"matplotlib.style.use",
"matplotlib.pyplot.close",
"numpy.nonzero",
"matplotlib.pyplot.figure",
"matplotlib.dates.strpdate2num",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust"
]
] |
nickwood/endochrone
|
[
"050a2604be82ee4cd5ee6357ea72d3d6d4117277"
] |
[
"tests/stats/test_measures.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\n\nfrom endochrone.stats import measures\nfrom endochrone.utils import lazy_test_runner as ltr\n\n__author__ = \"nickwood\"\n__copyright__ = \"nickwood\"\n__license__ = \"mit\"\n\n\ndef test_euclidian_dist():\n A = np.array([0, 0])\n B = np.array([3, 4])\n C = np.array([12, 10])\n D = np.array([0, 3])\n E = np.array([0.7, 2.8])\n\n assert measures.euclidean_dist(A, B)**2 == pytest.approx(25.0)\n assert measures.euclidean_dist(B, C)**2 == pytest.approx(117.0)\n assert measures.euclidean_dist(C, D)**2 == pytest.approx(193.0)\n assert measures.euclidean_dist(A, C)**2 == pytest.approx(244.0)\n assert measures.euclidean_dist(B, D)**2 == pytest.approx(10.0)\n assert measures.euclidean_dist(A, D)**2 == pytest.approx(9.0)\n assert measures.euclidean_dist(B, E)**2 == pytest.approx(6.73)\n\n\ndef test_euclidean_distances():\n X = np.arange(0, 10, 1).reshape(10, 1)\n exp = np.array([[5, 4, 3, 2, 1, 0, 1, 2, 3, 4]])\n assert np.all(measures.euclidean_distances(X=X, p=5) == exp)\n\n X2 = np.arange(0, 20, 1).reshape(10, 2)\n p2 = [3, 4]\n exp2 = np.array([4.24264069, 1.41421356, 1.41421356, 4.24264069,\n 7.07106781, 9.89949494, 12.72792206, 15.55634919,\n 18.38477631, 21.21320344])\n act2 = measures.euclidean_distances(X=X2, p=p2)\n assert np.all(act2 == pytest.approx(exp2))\n\n X3 = np.arange(0, 30, 1).reshape(10, 3)\n p3 = [3, 2, 4]\n exp3 = np.array([3.74165739, 2.23606798, 7.07106781, 12.20655562,\n 17.3781472, 22.56102835, 27.74887385, 32.93933818,\n 38.13135193, 43.32435804])\n act3 = measures.euclidean_distances(X=X3, p=p3)\n assert np.all(act3 == pytest.approx(exp3))\n\n\ndef test_arg_neighbours():\n X = np.arange(0, 1, 0.1)\n n1 = measures.arg_neighbours(X=X, p=np.array([0.3]), size=0.2)\n assert np.all(n1 == np.arange(1, 6, dtype=int))\n\n X = np.arange(0, 10, 1).reshape(10, 1)\n exp = np.array([2, 3, 4, 5])\n assert np.all(measures.arg_neighbours(X=X, p=3.5, size=2) == exp)\n exp = np.array([2, 3, 4, 5, 6, 7, 8])\n assert np.all(measures.arg_neighbours(X=X, p=5, size=3) == exp)\n\n X2 = np.arange(0, 20, 1).reshape(10, 2)\n p2 = [3.5, 2.3]\n exp2 = np.array([0, 1, 2])\n assert np.all(measures.arg_neighbours(X=X2, p=p2, size=4.5) == exp2)\n\n X3 = np.arange(0, 30, 1).reshape(10, 3)\n p3 = [3.5, 2.3, 4.2]\n exp3 = np.array([0, 1, 2])\n assert np.all(measures.arg_neighbours(X=X3, p=p3, size=8.5) == exp3)\n\n\ndef test_neighbours():\n X = np.arange(0, 10, 1).reshape(10, 1)\n exp = np.array([2, 3, 4, 5]).reshape(4, 1)\n assert np.all(measures.neighbours(X=X, p=3.5, size=2) == exp)\n exp = np.array([2, 3, 4, 5, 6, 7, 8]).reshape(7, 1)\n assert np.all(measures.neighbours(X=X, p=5, size=3) == exp)\n\n X2 = np.arange(0, 20, 1).reshape(10, 2)\n p2 = [3.5, 2.3]\n exp2 = np.array([[0, 1], [2, 3], [4, 5]])\n assert np.all(measures.neighbours(X=X2, p=p2, size=4.5) == exp2)\n\n X3 = np.arange(0, 30, 1).reshape(10, 3)\n p3 = [3.5, 2.3, 4.2]\n exp3 = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n assert np.all(measures.neighbours(X=X3, p=p3, size=8.5) == exp3)\n\n\ndef test_arg_nearest():\n X = np.arange(10, 20, 1).reshape(10, 1)\n assert np.all(measures.arg_nearest(X=X, p=15, n=3) == [5, 4, 6])\n assert np.all(measures.arg_nearest(X=X, p=15, n=5) == [5, 4, 6, 3, 7])\n\n X2 = np.arange(0, 20, 1).reshape(10, 2)\n p2 = [5.5, 6.3]\n assert np.all(measures.arg_nearest(X=X2, p=p2, n=2) == [3, 2])\n\n\nltr()\n"
] |
[
[
"numpy.array",
"numpy.arange"
]
] |
Ayushparikh-code/nanotune
|
[
"6d63adc64c89aa38592cf732345d38f7c18f05e1"
] |
[
"nanotune/tests/tuning/test_tuner.py"
] |
[
"import copy\n\nimport numpy as np\nimport pytest\n\nimport nanotune as nt\nfrom nanotune.device_tuner.tuner import (Tuner, set_back_valid_ranges,\n set_back_voltages)\nfrom nanotune.device_tuner.tuningresult import TuningResult\nfrom nanotune.tests.mock_classifier import MockClassifer\n\natol = 1e-05\n\n\ndef test_set_back_voltages(gate_1, gate_2):\n gate_1.dc_voltage(-0.8)\n gate_2.dc_voltage(-0.9)\n assert gate_1.dc_voltage() == -0.8\n assert gate_2.dc_voltage() == -0.9\n\n with set_back_voltages([gate_1, gate_2]):\n assert gate_1.dc_voltage() == -0.8\n assert gate_2.dc_voltage() == -0.9\n gate_1.dc_voltage(-0.5)\n gate_2.dc_voltage(-0.4)\n assert gate_1.dc_voltage() == -0.5\n assert gate_2.dc_voltage() == -0.4\n\n assert gate_1.dc_voltage() == -0.8\n assert gate_2.dc_voltage() == -0.9\n\n\ndef test_set_back_valid_ranges(gate_1, gate_2):\n gate_1.current_valid_range([-0.8, -0.5])\n gate_2.current_valid_range([-0.9, -0.4])\n assert gate_1.current_valid_range() == [-0.8, -0.5]\n assert gate_2.current_valid_range() == [-0.9, -0.4]\n\n with set_back_valid_ranges([gate_1, gate_2]):\n assert gate_1.current_valid_range() == [-0.8, -0.5]\n assert gate_2.current_valid_range() == [-0.9, -0.4]\n gate_1.current_valid_range([-0.3, -0.4])\n gate_2.current_valid_range([-0.2, -0.1])\n assert gate_1.current_valid_range() == [-0.3, -0.4]\n assert gate_2.current_valid_range() == [-0.2, -0.1]\n\n assert gate_1.current_valid_range() == [-0.8, -0.5]\n assert gate_2.current_valid_range() == [-0.9, -0.4]\n\n\ndef test_tuner_init_and_attributes(tuner_default_input, tmp_path):\n tuner = Tuner(**tuner_default_input)\n data_settings = copy.deepcopy(tuner.data_settings())\n assert data_settings[\"db_name\"] == \"temp.db\"\n assert data_settings[\"db_folder\"] == str(tmp_path)\n assert data_settings[\"qc_experiment_id\"] == 1\n\n new_data_settings = {\"db_name\": \"other_temp.db\"}\n tuner.data_settings(new_data_settings)\n data_settings.update(new_data_settings)\n assert tuner.data_settings() == data_settings\n\n assert tuner.setpoint_settings()[\"voltage_precision\"] == 0.001\n\n tuner.close()\n\n\ndef test_update_normalization_constants(tuner_default_input, device_pinchoff, tmp_path):\n\n tuner = Tuner(**tuner_default_input)\n device_pinchoff.normalization_constants({})\n\n tuner.update_normalization_constants(device_pinchoff)\n updated_constants = device_pinchoff.normalization_constants()\n\n assert np.allclose(updated_constants[\"dc_current\"], [0.0, 1.2], atol=atol)\n assert updated_constants[\"dc_sensor\"] != updated_constants[\"dc_current\"]\n assert np.allclose(updated_constants[\"rf\"], [0, 1], atol=atol)\n\n tuner.close()\n\n\ndef test_characterize_gates(tuner_default_input, device_pinchoff):\n tuner = Tuner(\n **tuner_default_input,\n )\n tuner.classifiers = {\"pinchoff\": MockClassifer(\"pinchoff\")}\n result = tuner.characterize_gates(\n [device_pinchoff.left_barrier, device_pinchoff.left_barrier]\n )\n gate_name = \"characterization_\" + device_pinchoff.left_barrier.name\n assert gate_name in result.tuningresults.keys()\n print(result)\n tuningresult = result.tuningresults[gate_name]\n assert isinstance(tuningresult, TuningResult)\n assert tuningresult.success\n tuner.close()\n\n\ndef test_device_specific_settings(tuner_default_input, device_pinchoff):\n tuner = Tuner(\n **tuner_default_input,\n )\n original_setpoints = copy.deepcopy(tuner.setpoint_settings())\n original_classifiers = copy.deepcopy(tuner.classifiers)\n original_fit_options = copy.deepcopy(tuner.fit_options())\n\n assert 
\"normalization_constants\" not in tuner.data_settings().keys()\n n_csts = {\"dc_current\": (-0.3, 1.2), \"dc_sensor\": (0.2, 0.8), \"rf\": (0, 1)}\n device_pinchoff.normalization_constants(n_csts)\n with tuner.device_specific_settings(device_pinchoff):\n assert tuner.data_settings()[\"normalization_constants\"] == n_csts\n\n assert tuner.setpoint_settings() == original_setpoints\n assert tuner.classifiers == original_classifiers\n assert tuner.fit_options() == original_fit_options\n\n assert \"normalization_constants\" not in tuner.data_settings().keys()\n\n tuner.close()\n"
] |
[
[
"numpy.allclose"
]
] |
abdulrhmanG-alahmadi/Exploring-Weather-Trends
|
[
"e3fca01e18efdc90edf3de777aa174c5a1025567"
] |
[
"visulize.py"
] |
[
"'''\r\nthis code was made by Abdulrhman Alahmadi and runs in linear time O(n)\r\nplease if you have any suggestions to make it more efficient share them with me\r\n'''\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# getting csv files\r\nglobalData = pd.read_csv(\"C:\\\\Users\\\\alahm\\\\Desktop\\\\Data Analyst\\\\Global_data.csv\")\r\nmakkahData = pd.read_csv(\"C:\\\\Users\\\\alahm\\\\Desktop\\\\Data Analyst\\\\x.csv\")\r\n\r\n\r\n# making the avrage to a moving avrage\r\nglobalData['avg_temp'] = globalData['avg_temp'].rolling(window=20).mean()\r\nmakkahData['avg_temp'] = makkahData['avg_temp'].rolling(window=20).mean()\r\n\r\n\r\n# creating the plot\r\n\r\n## making thicker lines in the graph\r\nfor i in range(10):\r\n plt.plot(makkahData['year'], makkahData['avg_temp'],color= 'gold')\r\n plt.plot(globalData['year'],globalData['avg_temp'],color= 'darkorange')\r\n## one final ittretion with labels added \r\nplt.plot(makkahData['year'],makkahData['avg_temp'],color= 'gold',label='Makkah temperature')\r\nplt.plot(globalData['year'],globalData['avg_temp'],color= 'darkorange',label='Global temperature')\r\n\r\n##creating the title and labels while custmizing it as i see fit\r\nplt.title('Temperature trends for the past 200 years',size=20, fontweight='bold', fontname='Helvetica')\r\nplt.xlabel('Year',fontweight='bold', fontname='Helvetica')\r\nplt.legend(shadow=True,loc='upper left')\r\nplt.ylabel('Temperature (°C)',fontweight='bold',fontname='Helvetica')\r\n\r\n# printing the final result\r\nplt.show()\r\n\r\n\r\n"
] |
[
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
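The row above tags `pandas.read_csv` and several `matplotlib.pyplot` calls. A hedged sketch of the same moving-average temperature plot follows; `weather.csv` is a placeholder path, and a `linewidth` argument stands in for the repeated-replot trick the original script uses to thicken its lines.

```python
import pandas as pd
import matplotlib.pyplot as plt

# "weather.csv" is a placeholder; it is assumed to contain 'year' and 'avg_temp' columns.
df = pd.read_csv("weather.csv")
df["smoothed"] = df["avg_temp"].rolling(window=20).mean()  # 20-point moving average

plt.plot(df["year"], df["smoothed"], color="darkorange", linewidth=3, label="Temperature")
plt.title("Temperature trend (20-year moving average)")
plt.xlabel("Year")
plt.ylabel("Temperature (°C)")
plt.legend(loc="upper left")
plt.show()
```

Passing `linewidth` draws the thicker curve in a single call instead of ten overlapping plots.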
mwojcikowski/opendrugdiscovery
|
[
"174b22f6a9378bfbb1f435a6e71cad13218e5e6d",
"174b22f6a9378bfbb1f435a6e71cad13218e5e6d"
] |
[
"oddt/metrics.py",
"oddt/scoring/descriptors/__init__.py"
] |
[
"\"\"\"Metrics for estimating performance of drug discovery methods implemented in ODDT\"\"\"\n\nfrom math import ceil\nimport numpy as np\nfrom sklearn.metrics import roc_curve as roc, roc_auc_score as roc_auc, auc, mean_squared_error\n\n__all__ = ['roc', 'auc', 'roc_auc', 'roc_log_auc', 'enrichment_factor', 'random_roc_log_auc', 'rmse']\n\ndef rmse(y_true, y_pred):\n \"\"\"Compute Root Mean Squared Error (RMSE)\n \n Parameters\n ----------\n y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]\n Ground truth (correct) target values.\n \n y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]\n Estimated target values.\n \n Returns\n -------\n rmse : float\n A positive floating point value (the best value is 0.0).\n \"\"\"\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\ndef enrichment_factor(y_true, y_score, percentage=1, pos_label=None):\n \"\"\"Computes enrichment factor for given percentage, i.e. EF_1% is enrichment factor for first percent of given samples.\n \n Parameters\n ----------\n y_true : array, shape=[n_samples]\n True binary labels, in range {0,1} or {-1,1}. If positive label is different than 1, it must be explicitly defined.\n \n y_score : array, shape=[n_samples]\n Scores for tested series of samples\n \n percentage : int or float\n The percentage for which EF is being calculated\n \n pos_label: int\n Positive label of samples (if other than 1)\n \n Returns\n -------\n ef : float\n Enrichment Factor for given percenage\n \"\"\"\n if pos_label is None:\n pos_label = 1\n labels = y_true == pos_label\n # calculate fraction of positve labels\n n_perc = ceil(float(percentage)/100.*len(labels))\n return labels[:n_perc].sum()/n_perc*100\n \ndef roc_log_auc(y_true, y_score, pos_label=None, log_min=0.001, log_max=1.):\n \"\"\"Computes area under semi-log ROC for random distribution.\n \n Parameters\n ----------\n y_true : array, shape=[n_samples]\n True binary labels, in range {0,1} or {-1,1}. If positive label is different than 1, it must be explicitly defined.\n \n y_score : array, shape=[n_samples]\n Scores for tested series of samples\n \n pos_label: int\n Positive label of samples (if other than 1)\n \n log_min : float (default=0.001)\n Minimum logarithm value for estimating AUC\n \n log_max : float (default=1.)\n Maximum logarithm value for estimating AUC.\n \n Returns\n -------\n auc : float\n semi-log ROC AUC\n \"\"\"\n fpr, tpr, t = roc(y_true, y_score, pos_label=pos_label)\n idx = (fpr >= log_min) & (fpr <= log_max)\n log_fpr = 1-np.log10(fpr[idx])/np.log10(log_min)\n return auc(log_fpr, tpr[idx])\n \ndef random_roc_log_auc(log_min=0.001, log_max=1.):\n \"\"\"Computes area under semi-log ROC for random distribution.\n \n Parameters\n ----------\n log_min : float (default=0.001)\n Minimum logarithm value for estimating AUC\n \n log_max : float (default=1.)\n Maximum logarithm value for estimating AUC.\n \n Returns\n -------\n auc : float\n semi-log ROC AUC for random distribution\n \"\"\"\n return (log_max-log_min)/(np.log(10)*np.log10(log_max/log_min))\n",
"import numpy as np\nfrom scipy.spatial.distance import cdist as distance\n\ndef atoms_by_type(atom_dict, types, mode = 'atomic_nums'):\n \"\"\"\n AutoDock4 types definition: http://autodock.scripps.edu/faqs-help/faq/where-do-i-set-the-autodock-4-force-field-parameters\n \"\"\"\n if mode == 'atomic_nums':\n return {num: atom_dict[atom_dict['atomicnum'] == num] for num in set(types)}\n elif mode == 'atom_types_sybyl':\n return {t: atom_dict[atom_dict['atomtype'] == t] for t in set(types)}\n elif mode == 'atom_types_ad4':\n # all AD4 atom types are capitalized\n types = [t.upper() for t in types]\n out = {}\n for t in set(types):\n if t == 'HD':\n out[t] = atom_dict[atom_dict['atomicnum'] == 1 & atom_dict['isdonorh']]\n elif t == 'C':\n out[t] = atom_dict[atom_dict['atomicnum'] == 6 & ~atom_dict['isaromatic']]\n elif t == 'CD': # not canonical AD4 type, although used by NNscore, with no description. properies assued by name\n out[t] = atom_dict[atom_dict['atomicnum'] == 6 & ~atom_dict['isdonor']]\n elif t == 'A':\n out[t] = atom_dict[atom_dict['atomicnum'] == 6 & atom_dict['isaromatic']]\n elif t == 'N':\n out[t] = atom_dict[atom_dict['atomicnum'] == 7 & ~atom_dict['isacceptor']]\n elif t == 'NA':\n out[t] = atom_dict[atom_dict['atomicnum'] == 7 & atom_dict['isacceptor']]\n elif t == 'OA':\n out[t] = atom_dict[atom_dict['atomicnum'] == 8 & atom_dict['isacceptor']]\n elif t == 'F':\n out[t] = atom_dict[atom_dict['atomicnum'] == 9]\n elif t == 'MG':\n out[t] = atom_dict[atom_dict['atomicnum'] == 12]\n elif t == 'P':\n out[t] = atom_dict[atom_dict['atomicnum'] == 15]\n elif t == 'SA':\n out[t] = atom_dict[atom_dict['atomicnum'] == 16 & atom_dict['isacceptor']]\n elif t == 'S':\n out[t] = atom_dict[atom_dict['atomicnum'] == 16 & ~atom_dict['isacceptor']]\n elif t == 'CL':\n out[t] = atom_dict[atom_dict['atomicnum'] == 17]\n elif t == 'CA':\n out[t] = atom_dict[atom_dict['atomicnum'] == 20]\n elif t == 'MN':\n out[t] = atom_dict[atom_dict['atomicnum'] == 25]\n elif t == 'FE':\n out[t] = atom_dict[atom_dict['atomicnum'] == 26]\n elif t == 'CU':\n out[t] = atom_dict[atom_dict['atomicnum'] == 29]\n elif t == 'ZN':\n out[t] = atom_dict[atom_dict['atomicnum'] == 30]\n elif t == 'BR':\n out[t] = atom_dict[atom_dict['atomicnum'] == 35]\n elif t == 'I':\n out[t] = atom_dict[atom_dict['atomicnum'] == 53]\n else:\n raise ValueError('Unsopported atom type: %s' % t)\n return out\n\nclass close_contacts(object):\n def __init__(self, protein = None, cutoff = 4, mode = 'atomic_nums', ligand_types = None, protein_types = None, aligned_pairs = False):\n self.cutoff = cutoff\n self.protein = protein\n self.ligand_types = ligand_types\n self.protein_types = protein_types if protein_types else ligand_types\n self.aligned_pairs = aligned_pairs\n self.mode = mode\n \n def build(self, ligands, protein = None, single = False):\n if protein is None:\n protein = self.protein\n if single:\n ligands = [ligands]\n# prot_dict = atoms_by_type(protein.atom_dict, self.protein_types, self.mode)\n desc_size = len(self.ligand_types) if self.aligned_pairs else len(self.ligand_types)*len(self.protein_types)\n out = np.zeros(desc_size, dtype=int)\n for mol in ligands:\n# mol_dict = atoms_by_type(mol.atom_dict, self.ligand_types, self.mode) \n if self.aligned_pairs:\n #desc = np.array([(distance(prot_dict[str(prot_type)]['coords'], mol_dict[str(mol_type)]['coords']) <= self.cutoff).sum() for mol_type, prot_type in zip(self.ligand_types, self.protein_types)], dtype=int)\n # this must be LAZY!\n desc = 
np.array([(distance(atoms_by_type(protein.atom_dict, [prot_type], self.mode)[prot_type]['coords'], atoms_by_type(mol.atom_dict, [mol_type], self.mode)[mol_type]['coords']) <= self.cutoff).sum() for mol_type, prot_type in zip(self.ligand_types, self.protein_types)], dtype=int)\n else:\n desc = np.array([(distance(atoms_by_type(protein.atom_dict, [prot_type], self.mode)[prot_type]['coords'], atoms_by_type(mol.atom_dict, [mol_type], self.mode)[mol_type]['coords']) <= self.cutoff).sum() for mol_type in self.ligand_types for prot_type in self.protein_types], dtype=int)\n out = np.vstack((out, desc))\n return out[1:]\n \n def __reduce__(self):\n return close_contacts, (None, self.cutoff, self.mode, self.ligand_types, self.protein_types, self.aligned_pairs)\n \nclass fingerprints(object):\n def __init__(self, fp = 'fp2', toolkit = 'ob'):\n self.fp = fp\n self.exchange = False\n #if toolkit == oddt.toolkit.backend:\n # self.exchange = False\n #else:\n # self.exchange = True\n # self.target_toolkit = __import__('toolkits.'+toolkit)\n \n def _get_fingerprint(self, mol):\n if self.exchange:\n mol = self.target_toolkit.Molecule(mol)\n return mol.calcfp(self.fp).raw\n \n def build(self, mols, single = False):\n if single:\n mols = [mols]\n out = None\n \n for mol in mols:\n fp = self._get_fingerprint(mol)\n if out is None:\n out = np.zeros_like(fp)\n out = np.vstack((fp, out))\n return out[1:]\n \n def __reduce__(self):\n return fingerprints, ()\n"
] |
[
[
"sklearn.metrics.mean_squared_error",
"numpy.log",
"sklearn.metrics.auc",
"numpy.log10",
"sklearn.metrics.roc_curve"
],
[
"numpy.zeros_like",
"numpy.vstack",
"numpy.zeros"
]
] |
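For the ODDT metrics row above, which tags `sklearn.metrics.mean_squared_error`, `roc_curve`, `auc`, and `numpy.log10`, here is a minimal sketch of the RMSE and semi-log ROC AUC ideas with made-up labels and scores. It follows the structure of `oddt/metrics.py` but is a simplified illustration, not a drop-in replacement.

```python
import numpy as np
from sklearn.metrics import auc, mean_squared_error, roc_curve

y_true = np.array([0, 0, 1, 1])            # illustrative binary labels
y_score = np.array([0.1, 0.4, 0.35, 0.8])  # illustrative scores

rmse = np.sqrt(mean_squared_error(y_true, y_score))  # RMSE as in oddt.metrics.rmse
fpr, tpr, _ = roc_curve(y_true, y_score)             # standard ROC curve

# Semi-log variant: rescale FPR onto a log10 axis before integrating, as roc_log_auc does.
log_min = 0.001
idx = fpr >= log_min
log_fpr = 1 - np.log10(fpr[idx]) / np.log10(log_min)
print(rmse, auc(fpr, tpr), auc(log_fpr, tpr[idx]))
```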
DataScientest-Studio/firepy
|
[
"e4fc61a2232a22b2a7d45ae42123995d51866c52"
] |
[
"streamlit/streamlit_firepy.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nScript for the Streamlit demo of the FirePy project\n\"\"\"\n\nimport folium # map rendering library\nimport streamlit as st\nfrom streamlit_folium import folium_static\nimport rasterio as rio\nimport tifffile\nimport numpy as np\nfrom pyproj import Proj, transform\nimport tensorflow as tf\nfrom smooth_blending import predict_img_with_smooth_windowing\nfrom unet_model import simple_unet_model\n\nselect_fire_events = st.sidebar.selectbox(\n \"Select a fire event\",\n (\"Fire_1\", \"Fire_2\", \"Fire_3\", \"Airport fire 2022-02-16\"),\n index=3\n)\n\nsentinel2_opacity_slider = st.sidebar.slider(\n 'Opacity of Sentinel 2 overlay', 0.0, 1.0, 1.0)\n\nif select_fire_events == \"Fire_1\":\n st.title(\"Incendie n°1 :\")\n st.write(\" \")\n st.subheader(\"Incendie ayant eu lieu le XX/XX/XXXX\")\n sentinel2_image_path = './streamlit/test_images/CAL_database_Sentinel2_185_postFire_RGBIR.tif'\n if 'prediction' in st.session_state:\n del st.session_state['prediction']\n\nelif select_fire_events == \"Fire_2\":\n st.title(\"Incendie n°2 :\")\n st.write(\" \")\n st.subheader(\"The fire started on 16th of February\")\n sentinel2_image_path = './streamlit/test_images/CAL_database_Sentinel2_321_postFire_RGBIR.tif'\n if 'prediction' in st.session_state:\n del st.session_state['prediction']\n\nelif select_fire_events == \"Airport fire 2022-02-16\":\n st.title(\"Incendie n°4 :\")\n st.write(\" \")\n st.subheader(\"The fire started on 16th of February 2022\")\n st.subheader(\"The burnt area is 1674 ha\")\n st.subheader(\"The road closures: Warm Springs Road east of Hwy 395\")\n sentinel2_image_path = './streamlit/test_images/CAL_database_Sentinel2_Airport_postFire_RGBIR.tif'\n if 'prediction' in st.session_state:\n del st.session_state['prediction']\n\nelse:\n st.title(\"Incendie n°3 :\")\n st.write(\" \")\n st.subheader(\"Incendie ayant eu lieu le XX/XX/XXXX\")\n sentinel2_image_path = './streamlit/test_images/CAL_database_Sentinel2_8351_postFire_RGBIR.tif'\n if 'prediction' in st.session_state:\n del st.session_state['prediction']\n\n# Sentinel 2 image open\nraster_sentinel2 = rio.open(sentinel2_image_path)\n\n# Bounding box\ninProj = Proj(init='epsg:32610')\noutProj = Proj(init='epsg:4326')\nlongitude1, latitude1 = transform(\n inProj, outProj, raster_sentinel2.bounds.left, raster_sentinel2.bounds.bottom)\nlongitude2, latitude2 = transform(\n inProj, outProj, raster_sentinel2.bounds.right, raster_sentinel2.bounds.top)\ngps_bounding_box = [longitude1, latitude1, longitude2, latitude2]\n\nbbox = [[gps_bounding_box[1], gps_bounding_box[0]],\n [gps_bounding_box[3], gps_bounding_box[2]]]\n\ncenter_of_bbox = [(gps_bounding_box[1] + gps_bounding_box[3]) / 2,\n (gps_bounding_box[0] + gps_bounding_box[2]) / 2]\n\n# Array data\narr = raster_sentinel2.read()\n\n# Normalize the data (divide par 10000)\narr = arr / 10000\n\n# Change dimension order (number of channel at the end)\narr = np.moveaxis(arr, 0, -1)\n\n# Selecting the RGB channels in the right order\nimage_rgb = arr[:, :, :3]\nimage_rgb = image_rgb[:, :, ::-1] * 10\n\n\ndef predict(arr):\n # Loading a pre-trained model\n model = simple_unet_model(256, 256, 5)\n model.load_weights(\n './streamlit/saved_model/model_patches_20220130bis.hdf5')\n\n predictions_smooth = predict_img_with_smooth_windowing(\n arr,\n window_size=256,\n # Minimal amount of overlap for windowing. 
Must be an even number.\n subdivisions=2,\n nb_classes=1,\n pred_func=(\n lambda img_batch_subdiv: model.predict((img_batch_subdiv))\n )\n )\n return predictions_smooth[:, :, 0]\n\n\n# Adding the UI components for the user\nst.title(\"FirePy demo\")\n\nzoom_slider = st.sidebar.slider(\n 'Map zoom', 5.0, 15.0, 10.0)\n\n# Showing the map centered on San Jose GPS coordinates\nmap_california = folium.Map(location=center_of_bbox,\n zoom_start=zoom_slider)\n\n# Adding the Sentinel 2 image\nimage = folium.raster_layers.ImageOverlay(\n name=\"Test image\",\n image=image_rgb,\n bounds=bbox,\n interactive=True,\n zindex=1,\n opacity=sentinel2_opacity_slider\n)\nimage.add_to(map_california)\n\nprediction_button = st.sidebar.button(\"Predict the burnt area\")\nprediction_boolean = False\nif prediction_button:\n prediction_smooth_img = predict(arr)\n prediction_boolean = True\n st.session_state.prediction = prediction_smooth_img\n\nif 'prediction' in st.session_state:\n prediction_opacity_slider = st.sidebar.slider(\n 'Opacity of the prediction overlay', 0.0, 1.0, 0.5)\n\nif 'prediction' in st.session_state:\n saved_result = st.session_state.prediction\n # Adding the prediction image\n image_prediction = folium.raster_layers.ImageOverlay(\n name=\"Prediction image\",\n image=saved_result,\n bounds=bbox,\n interactive=True,\n zindex=2,\n opacity=prediction_opacity_slider\n )\n image_prediction.add_to(map_california)\n\n# Display the map\nfolium_static(map_california)\n"
] |
[
[
"numpy.moveaxis"
]
] |
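The Streamlit/FirePy row above tags only `numpy.moveaxis`, which the script uses to turn rasterio's band-first array into a channels-last image before display and prediction. A small illustrative sketch, with a dummy array standing in for the Sentinel-2 raster:

```python
import numpy as np

# rasterio .read() returns (bands, rows, cols); most plotting/ML code expects channels last.
arr = np.zeros((5, 256, 256), dtype=np.float32)  # dummy stand-in for the Sentinel-2 raster
arr = np.moveaxis(arr, 0, -1)                    # move the band axis to the end
print(arr.shape)                                 # (256, 256, 5)
```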
brsr/antitile
|
[
"57228f1e2f2646ee88afbfc853adb8d3a6bcd736"
] |
[
"antitile/xmath.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nExtra math and array functions.\n\"\"\"\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport pandas as pd\n\ndef reflect_through_origin(normal):\n \"\"\"Reflection matrix for reflecting through a plane through the origin\n specified by its normal\n\n >>> x = np.array([1, 0, 0])\n >>> reflect_through_origin(x)\n array([[-1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n \"\"\"\n return (np.eye(len(normal)) -\n 2 * np.outer(normal, normal) / np.inner(normal, normal))\n\ndef complex_to_float2d(arr):\n \"\"\"Converts a complex array to a multidimensional float array.\n\n >>> x = np.exp(2j*np.pi*np.linspace(0, 1, 5)).round()\n >>> complex_to_float2d(x.round())\n array([[ 1., 0.],\n [ 0., 1.],\n [-1., 0.],\n [-0., -1.],\n [ 1., -0.]])\n \"\"\"\n return arr.view(float).reshape(list(arr.shape) + [-1])\n\ndef float2d_to_complex(arr):\n \"\"\"Converts a multidimensional float array to a complex array.\n Input must be a float type, since there is no integer complex type.\n\n >>> y = np.arange(8, dtype=float).reshape((-1, 2))\n >>> float2d_to_complex(y)\n array([[ 0.+1.j],\n [ 2.+3.j],\n [ 4.+5.j],\n [ 6.+7.j]])\n \"\"\"\n return arr.view(complex)\n\ndef line_intersection(a1, a2, b1, b2):\n \"\"\"Finds the point in the plane that lies at the intersection of the\n line from a1 to a2 and the line from b1 to b2.\n\n >>> a1 = np.array([0, 0])\n >>> a2 = np.array([1, 1])\n >>> b1 = np.array([1, 0])\n >>> b2 = np.array([0, 1])\n >>> line_intersection(a1, a2, b1, b2)\n array([ 0.5, 0.5])\n \"\"\"\n a1x, a1y = a1[..., 0], a1[..., 1]\n a2x, a2y = a2[..., 0], a2[..., 1]\n b1x, b1y = b1[..., 0], b1[..., 1]\n b2x, b2y = b2[..., 0], b2[..., 1]\n\n numx = (a1x*a2y - a1y*a2x)*(b1x - b2x) - (b1x*b2y - b1y*b2x)*(a1x - a2x)\n numy = (a1x*a2y - a1y*a2x)*(b1y - b2y) - (b1x*b2y - b1y*b2x)*(a1y - a2y)\n denom = (a1x-a2x)*(b1y-b2y) - (a1y-a2y)*(b1x-b2x)\n return np.stack([numx, numy], axis=-1)/denom[..., np.newaxis]\n\n\ndef record_initialize(shape, dtype, default_bool=False,\n default_int=-1,\n default_float=np.nan):\n \"\"\"\n Creates and initializes a record array with specified defaults\n instead of whatever garbage happened to already be there.\n\n Args:\n shape: Shape of the array\n dtype: dtype of the array (including field names)\n default_bool: Default value for booleans (default: False)\n default_int: Default value for integers (default: -1, which is\n treated as the largest interg for uint types)\n default_float: Default value for floats (default: nan)\n\n >>> dtype = np.dtype([('a', bool), ('b', int), ('c', float)])\n >>> record_initialize(1, dtype)\n rec.array([(False, -1, nan)], ...\n \"\"\"\n sctypes = np.sctypes\n result = np.recarray(shape, dtype=dtype)\n fields = dtype.fields\n for field, t in fields.items():\n base = t[0].base\n if base in sctypes['int'] + sctypes['uint']:\n result[field] = default_int\n elif base in sctypes['float'] + sctypes['complex']:\n result[field] = default_float\n elif base == np.bool:\n result[field] = default_bool\n return result\n\n\ndef transpose(long_array, filler=-1):\n \"\"\"\n Transposes an array of shape N x 2 into a wide 2d array, using the first\n column as an index. 
Blanks in the array are filled in with the argument\n filler (by default -1)\n\n >>> x = np.tile(np.arange(2), 5)[:9]\n >>> y = np.arange(9)\n >>> transpose(np.stack((x, y), axis=-1))\n array([[ 0, 2, 4, 6, 8],\n [ 1, 3, 5, 7, -1]])\n\n \"\"\"\n df = pd.DataFrame(long_array, columns=[\"a\", \"b\"])\n df[\"rank\"] = (df.groupby('a').b.rank() - 1)\n step2 = df[[\"a\", 'b', \"rank\"]].set_index([\"a\", \"rank\"]).unstack()\n return step2.fillna(filler).values.astype(long_array.dtype)\n\n\ndef recordify(names, arrays):\n \"\"\"Take a bunch of arrays of the same first dimension and combine them\n into a record array\n\n Args:\n names: List of field names\n arrays: List of arrays\n\n >>> x = np.linspace(0, 1, 4)\n >>> y = np.eye(4)\n >>> z = np.arange(4**3).reshape(4, 4, 4)\n >>> r = recordify(['x', 'y', 'z'], [x, y, z])\n >>> r.dtype\n dtype((numpy.record, [('x', '<f...'), ('y', '<f...', (4,)), ('z', '<i...', (4, 4))]))\"\"\"\n type_list = [(name, array.dtype, array.shape[1:])\n for (name, array) in zip(names, arrays)]\n wtype = np.dtype(type_list)\n result = np.recarray(len(arrays[0]), dtype=wtype)\n for name, array in zip(names, arrays):\n result[name] = array\n return result\n\n\ndef renumber(in_array, fill=-1):\n \"\"\"Renumbers an index of a subarray given by the boolean array inarray.\n (This will probably make more sense if you look at the doctest.)\n\n Args:\n inarray: A 1d boolean array\n fill: An integer to fill in elements that don't appear in the subarray\n By default, -1.\n Returns:\n A 1d numeric array. Takes the value of fill if element does not\n exist in the subarray.\n\n >>> x = np.arange(5)\n >>> condition = (x > 2)\n >>> y = x[condition]\n >>> i = renumber(condition)\n >>> np.where(i >= 0, y[i], -1)\n array([-1, -1, -1, 3, 4])\n \"\"\"\n count = np.sum(in_array)\n index = np.repeat(fill, in_array.shape)\n index[in_array] = np.arange(count)\n return index\n\n\ndef triple_product(a, b, c):\n \"\"\"The scalar triple product of 3 vectors\n a dot b cross c = determinant of [a b c]\n a,b, and c must have last dimension = 3\n\n >>> a = np.zeros(3)\n >>> a[0] = 1\n >>> b = np.arange(4, 16).reshape(4, 3)\n >>> c = np.arange(12)[::-1].reshape(4, 3)\n >>> triple_product(a, b, c)\n array([-15., -15., -15., -15.])\n \"\"\"\n return np.sum(a * np.cross(b, c), axis=-1)\n\n\ndef normalize(vectors, axis=-1):\n \"\"\"Normalizes vectors in n-space. The zero vector remains the zero vector.\n\n Args:\n vectors: Array of vectors\n axis: Which axis to take the norm over (by default the last axis, -1)\n\n >>> x = np.stack((np.ones(5), np.arange(5)), axis=-1)\n >>> normalize(x)\n array([[ 1. , 0. ],\n [ 0.70710678, 0.70710678],\n [ 0.4472136 , 0.89442719],\n [ 0.31622777, 0.9486833 ],\n [ 0.24253563, 0.9701425 ]])\n \"\"\"\n n = norm(vectors, axis=axis, keepdims=True)\n return np.where(n <= 0, 0, vectors / n)\n\n\ndef slerp(pt1, pt2, intervals):\n \"\"\"Spherical linear interpolation.\n\n Args:\n pt1: Array of points. When interval is 0, the result is pt1.\n pt2: Array of points. When interval is 1, the result is pt2.\n intervals: Array of intervals at which to evaluate the\n linear interpolation\n\n >>> x = np.array([1, 0, 0])\n >>> y = np.array([0, 0, 1])\n >>> t = np.linspace(0, 1, 4)[:, np.newaxis]\n >>> slerp(x, y, t)\n array([[ 1. , 0. , 0. ],\n [ 0.8660254, 0. , 0.5 ],\n [ 0.5 , 0. , 0.8660254],\n [ 0. , 0. , 1. 
]])\n \"\"\"\n t = intervals\n angle = central_angle(pt1, pt2)[..., np.newaxis]\n return (np.sin((1 - t)*angle)*pt1 + np.sin((t)*angle)*pt2)/np.sin(angle)\n\n\ndef lerp(pt1, pt2, intervals):\n \"\"\"Linear interpolation.\n\n Args:\n pt1: Array of points. When interval is 0, the result is pt1.\n pt2: Array of points. When interval is 1, the result is pt2.\n intervals: Array of intervals at which to evaluate the\n linear interpolation\n\n >>> x = np.array([1, 0, 0])\n >>> y = np.array([0, 0, 1])\n >>> t = np.linspace(0, 1, 4)[:, np.newaxis]\n >>> lerp(x, y, t)\n array([[ 1. , 0. , 0. ],\n [ 0.66666667, 0. , 0.33333333],\n [ 0.33333333, 0. , 0.66666667],\n [ 0. , 0. , 1. ]])\n \"\"\"\n t = intervals\n return (1 - t)*pt1 + t*pt2\n\n\ndef nlerp(*args):\n \"\"\"Normalized linear interpolation.\n\n Args: Same as lerp.\n\n >>> x = np.array([1, 0, 0])\n >>> y = np.array([0, 0, 1])\n >>> t = np.linspace(0, 1, 4)[:, np.newaxis]\n >>> nlerp(x, y, t)\n array([[ 1. , 0. , 0. ],\n [ 0.89442719, 0. , 0.4472136 ],\n [ 0.4472136 , 0. , 0.89442719],\n [ 0. , 0. , 1. ]])\n \"\"\"\n return normalize(lerp(*args))\n\n\ndef distance(x, y, p=2, axis=-1, scale=1):\n \"\"\"Distance between points in Euclidean space.\n\n Args:\n x, y: Coordinates of points.\n p: Which norm to use. 2 = euclidean, 1 = taxicab.\n axis: Which axis the vectors lie along. By default, -1.\n scale: Scale factor for the distance.\n\n Returns: Array of distances.\n\n >>> t = np.linspace(0, 1, 5)[:, np.newaxis]\n >>> x = np.array([[0, 0, 0]])*t + np.array([[0, 10, -10]])*(1 - t)\n >>> y = np.array([[0, 0, 0]])*t + np.array([[10, 0, -10]])*(1 - t)\n >>> np.round(distance(x, y), 2)\n array([ 14.14, 10.61, 7.07, 3.54, 0. ])\n \"\"\"\n return norm((x - y) / scale, ord=p, axis=axis)\n\n\ndef triangle_area(a, b, c):\n \"\"\"Area of Euclidean triangle given by a, b, and c.\n\n Args:\n a, b, c: Coordinates of points.\n\n Returns: Array of areas.\n\n >>> t = np.linspace(0, np.pi, 5)\n >>> a = np.stack([np.cos(t), np.sin(t), np.zeros(5)],axis=-1)\n >>> b = np.array([0, 1, 1])/np.sqrt(2)\n >>> c = np.array([0, -1, 1])/np.sqrt(2)\n >>> np.round(triangle_area(a, b, c), 4)\n array([ 0.866 , 0.7071, 0.5 , 0.7071, 0.866 ])\n \"\"\"\n ab = a - b\n ac = a - c\n return norm(np.cross(ab, ac), axis=-1) / 2\n\n\ndef bearing(origin, destination, pole=np.array([0, 0, 1])):\n \"\"\" Returns the bearing (angle) between points. By default,\n the bearing is calculated with respect to the +y direction.\n\n Args:\n origin: Origin points\n destination: Destination points\n direction: A vector giving the direction the bearing is\n calculated with respect to. By default, [0, 1].\n\n Returns: Array of bearings.\n\n >>> a = np.array([0, 0, 0])\n >>> b = np.array([1, 0, 0])\n >>> c = np.array([0, 0, 1])\n >>> bearing(a, b, c)/np.pi*180\n 90...\n \"\"\"\n direction = origin - pole\n pv = destination - origin\n d = np.sum(pv * direction, axis=-1)\n x = norm(np.cross(pv, direction), axis=-1)\n return np.arctan2(x, d)\n\n\ndef central_angle(x, y, signed=False):\n \"\"\"Central angle between vectors with respect to 0. If vectors have norm\n 1, this is the spherical distance between them.\n\n Args:\n x, y: Coordinates of points on the sphere.\n axis: Which axis the vectors lie along. 
By default, -1.\n\n Returns: Array of central angles.\n\n >>> t = np.linspace(0, np.pi, 5)\n >>> c = np.cos(t)\n >>> s = np.sin(t)\n >>> z = np.zeros(t.shape)\n >>> x = np.stack((c, s, z), axis=-1)\n >>> y = np.stack((c, z, s), axis=-1)\n >>> np.round(central_angle(x, y)/np.pi*180)\n array([ 0., 60., 90., 60., 0.])\n \"\"\"\n cos = np.sum(x*y, axis=-1)\n sin = norm(np.cross(x, y), axis=-1)\n result = np.arctan2(sin, cos)\n return result if signed else abs(result)\n\n\ndef central_angle_equilateral(pts):\n \"\"\"For use with the naive slerp methods. Takes the central angle between\n each of the points in pts. If they are close, returns the central angle.\n If not, raises an error.\n\n >>> x = np.eye(3)\n >>> central_angle_equilateral(x)/np.pi*180\n 90...\n >>> y = x[[0, 0, 2]]\n >>> central_angle_equilateral(y)/np.pi*180\n Traceback (most recent call last):\n ...\n ValueError: naive_slerp used with non-equilateral face. Difference is 1...\n \"\"\"\n omegas = central_angle(pts, np.roll(pts, 1, axis=0))\n max_diff = np.abs(omegas - np.roll(omegas, 1)).max()\n if not np.isclose(max_diff, 0):\n raise ValueError(\"naive_slerp used with non-equilateral face. \" +\n \"Difference is \" + str(max_diff) + \" radians.\")\n return omegas[0]\n\n\ndef triangle_solid_angle(a, b, c):\n \"\"\"Solid angle of a triangle with respect to 0. If vectors have norm 1,\n this is the spherical area. Note there are two solid angles defined by\n three points: this will always return the smaller of the two. (The other\n is 4*pi minus what this function returns.)\n\n Formula is from Van Oosterom, A; Strackee, J (1983).\n \"The Solid Angle of a Plane Triangle\". IEEE Trans. Biom. Eng.\n BME-30 (2): 125–126. doi:10.1109/TBME.1983.325207.\n\n Args:\n a, b, c: Coordinates of points on the sphere.\n\n Returns: Array of solid angles.\n\n >>> t = np.linspace(0, np.pi, 5)\n >>> a = np.stack([np.cos(t), np.sin(t), np.zeros(5)],axis=-1)\n >>> b = np.array([0, 1, 1])/np.sqrt(2)\n >>> c = np.array([0, -1, 1])/np.sqrt(2)\n >>> np.round(triangle_solid_angle(a, b, c), 4)\n array([ 1.5708, 1.231 , 0. , 1.231 , 1.5708])\n \"\"\"\n\n top = np.abs(triple_product(a, b, c))\n na = norm(a, axis=-1)\n nb = norm(b, axis=-1)\n nc = norm(c, axis=-1)\n bottom = (na*nb*nc + np.sum(a * b, axis=-1)*nc\n + np.sum(b * c, axis=-1)*na\n + np.sum(c * a, axis=-1)*nb)\n return 2 * (np.arctan(top / bottom) % np.pi)\n\n\ndef spherical_bearing(origin, destination, pole=np.array([0, 0, 1])):\n \"\"\" Returns the bearing (angle) between points. By default,\n the bearing is calculated with respect to the north pole.\n Can also be considered as the angle adjacent to origin in the\n triangle formed by origin, destination, and pole.\n\n Args:\n origin: Origin points\n destination: Destination points\n pole: Point bearing is calculated with respect to.\n By default, the north pole.\n\n Returns: Array of bearings.\n\n >>> x = np.array([1, 0, 0])\n >>> spherical_bearing(x, np.roll(x, 1))/np.pi*180\n 90...\n \"\"\"\n c_1 = np.cross(origin, destination)\n c_2 = np.cross(origin, pole)\n cos_theta = np.sum(c_1 * c_2, axis=-1)\n sin_theta = triple_product(origin, destination, pole)\n return np.arctan2(sin_theta, cos_theta)\n\ndef sqrt(x):\n \"\"\"Real sqrt clipped to 0 for negative values\"\"\"\n return np.where(x < 0, 0, np.sqrt(x))\n"
] |
[
[
"numpy.isclose",
"numpy.where",
"numpy.outer",
"numpy.inner",
"numpy.dtype",
"numpy.sin",
"numpy.linalg.norm",
"pandas.DataFrame",
"numpy.arange",
"numpy.sqrt",
"numpy.cross",
"numpy.array",
"numpy.roll",
"numpy.recarray",
"numpy.stack",
"numpy.arctan",
"numpy.arctan2",
"numpy.sum",
"numpy.repeat"
]
] |
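The `antitile/xmath.py` row tags a long list of NumPy calls. As one concrete illustration, the sketch below reproduces the `central_angle` idea from that file (the angle via `arctan2` of the cross-product norm and the dot product); it is a trimmed copy for illustration only, not the full module.

```python
import numpy as np
from numpy.linalg import norm

def central_angle(x, y):
    """Angle between vectors about the origin: arctan2(|x x y|, x . y)."""
    cos = np.sum(x * y, axis=-1)
    sin = norm(np.cross(x, y), axis=-1)
    return np.arctan2(sin, cos)

# Unit vectors along +x and +z are 90 degrees apart.
print(np.degrees(central_angle(np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0]))))  # 90.0
```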
naylor-b/OpenAeroStruct
|
[
"eeb69bcea7c80587c6156472ba70735e62b15702"
] |
[
"openaerostruct/utils/plot_wingbox.py"
] |
[
"\"\"\"\r\n\r\nThis only works when using the wingbox model with MULTIPOINT analysis/optimization.\r\n\r\n\"\"\"\r\n\r\n\r\nfrom __future__ import division, print_function\r\nimport sys\r\nmajor_python_version = sys.version_info[0]\r\n\r\nif major_python_version == 2:\r\n import tkFont\r\n import Tkinter as Tk\r\nelse:\r\n import tkinter as Tk\r\n from tkinter import font as tkFont\r\n\r\nfrom six import iteritems\r\nimport numpy as np\r\nfrom openmdao.recorders.sqlite_reader import SqliteCaseReader\r\n\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\nmatplotlib.rcParams['lines.linewidth'] = 2\r\nmatplotlib.rcParams['axes.edgecolor'] = 'gray'\r\nmatplotlib.rcParams['axes.linewidth'] = 0.5\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,\\\r\n NavigationToolbar2Tk\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.animation as manimation\r\nimport sqlitedict\r\n\r\n#####################\r\n# User-set parameters\r\n#####################\r\n\r\nmy_blue = '#4C72B0'\r\nmy_orange = '#ff9933'\r\nmy_green = '#56A968'\r\n\r\nclass Display(object):\r\n def __init__(self, args):\r\n\r\n self.db_name = args[1]\r\n\r\n try:\r\n self.zoom_scale = args[2]\r\n except:\r\n self.zoom_scale = 2.8\r\n\r\n self.root = Tk.Tk()\r\n self.root.wm_title(\"Viewer\")\r\n\r\n self.f = plt.figure(dpi=100, figsize=(12, 8), facecolor='white')\r\n self.canvas = FigureCanvasTkAgg(self.f, master=self.root)\r\n self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\r\n\r\n self.options_frame = Tk.Frame(self.root)\r\n self.options_frame.pack()\r\n\r\n toolbar = NavigationToolbar2Tk(self.canvas, self.root)\r\n toolbar.update()\r\n self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\r\n self.ax = plt.subplot2grid((5, 8), (0, 0), rowspan=5,\r\n colspan=4, projection='3d')\r\n self.ax.set_aspect('equal')\r\n\r\n self.num_iters = 0\r\n self.show_wing = True\r\n self.show_tube = True\r\n self.curr_pos = 0\r\n self.old_n = 0\r\n self.aerostruct = False\r\n\r\n self.load_db()\r\n\r\n if self.show_wing and not self.show_tube:\r\n self.ax2 = plt.subplot2grid((4, 8), (0, 4), rowspan=2, colspan=4)\r\n self.ax3 = plt.subplot2grid((4, 8), (2, 4), rowspan=2, colspan=4)\r\n if self.show_tube and not self.show_wing:\r\n self.ax4 = plt.subplot2grid((4, 8), (0, 4), rowspan=2, colspan=4)\r\n self.ax5 = plt.subplot2grid((4, 8), (2, 4), rowspan=2, colspan=4)\r\n if self.show_wing and self.show_tube:\r\n self.ax2 = plt.subplot2grid((5, 8), (1, 4), colspan=4)\r\n self.ax3 = plt.subplot2grid((5, 8), (0, 4), colspan=4)\r\n self.ax4 = plt.subplot2grid((5, 8), (3, 4), colspan=4)\r\n self.ax5 = plt.subplot2grid((5, 8), (4, 4), colspan=4)\r\n self.ax6 = plt.subplot2grid((5, 8), (2, 4), colspan=4)\r\n\r\n def load_db(self):\r\n cr = self.case_reader = SqliteCaseReader(self.db_name, pre_load=True)\r\n last_case = next(reversed(cr.get_cases('driver')))\r\n\r\n names = []\r\n for key in cr.system_metadata.keys():\r\n try:\r\n surfaces = cr.system_metadata[key]['component_options']['surfaces']\r\n for surface in surfaces:\r\n names.append(surface['name'])\r\n break\r\n except:\r\n pass\r\n\r\n # figure out if this is an optimization and what the objective is\r\n obj_keys = last_case.get_objectives()\r\n if obj_keys.keys(): # if its not an empty list\r\n self.opt = True\r\n self.obj_key = list(obj_keys.keys())[0]\r\n else:\r\n self.opt = False\r\n\r\n self.twist = []\r\n self.mesh = []\r\n self.def_mesh = []\r\n self.def_mesh_maneuver = []\r\n self.radius = []\r\n 
self.spar_thickness = []\r\n self.skin_thickness = []\r\n self.t_over_c = []\r\n sec_forces = []\r\n sec_forces_maneuver = []\r\n normals = []\r\n normals_maneuver = []\r\n widths = []\r\n widths_maneuver = []\r\n self.lift = []\r\n self.lift_ell = []\r\n self.lift_maneuver = []\r\n self.lift_ell_maneuver = []\r\n self.vonmises = []\r\n alpha = []\r\n alpha_maneuver = []\r\n rho = []\r\n rho_maneuver = []\r\n v = []\r\n self.CL = []\r\n self.AR = []\r\n self.S_ref = []\r\n self.obj = []\r\n self.struct_masses = []\r\n self.cg = []\r\n self.point_mass_locations = []\r\n\r\n # find the names of all surfaces\r\n pt_names = []\r\n for key in last_case.outputs:\r\n\r\n # Aerostructural\n if 'coupled' in key:\r\n self.aerostruct = True\r\n\r\n if 'loads' in key:\r\n pt_names.append(key.split('.')[0])\r\n\r\n # This logic isn't guaranteed to always be in the same order.\r\n # Hardcoding for now because this script is already non-general.\r\n # if pt_names:\r\n # self.pt_names = pt_names = list(set(pt_names))\r\n # pt_name = pt_names[0]\r\n\r\n self.pt_names = pt_names = ['AS_point_0', 'AS_point_1']\r\n pt_name = self.pt_names[0]\r\n\r\n self.names = names\r\n n_names = len(names)\r\n\r\n # loop to pull data out of case reader and organize it into arrays\r\n for i, case in enumerate(cr.get_cases()):\r\n\r\n if self.opt:\r\n self.obj.append(case.outputs[self.obj_key])\r\n\r\n # Loop through each of the surfaces\r\n for name in names:\r\n\r\n # Check if this is an aerostructual case; treat differently\r\n # due to the way the problem is organized\r\n if not self.aerostruct:\r\n\r\n # A mesh exists for all types of cases\r\n self.mesh.append(case.outputs[name+'.mesh'])\r\n\r\n try:\r\n self.radius.append(np.squeeze(case.outputs[name+'.radius']))\r\n self.thickness.append(case.outputs[name+'.thickness'])\r\n self.vonmises.append(\r\n np.max(case.outputs[name+'.vonmises'], axis=1))\r\n self.show_tube = True\r\n except:\r\n self.show_tube = False\r\n try:\r\n self.def_mesh.append(case.outputs[name+'.mesh'])\r\n normals.append(case.outputs[pt_name + '.' + name + '.normals'])\r\n widths.append(case.outputs[pt_name + '.' + name + '.widths'])\r\n sec_forces.append(case.outputs[pt_name + '.aero_states.' + name + '_sec_forces'])\r\n self.CL.append(case.outputs[pt_name + '.' + name + '_perf.CL1'])\r\n self.S_ref.append(case.outputs[pt_name + '.' 
+ name + '.S_ref'])\r\n self.show_wing = True\r\n\r\n except:\r\n self.show_wing = False\r\n else:\r\n self.show_wing, self.show_tube = True, True\r\n\r\n self.mesh.append(case.outputs[name+'.mesh'])\r\n self.radius.append(case.outputs[name+'.skin_thickness'])\r\n self.skin_thickness.append(case.outputs[name+'.skin_thickness'])\r\n self.spar_thickness.append(case.outputs[name+'.spar_thickness'])\r\n self.t_over_c.append(case.outputs[name+'.t_over_c'])\r\n self.struct_masses.append(case.outputs[name+'.structural_mass'])\r\n\r\n vm_var_name = '{pt_name}.{surf_name}_perf.vonmises'.format(pt_name=pt_names[1], surf_name=name)\r\n self.vonmises.append(np.max(case.outputs[vm_var_name], axis=1))\r\n\r\n def_mesh_var_name = '{pt_name}.coupled.{surf_name}.def_mesh'.format(pt_name=pt_name, surf_name=name)\r\n self.def_mesh.append(case.outputs[def_mesh_var_name])\r\n\r\n def_mesh_var_name = '{pt_name}.coupled.{surf_name}.def_mesh'.format(pt_name=pt_names[1], surf_name=name)\r\n self.def_mesh_maneuver.append(case.outputs[def_mesh_var_name])\r\n\r\n normals_var_name = '{pt_name}.coupled.{surf_name}.normals'.format(pt_name=pt_name, surf_name=name)\r\n normals.append(case.outputs[normals_var_name])\r\n\r\n normals_var_name = '{pt_name}.coupled.{surf_name}.normals'.format(pt_name=pt_names[1], surf_name=name)\r\n normals_maneuver.append(case.outputs[normals_var_name])\r\n\r\n widths_var_name = '{pt_name}.coupled.{surf_name}.widths'.format(pt_name=pt_name, surf_name=name)\r\n widths.append(case.outputs[widths_var_name])\r\n\r\n widths_var_name = '{pt_name}.coupled.{surf_name}.widths'.format(pt_name=pt_names[1], surf_name=name)\r\n widths_maneuver.append(case.outputs[widths_var_name])\r\n\r\n sec_forces.append(case.outputs[pt_name+'.coupled.aero_states.' + name + '_sec_forces'])\r\n sec_forces_maneuver.append(case.outputs[pt_names[1]+'.coupled.aero_states.' 
+ name + '_sec_forces'])\r\n\r\n cl_var_name = '{pt_name}.{surf_name}_perf.CL1'.format(pt_name=pt_name, surf_name=name)\r\n self.CL.append(case.outputs[cl_var_name])\r\n\r\n S_ref_var_name = '{pt_name}.coupled.{surf_name}.aero_geom.S_ref'.format(pt_name=pt_name, surf_name=name)\r\n self.S_ref.append(case.outputs[S_ref_var_name])\r\n\r\n # Not the best solution for now, but this will ensure\r\n # that this plots correctly even if twist isn't a desvar\r\n try:\r\n if self.aerostruct: # twist is handled differently for aero and aerostruct\r\n self.twist.append(case.outputs[name+'.geometry.twist'])\r\n else:\r\n self.twist.append(case.outputs[name+'.twist'])\r\n except:\r\n ny = self.mesh[0].shape[1]\r\n self.twist.append(np.atleast_2d(np.zeros(ny)))\r\n\r\n if self.show_wing:\r\n alpha.append(case.outputs['alpha'] * np.pi / 180.)\r\n alpha_maneuver.append(case.outputs['alpha_maneuver'] * np.pi / 180.)\r\n rho.append(case.outputs['rho'])\r\n rho_maneuver.append(case.outputs['rho'])\r\n v.append(case.outputs['v'])\r\n if self.show_tube:\r\n self.cg.append(case.outputs['{pt_name}.cg'.format(pt_name=pt_name)])\r\n else:\r\n self.cg.append(case.outputs['cg'])\r\n\r\n # If there are point masses, save them\r\n try:\r\n self.point_mass_locations.append(case.outputs['point_mass_locations'])\r\n self.point_masses_exist = True\r\n except:\r\n self.point_masses_exist = False\r\n pass\r\n\r\n self.fem_origin_dict = {}\r\n self.yield_stress_dict = {}\r\n\r\n if self.show_tube:\r\n for name in names:\r\n surface = cr.system_metadata[name]['component_options']['surface']\r\n self.yield_stress_dict[name + '_yield_stress'] = surface['yield']\r\n\r\n # self.fem_origin_dict[name + '_fem_origin'] = surface['fem_origin']\r\n\r\n self.fem_origin_dict[name + '_fem_origin'] = (surface['data_x_upper'][0].real *(surface['data_y_upper'][0].real-surface['data_y_lower'][0].real) + \\\r\n surface['data_x_upper'][-1].real*(surface['data_y_upper'][-1].real-surface['data_y_lower'][-1].real)) / \\\r\n ( (surface['data_y_upper'][0].real-surface['data_y_lower'][0].real) + (surface['data_y_upper'][-1].real-surface['data_y_lower'][-1].real))\r\n\r\n le_te_coords = np.array([surface['data_x_upper'][0].real, surface['data_x_upper'][-1].real, surface['wing_weight_ratio']])\r\n\r\n np.save(str('temp_' + name + '_le_te'), le_te_coords)\r\n\r\n if self.opt:\r\n self.num_iters = np.max([int(len(self.mesh) / n_names) - 1, 1])\r\n else:\r\n self.num_iters = 1\r\n\r\n symm_count = 0\r\n for mesh in self.mesh:\r\n if np.all(mesh[:, :, 1] >= -1e-8) or np.all(mesh[:, :, 1] <= 1e-8):\r\n symm_count += 1\r\n if symm_count == len(self.mesh):\r\n self.symmetry = True\r\n else:\r\n self.symmetry = False\r\n\r\n if self.symmetry:\r\n\r\n new_mesh = []\r\n if self.show_tube:\r\n new_r = []\r\n new_skinthickness = []\r\n new_sparthickness = []\r\n new_toverc = []\r\n new_vonmises = []\r\n if self.show_wing:\r\n new_twist = []\r\n new_sec_forces = []\r\n new_sec_forces_maneuver = []\r\n new_def_mesh = []\r\n new_def_mesh_maneuver = []\r\n new_widths = []\r\n new_widths_maneuver = []\r\n new_normals = []\r\n new_normals_maneuver = []\r\n\r\n for i in range(self.num_iters):\r\n for j, name in enumerate(names):\r\n mirror_mesh = self.mesh[i*n_names+j].copy()\r\n mirror_mesh[:, :, 1] *= -1.\r\n mirror_mesh = mirror_mesh[:, ::-1, :][:, 1:, :]\r\n new_mesh.append(np.hstack((self.mesh[i*n_names+j], mirror_mesh)))\r\n\r\n if self.show_tube:\r\n sparthickness = self.spar_thickness[i*n_names+j]\r\n new_sparthickness.append(np.hstack((sparthickness[0], 
sparthickness[0][::-1])))\r\n skinthickness = self.skin_thickness[i*n_names+j]\r\n new_skinthickness.append(np.hstack((skinthickness[0], skinthickness[0][::-1])))\r\n toverc = self.t_over_c[i*n_names+j]\r\n new_toverc.append(np.hstack((toverc[0], toverc[0][::-1])))\r\n r = self.radius[i*n_names+j]\r\n new_r.append(np.hstack((r, r[::-1])))\r\n vonmises = self.vonmises[i*n_names+j]\r\n new_vonmises.append(np.hstack((vonmises, vonmises[::-1])))\r\n\r\n if self.show_wing:\r\n mirror_mesh = self.def_mesh[i*n_names+j].copy()\r\n mirror_mesh[:, :, 1] *= -1.\r\n mirror_mesh = mirror_mesh[:, ::-1, :][:, 1:, :]\r\n new_def_mesh.append(np.hstack((self.def_mesh[i*n_names+j], mirror_mesh)))\r\n\r\n mirror_normals = normals[i*n_names+j].copy()\r\n mirror_normals = mirror_normals[:, ::-1, :][:, 1:, :]\r\n new_normals.append(np.hstack((normals[i*n_names+j], mirror_normals)))\r\n\r\n mirror_forces = sec_forces[i*n_names+j].copy()\r\n mirror_forces = mirror_forces[:, ::-1, :]\r\n new_sec_forces.append(np.hstack((sec_forces[i*n_names+j], mirror_forces)))\r\n\r\n mirror_mesh_maneuver = self.def_mesh_maneuver[i*n_names+j].copy()\r\n mirror_mesh_maneuver[:, :, 1] *= -1.\r\n mirror_mesh_maneuver = mirror_mesh_maneuver[:, ::-1, :][:, 1:, :]\r\n new_def_mesh_maneuver.append(np.hstack((self.def_mesh_maneuver[i*n_names+j], mirror_mesh_maneuver)))\r\n\r\n mirror_normals_maneuver = normals_maneuver[i*n_names+j].copy()\r\n mirror_normals_maneuver = mirror_normals_maneuver[:, ::-1, :][:, 1:, :]\r\n new_normals_maneuver.append(np.hstack((normals_maneuver[i*n_names+j], mirror_normals_maneuver)))\r\n\r\n mirror_forces_maneuver = sec_forces_maneuver[i*n_names+j].copy()\r\n mirror_forces_maneuver = mirror_forces_maneuver[:, ::-1, :]\r\n new_sec_forces_maneuver.append(np.hstack((sec_forces_maneuver[i*n_names+j], mirror_forces_maneuver)))\r\n\r\n new_widths.append(np.hstack((widths[i*n_names+j], widths[i*n_names+j][::-1])))\r\n new_widths_maneuver.append(np.hstack((widths_maneuver[i*n_names+j], widths_maneuver[i*n_names+j][::-1])))\r\n twist = self.twist[i*n_names+j]\r\n new_twist.append(np.hstack((twist[0], twist[0][::-1][1:])))\r\n\r\n self.mesh = new_mesh\r\n if self.show_tube:\r\n self.skin_thickness = new_skinthickness\r\n self.spar_thickness = new_sparthickness\r\n self.t_over_c = new_toverc\r\n self.radius = new_r\r\n self.vonmises = new_vonmises\r\n if self.show_wing:\r\n self.def_mesh = new_def_mesh\r\n self.twist = new_twist\r\n widths = new_widths\r\n widths_maneuver = new_widths_maneuver\r\n sec_forces = new_sec_forces\r\n sec_forces_maneuver = new_sec_forces_maneuver\r\n\r\n if self.show_wing:\r\n for i in range(self.num_iters):\r\n for j, name in enumerate(names):\r\n m_vals = self.mesh[i*n_names+j].copy()\r\n a = alpha[i]\r\n cosa = np.cos(a)\r\n sina = np.sin(a)\r\n\r\n forces = np.sum(sec_forces[i*n_names+j], axis=0)\r\n\r\n lift = (-forces[:, 0] * sina + forces[:, 2] * cosa) / \\\r\n widths[i*n_names+j]/0.5/rho[i][0]/v[i][0]**2\r\n a_maneuver = alpha_maneuver[i]\r\n cosa_maneuver = np.cos(a_maneuver)\r\n sina_maneuver = np.sin(a_maneuver)\r\n forces_maneuver = np.sum(sec_forces_maneuver[i*n_names+j], axis=0)\r\n lift_maneuver= (-forces_maneuver[:, 0] * sina_maneuver + forces_maneuver[:, 2] * cosa_maneuver) / \\\r\n widths_maneuver[i*n_names+j]/0.5/rho_maneuver[i][1]/v[i][1]**2\r\n\r\n span = (m_vals[0, :, 1] / (m_vals[0, -1, 1] - m_vals[0, 0, 1]))\r\n span = span - (span[0] + .5)\r\n\r\n lift_area = np.sum(lift * (span[1:] - span[:-1]))\r\n\r\n lift_ell = 4 * lift_area / np.pi * np.sqrt(1 - 
(2*span)**2)\r\n\r\n normalize_factor = max(lift_ell) / 4 * np.pi\r\n lift_ell = lift_ell / normalize_factor\r\n lift = lift / normalize_factor\r\n\r\n lift_area_maneuver = np.sum(lift_maneuver * (span[1:] - span[:-1]))\r\n\r\n lift_ell_maneuver = 4 * lift_area_maneuver / np.pi * np.sqrt(1 - (2*span)**2)\r\n\r\n normalize_factor = max(lift_ell_maneuver) / 4 * np.pi\r\n lift_ell_maneuver = lift_ell_maneuver / normalize_factor\r\n lift_maneuver = lift_maneuver / normalize_factor\r\n\r\n self.lift.append(lift)\r\n self.lift_ell.append(lift_ell)\r\n self.lift_maneuver.append(lift_maneuver)\r\n self.lift_ell_maneuver.append(lift_ell_maneuver)\r\n\r\n wingspan = np.abs(m_vals[0, -1, 1] - m_vals[0, 0, 1])\r\n self.AR.append(wingspan**2 / self.S_ref[i*n_names+j])\r\n\r\n # recenter def_mesh points for better viewing\r\n for i in range(self.num_iters):\r\n center = np.zeros((3))\r\n for j in range(n_names):\r\n center += np.mean(self.def_mesh[i*n_names+j], axis=(0,1))\r\n for j in range(n_names):\r\n self.def_mesh[i*n_names+j] -= center / n_names\r\n self.cg[i] -= center / n_names\r\n if self.point_masses_exist:\r\n self.point_mass_locations[i] -= center / n_names\r\n\r\n # recenter mesh points for better viewing\r\n for i in range(self.num_iters):\r\n center = np.zeros((3))\r\n for j in range(n_names):\r\n center += np.mean(self.mesh[i*n_names+j], axis=(0,1))\r\n for j in range(n_names):\r\n self.mesh[i*n_names+j] -= center / n_names\r\n\r\n if self.show_wing:\r\n self.min_twist, self.max_twist = self.get_list_limits(self.twist)\r\n diff = (self.max_twist - self.min_twist) * 0.05\r\n self.min_twist -= diff\r\n self.max_twist += diff\r\n self.min_l, self.max_l = self.get_list_limits(self.lift)\r\n self.min_le, self.max_le = self.get_list_limits(self.lift_ell)\r\n self.min_l_maneuver, self.max_l_maneuver = self.get_list_limits(self.lift_maneuver)\r\n self.min_le_maneuver, self.max_le_maneuver = self.get_list_limits(self.lift_ell_maneuver)\r\n self.min_l, self.max_l = min(self.min_l, self.min_le, self.min_l_maneuver, self.min_le_maneuver), max(self.max_l, self.max_le, self.max_l_maneuver, self.max_le_maneuver)\r\n diff = (self.max_l - self.min_l) * 0.05\r\n self.min_l -= diff\r\n self.max_l += diff\r\n if self.show_tube:\r\n self.min_t, self.max_t = self.get_list_limits(self.skin_thickness)\r\n self.min_toc, self.max_toc = self.get_list_limits(self.t_over_c)\r\n diff = (self.max_t - self.min_t) * 0.05\r\n self.min_t -= diff\r\n self.max_t += diff\r\n self.min_vm, self.max_vm = self.get_list_limits(self.vonmises)\r\n diff = (self.max_vm - self.min_vm) * 0.05\r\n self.min_vm -= diff\r\n self.max_vm += diff\r\n\r\n def plot_sides(self):\r\n\r\n if self.show_wing:\r\n\r\n self.ax2.cla()\r\n self.ax2.locator_params(axis='y',nbins=5)\r\n self.ax2.locator_params(axis='x',nbins=3)\r\n self.ax2.set_ylim([self.min_twist, self.max_twist])\r\n self.ax2.set_xlim([-1, 1])\r\n self.ax2.set_ylabel('jig twist [deg]', rotation=\"horizontal\", ha=\"right\")\r\n\r\n self.ax3.cla()\r\n self.ax3.text(0.01, 0.1+.4, 'elliptical',\r\n transform=self.ax3.transAxes, color='k')\r\n self.ax3.text(0.7, 0.25+.45, 'cruise',\r\n transform=self.ax3.transAxes, color=my_blue)\r\n self.ax3.text(0.7, 0.4+.45, '2.5 g',\r\n transform=self.ax3.transAxes, color=my_orange)\r\n self.ax3.locator_params(axis='y',nbins=4)\r\n self.ax3.locator_params(axis='x',nbins=3)\r\n self.ax3.set_ylim([self.min_l, self.max_l])\r\n self.ax3.set_xlim([-1, 1])\r\n self.ax3.set_ylabel('normalized lift', rotation=\"horizontal\", ha=\"right\")\r\n\r\n if 
self.show_tube:\r\n\r\n self.ax4.cla()\r\n self.ax4.locator_params(axis='y',nbins=4)\r\n self.ax4.locator_params(axis='x',nbins=3)\r\n self.ax4.set_ylim([self.min_t, self.max_t])\r\n self.ax4.set_xlim([-1, 1])\r\n self.ax4.set_ylabel('thickness [m]', rotation=\"horizontal\", ha=\"right\")\r\n\r\n self.ax6.cla()\r\n self.ax6.locator_params(axis='y',nbins=4)\r\n self.ax6.locator_params(axis='x',nbins=3)\r\n self.ax6.set_ylim([self.min_toc, self.max_toc])\r\n self.ax6.set_xlim([-1, 1])\r\n self.ax6.set_ylabel('thickness to chord', rotation=\"horizontal\", ha=\"right\")\r\n\r\n self.ax5.cla()\r\n max_yield_stress = 0.\r\n for key, yield_stress in iteritems(self.yield_stress_dict):\r\n self.ax5.axhline(yield_stress, c='r', lw=2, ls='--')\r\n max_yield_stress = max(max_yield_stress, yield_stress)\r\n\r\n self.ax5.locator_params(axis='y',nbins=4)\r\n self.ax5.locator_params(axis='x',nbins=3)\r\n # self.ax5.set_ylim([self.min_vm, self.max_vm])\r\n # self.ax5.set_ylim([0, max_yield_stress*1.1])\r\n self.ax5.set_xlim([-1, 1])\r\n self.ax5.set_ylabel('von Mises [Pa]', rotation=\"horizontal\", ha=\"right\")\r\n self.ax5.set_xlabel('normalized span')\r\n self.ax5.text(0.15, 1.05, 'failure limit',\r\n transform=self.ax5.transAxes, color='r')\r\n\r\n n_names = len(self.names)\r\n for j, name in enumerate(self.names):\r\n m_vals = self.mesh[self.curr_pos*n_names+j].copy()\r\n span = m_vals[0, -1, 1] - m_vals[0, 0, 1]\r\n rel_span = (m_vals[0, :, 1] - m_vals[0, 0, 1]) * 2 / span - 1\r\n span_diff = ((m_vals[0, :-1, 1] + m_vals[0, 1:, 1]) / 2 - m_vals[0, 0, 1]) * 2 / span - 1\r\n\r\n if self.show_wing:\r\n t_vals = self.twist[self.curr_pos*n_names+j]\r\n l_vals = self.lift[self.curr_pos*n_names+j]\r\n l_maneuver_vals = self.lift_maneuver[self.curr_pos*n_names+j]\r\n le_vals = self.lift_ell[self.curr_pos*n_names+j]\r\n # le_vals_maneuver = self.lift_ell_maneuver[self.curr_pos*n_names+j]\r\n\r\n self.ax2.plot(rel_span, t_vals, lw=2, c='k')\r\n self.ax3.plot(rel_span, le_vals, '--', lw=2, c='k', alpha = 0.8)\r\n self.ax3.plot(span_diff, l_vals, lw=2, c=my_blue)\r\n self.ax3.plot(span_diff, l_maneuver_vals, lw=2, c=my_orange)\r\n # self.ax3.plot(rel_span, le_vals_maneuver, '--', lw=2, c='k')\r\n\r\n if self.show_tube:\r\n skinthick = self.skin_thickness[self.curr_pos*n_names+j]\r\n sparthick = self.spar_thickness[self.curr_pos*n_names+j]\r\n toverc = self.t_over_c[self.curr_pos*n_names+j]\r\n vm_vals = self.vonmises[self.curr_pos*n_names+j]\r\n\r\n self.ax4.plot(span_diff, skinthick, lw=2, c=my_blue)\r\n self.ax4.text(0.05, 0.8, 'skin',\r\n transform=self.ax4.transAxes, color=my_blue)\r\n self.ax4.plot(span_diff, sparthick, lw=2, c=my_green)\r\n self.ax4.text(0.05, 0.6, 'spar',\r\n transform=self.ax4.transAxes, color=my_green)\r\n self.ax5.plot(span_diff, vm_vals, lw=2, c='k')\r\n self.ax6.plot(span_diff, toverc, lw=2, c='k')\r\n\r\n self.ax2.set_xticklabels([])\r\n self.ax3.set_xticklabels([])\r\n self.ax4.set_xticklabels([])\r\n self.ax6.set_xticklabels([])\r\n\r\n def plot_wing(self):\r\n\r\n n_names = len(self.names)\r\n self.ax.cla()\r\n az = self.ax.azim\r\n el = self.ax.elev\r\n dist = self.ax.dist\r\n\r\n # for a planform view use:\r\n # az = 270\r\n # el = 0.\r\n # dist = 15.\r\n\r\n\r\n for j, name in enumerate(self.names):\r\n\r\n # for wingbox viz\r\n try:\r\n le_te = np.load(str('temp_' + name + '_le_te.npy'))\r\n except:\r\n print('temp_le_te.npy file not found')\r\n\r\n mesh0 = self.mesh[self.curr_pos*n_names+j].copy()\r\n\r\n self.ax.set_axis_off()\r\n\r\n if self.show_wing:\r\n def_mesh0 = 
self.def_mesh[self.curr_pos*n_names+j]\r\n x = mesh0[:, :, 0]\r\n y = mesh0[:, :, 1]\r\n z = mesh0[:, :, 2]\r\n\r\n #################### for wingbox viz ####################\r\n mesh1 = np.zeros((2,mesh0.shape[1],mesh0.shape[2]))\r\n mesh1[0,:,:] = mesh0[0,:,:]\r\n mesh1[1,:,:] = mesh0[-1,:,:]\r\n chord_vec = mesh1[1,:,:] - mesh1[0,:,:]\r\n mesh1[0,:,:] = mesh1[0,:,:] + le_te[0] * chord_vec\r\n mesh1[1,:,:] = mesh1[1,:,:] - (1 - le_te[1]) * chord_vec\r\n\r\n current_t_over_c = self.t_over_c[self.curr_pos*n_names+j]\r\n\r\n half_len_toverc = int(len(current_t_over_c) / 2)\r\n tovercarray = np.zeros((len(current_t_over_c)+1))\r\n tovercarray[:half_len_toverc] = current_t_over_c[:half_len_toverc]\r\n tovercarray[half_len_toverc] = current_t_over_c[half_len_toverc]\r\n tovercarray[half_len_toverc+1:-1] = current_t_over_c[half_len_toverc:-1]\r\n chord_array = np.zeros((chord_vec.shape[0]))\r\n for i in range(chord_vec.shape[0]):\r\n chord_array[i] = np.linalg.norm(chord_vec[i,:])\r\n\r\n # for the skins\r\n x_box = mesh1[:, :, 0]\r\n y_box = mesh1[:, :, 1]\r\n z_box = mesh1[:, :, 2] - tovercarray / 2 *chord_array\r\n z_box2 = mesh1[:, :, 2] + tovercarray / 2 *chord_array\r\n\r\n # for the rear spar\r\n mesh2 = mesh1.copy()\r\n mesh2[0,:,:] = mesh1[-1,:,:]\r\n mesh2[1,:,:] = mesh1[-1,:,:]\r\n\r\n mesh2[0, :, 2] = mesh2[0, :, 2] - tovercarray / 2 *chord_array\r\n mesh2[1, :, 2] = mesh2[1, :, 2] + tovercarray / 2 *chord_array\r\n\r\n x_box3 = mesh2[:, :, 0]\r\n y_box3 = mesh2[:, :, 1]\r\n z_box3 = mesh2[:, :, 2]\r\n\r\n # for the forward spar\r\n mesh3 = mesh1.copy()\r\n mesh3[0,:,:] = mesh1[0,:,:]\r\n mesh3[1,:,:] = mesh1[0,:,:]\r\n\r\n mesh3[0, :, 2] = mesh3[0, :, 2] - tovercarray / 2 *chord_array\r\n mesh3[1, :, 2] = mesh3[1, :, 2] + tovercarray / 2 *chord_array\r\n\r\n x_box4 = mesh3[:, :, 0]\r\n y_box4 = mesh3[:, :, 1]\r\n z_box4 = mesh3[:, :, 2]\r\n\r\n #########################################################\r\n\r\n try: # show deformed mesh option may not be available\r\n if self.show_def_mesh.get():\r\n x_def = def_mesh0[:, :, 0]\r\n y_def = def_mesh0[:, :, 1]\r\n z_def = def_mesh0[:, :, 2]\r\n\r\n self.c2.grid(row=0, column=3, padx=5, sticky=Tk.W)\r\n if self.ex_def.get():\r\n z_def = (z_def - z) * 10 + z_def\r\n def_mesh0 = (def_mesh0 - mesh0) * 30 + def_mesh0\r\n else:\r\n def_mesh0 = (def_mesh0 - mesh0) * 2 + def_mesh0\r\n self.ax.plot_wireframe(x_def, y_def, z_def, rstride=1, cstride=1, color='k', linewidth = 0.75)\r\n self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k', alpha=.3, linewidth = 0.75)\r\n self.ax.plot_surface(x_box, y_box, z_box, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box, y_box, z_box2, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box3, y_box3, z_box3, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box4, y_box4, z_box4, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n else:\r\n self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k', linewidth = 0.75)\r\n self.ax.plot_surface(x_box, y_box, z_box, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box, y_box, z_box2, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box3, y_box3, z_box3, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box4, y_box4, z_box4, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.c2.grid_forget()\r\n 
except:\r\n self.ax.plot_wireframe(x, y, z, rstride=1, cstride=1, color='k')\r\n self.ax.plot_surface(x_box, y_box, z_box, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box, y_box, z_box2, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box3, y_box3, z_box3, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n self.ax.plot_surface(x_box4, y_box4, z_box4, rstride=1, cstride=1, color='k', alpha=0.25) # wingbox viz\r\n\r\n # cg = self.cg[self.curr_pos]\r\n # self.ax.scatter(cg[0], cg[1], cg[2], s=100, color='r')\r\n\r\n if self.point_masses_exist:\r\n for point_mass_loc in self.point_mass_locations[self.curr_pos]:\r\n self.ax.scatter(point_mass_loc[0], point_mass_loc[1], point_mass_loc[2], s=100, color='b')\r\n if self.symmetry:\r\n self.ax.scatter(point_mass_loc[0], -point_mass_loc[1], point_mass_loc[2], s=100, color='b')\r\n\r\n lim = 0.\r\n for j in range(n_names):\r\n ma = np.max(self.mesh[self.curr_pos*n_names+j], axis=(0,1,2))\r\n if ma > lim:\r\n lim = ma\r\n lim /= float(self.zoom_scale)\r\n self.ax.auto_scale_xyz([-lim, lim], [-lim, lim], [-lim, lim])\r\n self.ax.set_title(\"Iteration: {}\".format(self.curr_pos))\r\n\r\n # round_to_n = lambda x, n: round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1))\r\n if self.opt:\r\n obj_val = self.obj[self.curr_pos]\r\n\r\n try:\r\n wing_weight_ratio = np.load(str('temp_' + name + '_le_te.npy'))[2]\r\n except:\r\n print('temp_le_te.npy file not found')\r\n\r\n sw_val = self.struct_masses[self.curr_pos] / wing_weight_ratio\r\n self.ax.text2D(.05, -.1, self.obj_key + ' [kg]: {}'.format(obj_val),\r\n transform=self.ax.transAxes, color='k')\r\n self.ax.text2D(.05, -.15, 'wingbox mass (w/o wing_weight_ratio)' + ' [kg]: {}'.format(sw_val),\r\n transform=self.ax.transAxes, color='k')\r\n\r\n self.ax.view_init(elev=el, azim=az) # Reproduce view\r\n self.ax.dist = dist\r\n\r\n def save_video(self):\r\n FFMpegWriter = manimation.writers['ffmpeg']\r\n options = dict(title='Movie', artist='Matplotlib')\r\n writer = FFMpegWriter(fps=5, options=options, bitrate=3000)\r\n\r\n with writer.saving(self.f, \"movie.mp4\", 100):\r\n self.curr_pos = 0\r\n self.update_graphs()\r\n self.f.canvas.draw()\r\n plt.draw()\r\n for i in range(10):\r\n writer.grab_frame()\r\n\r\n for i in range(self.num_iters):\r\n self.curr_pos = i\r\n self.update_graphs()\r\n self.f.canvas.draw()\r\n plt.draw()\r\n writer.grab_frame()\r\n\r\n self.curr_pos = self.num_iters\r\n self.update_graphs()\r\n self.f.canvas.draw()\r\n plt.draw()\r\n for i in range(20):\r\n writer.grab_frame()\r\n\r\n def update_graphs(self, e=None):\r\n if e is not None:\r\n self.curr_pos = int(e)\r\n self.curr_pos = self.curr_pos % (self.num_iters)\r\n\r\n self.plot_wing()\r\n self.plot_sides()\r\n self.canvas.draw()\r\n\r\n def check_length(self):\r\n # Load the current sqlitedict\r\n cr = self.case_reader = SqliteCaseReader(self.db_name)\r\n\r\n # Get the number of current iterations\r\n # Minus one because OpenMDAO uses 1-indexing\r\n self.num_iters = len(cr.get_cases('driver'))\r\n\r\n def get_list_limits(self, input_list):\r\n list_min = 1.e20\r\n list_max = -1.e20\r\n for list_ in input_list:\r\n mi = np.min(list_)\r\n if mi < list_min:\r\n list_min = mi\r\n ma = np.max(list_)\r\n if ma > list_max:\r\n list_max = ma\r\n\r\n return list_min, list_max\r\n\r\n\r\n def auto_ref(self):\r\n \"\"\"\r\n Automatically refreshes the history file, which is\r\n useful if examining a running optimization.\r\n \"\"\"\r\n if 
self.var_ref.get():\r\n self.root.after(500, self.auto_ref)\r\n self.check_length()\r\n self.update_graphs()\r\n\r\n # Check if the sqlitedict file has change and if so, fully\r\n # load in the new file.\r\n if self.num_iters > self.old_n:\r\n self.load_db()\r\n self.old_n = self.num_iters\r\n self.draw_slider()\r\n\r\n def save_image(self):\r\n fname = 'fig' + '.pdf'\r\n plt.savefig(fname)\r\n\r\n def quit(self):\r\n \"\"\"\r\n Destroy GUI window cleanly if quit button pressed.\r\n \"\"\"\r\n self.root.quit()\r\n self.root.destroy()\r\n\r\n def draw_slider(self):\r\n # scale to choose iteration to view\r\n self.w = Tk.Scale(\r\n self.options_frame,\r\n from_=0, to=self.num_iters - 1,\r\n orient=Tk.HORIZONTAL,\r\n resolution=1,\r\n font=tkFont.Font(family=\"Helvetica\", size=10),\r\n command=self.update_graphs,\r\n length=200)\r\n\r\n if self.curr_pos == self.num_iters - 1 or self.curr_pos == 0 or self.var_ref.get():\r\n self.curr_pos = self.num_iters - 1\r\n self.w.set(self.curr_pos)\r\n self.w.grid(row=0, column=1, padx=5, sticky=Tk.W)\r\n\r\n def draw_GUI(self):\r\n \"\"\"\r\n Create the frames and widgets in the bottom section of the canvas.\r\n \"\"\"\r\n font = tkFont.Font(family=\"Helvetica\", size=10)\r\n\r\n lab_font = Tk.Label(\r\n self.options_frame,\r\n text=\"Iteration number:\",\r\n font=font)\r\n lab_font.grid(row=0, column=0, sticky=Tk.S)\r\n\r\n self.draw_slider()\r\n\r\n if self.show_wing and self.show_tube:\r\n # checkbox to show deformed mesh\r\n self.show_def_mesh = Tk.IntVar()\r\n c1 = Tk.Checkbutton(\r\n self.options_frame,\r\n text=\"Show deformed mesh\",\r\n variable=self.show_def_mesh,\r\n command=self.update_graphs,\r\n font=font)\r\n c1.grid(row=0, column=2, padx=5, sticky=Tk.W)\r\n\r\n # checkbox to exaggerate deformed mesh\r\n self.ex_def = Tk.IntVar()\r\n self.c2 = Tk.Checkbutton(\r\n self.options_frame,\r\n text=\"Exaggerate deformations\",\r\n variable=self.ex_def,\r\n command=self.update_graphs,\r\n font=font)\r\n self.c2.grid(row=0, column=3, padx=5, sticky=Tk.W)\r\n\r\n # Option to automatically refresh history file\r\n # especially useful for currently running optimizations\r\n self.var_ref = Tk.IntVar()\r\n # self.var_ref.set(1)\r\n c11 = Tk.Checkbutton(\r\n self.options_frame,\r\n text=\"Automatically refresh\",\r\n variable=self.var_ref,\r\n command=self.auto_ref,\r\n font=font)\r\n c11.grid(row=0, column=4, sticky=Tk.W, pady=6)\r\n\r\n button = Tk.Button(\r\n self.options_frame,\r\n text='Save video',\r\n command=self.save_video,\r\n font=font)\r\n button.grid(row=0, column=5, padx=5, sticky=Tk.W)\r\n\r\n button4 = Tk.Button(\r\n self.options_frame,\r\n text='Save image',\r\n command=self.save_image,\r\n font=font)\r\n button4.grid(row=0, column=6, padx=5, sticky=Tk.W)\r\n\r\n button5 = Tk.Button(\r\n self.options_frame,\r\n text='Quit',\r\n command=self.quit,\r\n font=font)\r\n button5.grid(row=0, column=7, padx=5, sticky=Tk.W)\r\n\r\n self.auto_ref()\r\n\r\ndef disp_plot(args=sys.argv):\r\n disp = Display(args)\r\n disp.draw_GUI()\r\n plt.tight_layout()\r\n disp.root.protocol(\"WM_DELETE_WINDOW\", disp.quit)\r\n Tk.mainloop()\r\n\r\nif __name__ == '__main__':\r\n disp_plot()\r\n"
] |
[
[
"numpy.min",
"numpy.mean",
"matplotlib.pyplot.draw",
"numpy.cos",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"numpy.max",
"numpy.sin",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"matplotlib.use",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"numpy.hstack",
"numpy.squeeze",
"numpy.sum",
"matplotlib.pyplot.subplot2grid",
"numpy.abs",
"numpy.all"
]
] |
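The save_video method in the entry above drives matplotlib's ffmpeg writer frame by frame inside a writer.saving() context. One caveat: current matplotlib FFMpegWriter objects take a metadata dict rather than an options keyword, so the call as stored would likely need that rename to run. Below is a minimal, self-contained sketch of the same grab-frame pattern; it assumes ffmpeg is on PATH, and the figure contents are made up for illustration.

import numpy as np
import matplotlib
matplotlib.use("Agg")                      # headless backend for a script-only demo
import matplotlib.pyplot as plt
import matplotlib.animation as manimation

FFMpegWriter = manimation.writers["ffmpeg"]
metadata = dict(title="Movie", artist="Matplotlib")           # FFMpegWriter expects `metadata`
writer = FFMpegWriter(fps=5, metadata=metadata, bitrate=3000)

fig, ax = plt.subplots()
x = np.linspace(0.0, 2.0 * np.pi, 200)
(line,) = ax.plot(x, np.sin(x))

with writer.saving(fig, "movie.mp4", dpi=100):
    for i in range(30):
        line.set_ydata(np.sin(x + 0.2 * i))  # update the artist for this frame
        fig.canvas.draw()
        writer.grab_frame()                  # append the current canvas as a frame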
AsciencioAlex/Matplotlib-In-Practice-Data-Visualization
|
[
"4203b5ba0f3f2fd7f72c26e6fec6d32d7949fc54"
] |
[
"sine-cosine1.py"
] |
[
"from pylab import *\nimport numpy as np\n\n\n# generate uniformly distributed\n# 256 points from -pi to pi, inclusive\nx = np.linspace(-np.pi, np.pi, 256, endpoint=True)\n\n# these are vectorized versions\n# of math.cos, and math.sin in built-in Python maths\n# compute cos for every x\ny = np.cos(x)\n\n# compute sin for every x\ny1 = np.sin(x)\n\n# plot cos\nplot(x,y)\n\n# plot sin\nplot(x, y1)\n\n# define plot title\ntitle(\"Functions $\\sin$ and $\\cos$\")\n\n\n# set x limit\nxlim(-3.0, 3.0)\n# set y limit\nylim(-1.0, 1.0)\n\n# format ticks at specific values\nxticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],\n [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$+\\pi/2$', r'$+\\pi$'])\nyticks([-1, 0, +1],\n [r'$-1$', r'$0$', r'$+1$'])\nshow()\n"
] |
[
[
"numpy.linspace",
"numpy.sin",
"numpy.cos"
]
] |
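The sine/cosine script above relies on "from pylab import *", which pulls plotting names into the module namespace. A minimal sketch of the same figure with explicit numpy/pyplot namespaces is shown here; it is purely an alternative style, not a change to the stored example.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-np.pi, np.pi, 256, endpoint=True)

fig, ax = plt.subplots()
ax.plot(x, np.cos(x), label=r"$\cos(x)$")
ax.plot(x, np.sin(x), label=r"$\sin(x)$")
ax.set_title(r"Functions $\sin$ and $\cos$")
ax.set_xlim(-3.0, 3.0)
ax.set_ylim(-1.0, 1.0)
ax.set_xticks([-np.pi, -np.pi / 2, 0, np.pi / 2, np.pi])
ax.set_xticklabels([r"$-\pi$", r"$-\pi/2$", r"$0$", r"$+\pi/2$", r"$+\pi$"])
ax.set_yticks([-1, 0, 1])
ax.legend()
plt.show()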
yick2232/imgclsmob
|
[
"fb220bff18b27d1fc6db1bac6cf69b70c2d07490",
"fb220bff18b27d1fc6db1bac6cf69b70c2d07490",
"fb220bff18b27d1fc6db1bac6cf69b70c2d07490"
] |
[
"chainer_/chainercv2/models/pyramidnet.py",
"pytorch/pytorchcv/models/squeezenext.py",
"pytorch/pytorchcv/models/wrn.py"
] |
[
"\"\"\"\n PyramidNet for ImageNet-1K, implemented in Chainer.\n Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.\n\"\"\"\n\n__all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit']\n\nimport os\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Chain\nfrom functools import partial\nfrom chainer.serializers import load_npz\nfrom .common import pre_conv1x1_block, pre_conv3x3_block, SimpleSequential\nfrom .preresnet import PreResActivation\n\n\nclass PyrBlock(Chain):\n \"\"\"\n Simple PyramidNet block for residual path in PyramidNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Stride of the convolution.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride):\n super(PyrBlock, self).__init__()\n with self.init_scope():\n self.conv1 = pre_conv3x3_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n activate=False)\n self.conv2 = pre_conv3x3_block(\n in_channels=out_channels,\n out_channels=out_channels)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass PyrBottleneck(Chain):\n \"\"\"\n PyramidNet bottleneck block for residual path in PyramidNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Stride of the convolution.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride):\n super(PyrBottleneck, self).__init__()\n mid_channels = out_channels // 4\n\n with self.init_scope():\n self.conv1 = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n activate=False)\n self.conv2 = pre_conv3x3_block(\n in_channels=mid_channels,\n out_channels=mid_channels,\n stride=stride)\n self.conv3 = pre_conv1x1_block(\n in_channels=mid_channels,\n out_channels=out_channels)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass PyrUnit(Chain):\n \"\"\"\n PyramidNet unit with residual connection.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Stride of the convolution.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n bottleneck):\n super(PyrUnit, self).__init__()\n assert (out_channels >= in_channels)\n self.resize_identity = (stride != 1)\n if out_channels > in_channels:\n self.identity_pad_width = ((0, 0), (0, out_channels - in_channels), (0, 0), (0, 0))\n else:\n self.identity_pad_width = None\n\n with self.init_scope():\n if bottleneck:\n self.body = PyrBottleneck(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride)\n else:\n self.body = PyrBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride)\n self.bn = L.BatchNormalization(\n size=out_channels,\n eps=1e-5)\n if self.resize_identity:\n self.identity_pool = partial(\n F.average_pooling_2d,\n ksize=2,\n stride=stride)\n\n def __call__(self, x):\n identity = x\n x = self.body(x)\n x = self.bn(x)\n if self.resize_identity:\n identity = self.identity_pool(identity)\n if self.identity_pad_width is not None:\n identity = F.pad(identity, pad_width=self.identity_pad_width, mode=\"constant\", constant_values=0)\n x = x + 
identity\n return x\n\n\nclass PyrInitBlock(Chain):\n \"\"\"\n PyramidNet specific initial block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(PyrInitBlock, self).__init__()\n with self.init_scope():\n self.conv = L.Convolution2D(\n in_channels=in_channels,\n out_channels=out_channels,\n ksize=7,\n stride=2,\n pad=3,\n nobias=True)\n self.bn = L.BatchNormalization(\n size=out_channels,\n eps=1e-5)\n self.activ = F.relu\n self.pool = partial(\n F.max_pooling_2d,\n ksize=3,\n stride=2,\n pad=1,\n cover_all=False)\n\n def __call__(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.activ(x)\n x = self.pool(x)\n return x\n\n\nclass PyramidNet(Chain):\n \"\"\"\n PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck,\n in_channels=3,\n in_size=(224, 224),\n classes=1000):\n super(PyramidNet, self).__init__()\n self.in_size = in_size\n self.classes = classes\n\n with self.init_scope():\n self.features = SimpleSequential()\n with self.features.init_scope():\n setattr(self.features, \"init_block\", PyrInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = SimpleSequential()\n with stage.init_scope():\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n setattr(stage, \"unit{}\".format(j + 1), PyrUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bottleneck=bottleneck))\n in_channels = out_channels\n setattr(self.features, \"stage{}\".format(i + 1), stage)\n setattr(self.features, 'post_activ', PreResActivation(in_channels=in_channels))\n setattr(self.features, 'final_pool', partial(\n F.average_pooling_2d,\n ksize=7,\n stride=1))\n\n self.output = SimpleSequential()\n with self.output.init_scope():\n setattr(self.output, 'flatten', partial(\n F.reshape,\n shape=(-1, in_channels)))\n setattr(self.output, 'fc', L.Linear(\n in_size=in_channels,\n out_size=classes))\n\n def __call__(self, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_pyramidnet(blocks,\n alpha,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".chainer\", \"models\"),\n **kwargs):\n \"\"\"\n Create PyramidNet model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n alpha : int\n PyramidNet's alpha value.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if blocks == 10:\n layers = [1, 1, 1, 1]\n elif blocks == 12:\n layers = [2, 1, 1, 1]\n elif blocks == 14:\n layers = [2, 2, 1, 1]\n elif blocks == 16:\n 
layers = [2, 2, 2, 1]\n elif blocks == 18:\n layers = [2, 2, 2, 2]\n elif blocks == 34:\n layers = [3, 4, 6, 3]\n elif blocks == 50:\n layers = [3, 4, 6, 3]\n elif blocks == 101:\n layers = [3, 4, 23, 3]\n elif blocks == 152:\n layers = [3, 8, 36, 3]\n elif blocks == 200:\n layers = [3, 24, 36, 3]\n else:\n raise ValueError(\"Unsupported ResNet with number of blocks: {}\".format(blocks))\n\n init_block_channels = 64\n\n growth_add = float(alpha) / float(sum(layers))\n from functools import reduce\n channels = reduce(\n lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],\n layers,\n [[init_block_channels]])[1:]\n channels = [[int(round(cij)) for cij in ci] for ci in channels]\n\n if blocks < 50:\n bottleneck = False\n else:\n bottleneck = True\n channels = [[cij * 4 for cij in ci] for ci in channels]\n\n net = PyramidNet(\n channels=channels,\n init_block_channels=init_block_channels,\n bottleneck=bottleneck,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n load_npz(\n file=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n obj=net)\n\n return net\n\n\ndef pyramidnet101_a360(**kwargs):\n \"\"\"\n PyramidNet-101 model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.chainer/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_pyramidnet(blocks=101, alpha=360, model_name=\"pyramidnet101_a360\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import chainer\n\n chainer.global_config.train = False\n\n pretrained = False\n\n models = [\n pyramidnet101_a360,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n weight_count = net.count_params()\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != pyramidnet101_a360 or weight_count == 42455070)\n\n x = np.zeros((1, 3, 224, 224), np.float32)\n y = net(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n",
"\"\"\"\n SqueezeNext for ImageNet-1K, implemented in PyTorch.\n Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\"\"\"\n\n__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']\n\nimport os\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import ConvBlock, conv1x1_block, conv7x7_block\n\n\nclass SqnxtUnit(nn.Module):\n \"\"\"\n SqueezeNext unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride):\n super(SqnxtUnit, self).__init__()\n if stride == 2:\n reduction_den = 1\n self.resize_identity = True\n elif in_channels > out_channels:\n reduction_den = 4\n self.resize_identity = True\n else:\n reduction_den = 2\n self.resize_identity = False\n\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=(in_channels // reduction_den),\n stride=stride,\n bias=True)\n self.conv2 = conv1x1_block(\n in_channels=(in_channels // reduction_den),\n out_channels=(in_channels // (2 * reduction_den)),\n bias=True)\n self.conv3 = ConvBlock(\n in_channels=(in_channels // (2 * reduction_den)),\n out_channels=(in_channels // reduction_den),\n kernel_size=(1, 3),\n stride=1,\n padding=(0, 1),\n bias=True)\n self.conv4 = ConvBlock(\n in_channels=(in_channels // reduction_den),\n out_channels=(in_channels // reduction_den),\n kernel_size=(3, 1),\n stride=1,\n padding=(1, 0),\n bias=True)\n self.conv5 = conv1x1_block(\n in_channels=(in_channels // reduction_den),\n out_channels=out_channels,\n bias=True)\n\n if self.resize_identity:\n self.identity_conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bias=True)\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = x + identity\n x = self.activ(x)\n return x\n\n\nclass SqnxtInitBlock(nn.Module):\n \"\"\"\n SqueezeNext specific initial block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(SqnxtInitBlock, self).__init__()\n self.conv = conv7x7_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=2,\n padding=1,\n bias=True)\n self.pool = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n ceil_mode=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass SqueezeNext(nn.Module):\n \"\"\"\n SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n final_block_channels : int\n Number of output channels for the final block of the feature extractor.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n 
final_block_channels,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(SqueezeNext, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", SqnxtInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.Sequential()\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n stage.add_module(\"unit{}\".format(j + 1), SqnxtUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride))\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module('final_block', conv1x1_block(\n in_channels=in_channels,\n out_channels=final_block_channels,\n bias=True))\n in_channels = final_block_channels\n self.features.add_module('final_pool', nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_squeezenext(version,\n width_scale,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".torch\", \"models\"),\n **kwargs):\n \"\"\"\n Create SqueezeNext model with specific parameters.\n\n Parameters:\n ----------\n version : str\n Version of SqueezeNet ('23' or '23v5').\n width_scale : float\n Scale factor for width of layers.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n\n init_block_channels = 64\n final_block_channels = 128\n channels_per_layers = [32, 64, 128, 256]\n\n if version == '23':\n layers = [6, 6, 8, 1]\n elif version == '23v5':\n layers = [2, 4, 14, 1]\n else:\n raise ValueError(\"Unsupported SqueezeNet version {}\".format(version))\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n if width_scale != 1:\n channels = [[int(cij * width_scale) for cij in ci] for ci in channels]\n init_block_channels = int(init_block_channels * width_scale)\n final_block_channels = int(final_block_channels * width_scale)\n\n net = SqueezeNext(\n channels=channels,\n init_block_channels=init_block_channels,\n final_block_channels=final_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef sqnxt23_w1(**kwargs):\n \"\"\"\n 1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=1.0, 
model_name=\"sqnxt23_w1\", **kwargs)\n\n\ndef sqnxt23_w3d2(**kwargs):\n \"\"\"\n 1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=1.5, model_name=\"sqnxt23_w3d2\", **kwargs)\n\n\ndef sqnxt23_w2(**kwargs):\n \"\"\"\n 2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23\", width_scale=2.0, model_name=\"sqnxt23_w2\", **kwargs)\n\n\ndef sqnxt23v5_w1(**kwargs):\n \"\"\"\n 1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=1.0, model_name=\"sqnxt23v5_w1\", **kwargs)\n\n\ndef sqnxt23v5_w3d2(**kwargs):\n \"\"\"\n 1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=1.5, model_name=\"sqnxt23v5_w3d2\", **kwargs)\n\n\ndef sqnxt23v5_w2(**kwargs):\n \"\"\"\n 2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_squeezenext(version=\"23v5\", width_scale=2.0, model_name=\"sqnxt23v5_w2\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n\n pretrained = False\n\n models = [\n sqnxt23_w1,\n sqnxt23_w3d2,\n sqnxt23_w2,\n sqnxt23v5_w1,\n sqnxt23v5_w3d2,\n sqnxt23v5_w2,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.eval()\n net.train()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != sqnxt23_w1 or weight_count == 724056)\n assert (model != sqnxt23_w3d2 or weight_count == 1511824)\n assert (model != sqnxt23_w2 or weight_count == 2583752)\n assert (model != sqnxt23v5_w1 or weight_count == 921816)\n assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)\n assert (model != sqnxt23v5_w2 or weight_count == 3366344)\n\n x = torch.randn(1, 3, 224, 224)\n y = net(x)\n y.sum().backward()\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n",
"\"\"\"\n WRN for ImageNet-1K, implemented in PyTorch.\n Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.\n\"\"\"\n\n__all__ = ['WRN', 'wrn50_2']\n\nimport os\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\nclass WRNConv(nn.Module):\n \"\"\"\n WRN specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n activate : bool\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n activate):\n super(WRNConv, self).__init__()\n self.activate = activate\n\n self.conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=True)\n if self.activate:\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n if self.activate:\n x = self.activ(x)\n return x\n\n\ndef wrn_conv1x1(in_channels,\n out_channels,\n stride,\n activate):\n \"\"\"\n 1x1 version of the WRN specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n activate : bool\n Whether activate the convolution block.\n \"\"\"\n return WRNConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=stride,\n padding=0,\n activate=activate)\n\n\ndef wrn_conv3x3(in_channels,\n out_channels,\n stride,\n activate):\n \"\"\"\n 3x3 version of the WRN specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n activate : bool\n Whether activate the convolution block.\n \"\"\"\n return WRNConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n padding=1,\n activate=activate)\n\n\nclass WRNBottleneck(nn.Module):\n \"\"\"\n WRN bottleneck block for residual path in WRN unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n width_factor : float\n Wide scale factor for width of layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n width_factor):\n super(WRNBottleneck, self).__init__()\n mid_channels = int(round(out_channels // 4 * width_factor))\n\n self.conv1 = wrn_conv1x1(\n in_channels=in_channels,\n out_channels=mid_channels,\n stride=1,\n activate=True)\n self.conv2 = wrn_conv3x3(\n in_channels=mid_channels,\n out_channels=mid_channels,\n stride=stride,\n activate=True)\n self.conv3 = wrn_conv1x1(\n in_channels=mid_channels,\n out_channels=out_channels,\n stride=1,\n activate=False)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass WRNUnit(nn.Module):\n \"\"\"\n WRN unit with residual connection.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n 
width_factor : float\n Wide scale factor for width of layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n width_factor):\n super(WRNUnit, self).__init__()\n self.resize_identity = (in_channels != out_channels) or (stride != 1)\n\n self.body = WRNBottleneck(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n width_factor=width_factor)\n if self.resize_identity:\n self.identity_conv = wrn_conv1x1(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n activate=False)\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.body(x)\n x = x + identity\n x = self.activ(x)\n return x\n\n\nclass WRNInitBlock(nn.Module):\n \"\"\"\n WRN specific initial block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(WRNInitBlock, self).__init__()\n self.conv = WRNConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n activate=True)\n self.pool = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n padding=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass WRN(nn.Module):\n \"\"\"\n WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n width_factor : float\n Wide scale factor for width of layers.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n width_factor,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(WRN, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", WRNInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.Sequential()\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n stage.add_module(\"unit{}\".format(j + 1), WRNUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n width_factor=width_factor))\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module('final_pool', nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_wrn(blocks,\n width_factor,\n model_name=None,\n pretrained=False,\n root=os.path.join(\"~\", \".torch\", \"models\"),\n **kwargs):\n \"\"\"\n Create WRN model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n 
width_factor : float\n Wide scale factor for width of layers.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n if blocks == 50:\n layers = [3, 4, 6, 3]\n elif blocks == 101:\n layers = [3, 4, 23, 3]\n elif blocks == 152:\n layers = [3, 8, 36, 3]\n elif blocks == 200:\n layers = [3, 24, 36, 3]\n else:\n raise ValueError(\"Unsupported WRN with number of blocks: {}\".format(blocks))\n\n init_block_channels = 64\n channels_per_layers = [256, 512, 1024, 2048]\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n net = WRN(\n channels=channels,\n init_block_channels=init_block_channels,\n width_factor=width_factor,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef wrn50_2(**kwargs):\n \"\"\"\n WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_wrn(blocks=50, width_factor=2.0, model_name=\"wrn50_2\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n\n pretrained = False\n\n models = [\n wrn50_2,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.train()\n net.eval()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != wrn50_2 or weight_count == 68849128)\n\n x = torch.randn(1, 3, 224, 224)\n y = net(x)\n y.sum().backward()\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n"
] |
[
[
"numpy.zeros"
],
[
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.randn"
],
[
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.randn"
]
] |
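Both PyTorch files in the entry above (SqueezeNext and WRN) build their backbone the same way: expand a per-stage channel list, then stack units where only the first unit of each non-initial stage downsamples with stride 2. A minimal sketch of that construction pattern follows; the ToyUnit block is a throwaway placeholder, not either model's real unit, and the widths are borrowed from the SqNxt-23 configuration only for illustration.

import torch
import torch.nn as nn


class ToyUnit(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                              stride=stride, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))


layers = [6, 6, 8, 1]                     # units per stage
channels_per_layers = [32, 64, 128, 256]  # output width of each stage
channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]

features = nn.Sequential()
in_channels = 64                          # assumed init-block width
for i, channels_per_stage in enumerate(channels):
    stage = nn.Sequential()
    for j, out_channels in enumerate(channels_per_stage):
        stride = 2 if (j == 0) and (i != 0) else 1  # downsample once per later stage
        stage.add_module("unit{}".format(j + 1),
                         ToyUnit(in_channels, out_channels, stride))
        in_channels = out_channels
    features.add_module("stage{}".format(i + 1), stage)

x = torch.randn(1, 64, 56, 56)
print(features(x).shape)                  # spatial size halves at stages 2-4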
1e-to/sdc
|
[
"3ca1bd6cb2d085d9b0f464ff19e97aaf178d4304"
] |
[
"sdc/datatypes/pandas_series_functions/map.py"
] |
[
"# *****************************************************************************\n# Copyright (c) 2020, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\nimport numpy\nimport pandas\nfrom numba import prange, types\nfrom numba.targets.registry import cpu_target\n\nfrom sdc.hiframes.pd_series_ext import SeriesType\nfrom sdc.utilities.utils import sdc_overload_method\n\nfrom sdc.utilities.sdc_typing_utils import TypeChecker\n\n\n@sdc_overload_method(SeriesType, 'map')\ndef hpat_pandas_series_map(self, arg, na_action=None):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n\n Pandas API: pandas.Series.map\n\n Limitations\n -----------\n - Series data types String is currently unsupported by Intel Scalable Dataframe Compiler.\n - ``arg`` as Series is currently unsupported by Intel Scalable Dataframe Compiler.\n - ``arg`` as function should return scalar. Other types \\\n are currently unsupported by Intel Scalable Dataframe Compiler.\n - ``na_action`` is currently unsupported by Intel Scalable Dataframe Compiler.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/series_map.py\n :language: python\n :lines: 36-\n :caption: `map()` accepts a function.\n :name: ex_series_map\n\n .. command-output:: python ./series/series_map.py\n :cwd: ../../../examples\n\n .. seealso::\n\n :ref:`Series.map <pandas.Series.apply>`\n For applying more complex functions on a Series.\n :ref:`DataFrame.apply <pandas.DataFrame.apply>`\n Apply a function row-/column-wise.\n :ref:`DataFrame.applymap <pandas.DataFrame.applymap>`\n Apply a function elementwise on a whole DataFrame.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n .. 
only:: developer\n Test: python -m sdc.runtests sdc.tests.test_series -k map\n \"\"\"\n\n ty_checker = TypeChecker(\"Method map().\")\n ty_checker.check(self, SeriesType)\n\n if isinstance(arg, types.Callable):\n sig = arg.get_call_type(cpu_target.typing_context, [self.dtype], {})\n output_type = sig.return_type\n\n def impl(self, arg, na_action=None):\n input_arr = self._data\n length = len(input_arr)\n\n output_arr = numpy.empty(length, dtype=output_type)\n\n for i in prange(length):\n output_arr[i] = arg(input_arr[i])\n\n return pandas.Series(output_arr, index=self._index, name=self._name)\n\n return impl\n\n if isinstance(arg, types.DictType):\n output_type = self.dtype\n\n def impl(self, arg, na_action=None):\n input_arr = self._data\n length = len(input_arr)\n\n output_arr = numpy.empty(length, dtype=output_type)\n\n for i in prange(length):\n output_arr[i] = arg.get(input_arr[i], numpy.nan)\n\n return pandas.Series(output_arr, index=self._index, name=self._name)\n\n return impl\n"
] |
[
[
"numpy.empty",
"pandas.Series"
]
] |
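The Series.map overload above compiles down to a kernel that allocates an output array, fills it element-wise under prange, and rebuilds a pandas.Series with the original index and name. A minimal sketch of just that kernel pattern in plain numba is given below; the "* scale" body stands in for calling the mapped function, and none of the SDC typing machinery is reproduced.

import numpy
import pandas
from numba import njit, prange


@njit(parallel=True)
def _map_kernel(input_arr, scale):
    length = len(input_arr)
    output_arr = numpy.empty(length, dtype=numpy.float64)
    for i in prange(length):
        output_arr[i] = input_arr[i] * scale   # stand-in for arg(input_arr[i])
    return output_arr


series = pandas.Series([1.0, 2.0, 3.0], name="values")
mapped = pandas.Series(_map_kernel(series.values, 2.0),
                       index=series.index, name=series.name)
print(mapped)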
KaidongLi/pytorch-3d2d
|
[
"15c0bcef28de3b041c35b38cfa1093e9d23b3a53"
] |
[
"train_cls.py"
] |
[
"\"\"\"\nAuthor: Benny\nDate: Nov 2019\n\"\"\"\nfrom data_utils.ModelNetDataLoader import ModelNetDataLoader\nimport argparse\nimport numpy as np\nimport os\nimport torch\nimport datetime\nimport logging\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport sys\nimport provider\nimport importlib\nimport shutil\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\n\n\ndef parse_args():\n '''PARAMETERS'''\n parser = argparse.ArgumentParser('PointNet')\n parser.add_argument('--batch_size', type=int, default=24, help='batch size in training [default: 24]')\n parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')\n parser.add_argument('--epoch', default=200, type=int, help='number of epoch in training [default: 200]')\n parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]')\n parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]')\n parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\n parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]')\n parser.add_argument('--log_dir', type=str, default=None, help='experiment root')\n parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]')\n parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')\n return parser.parse_args()\n\ndef test(model, loader, num_class=40):\n mean_correct = []\n class_acc = np.zeros((num_class,3))\n for j, data in tqdm(enumerate(loader), total=len(loader)):\n points, target = data\n target = target[:, 0]\n points = points.transpose(2, 1)\n points, target = points.cuda(), target.cuda()\n classifier = model.eval()\n pred, _ = classifier(points)\n pred_choice = pred.data.max(1)[1]\n for cat in np.unique(target.cpu()):\n\n # kaidong mod: resolve tensor cannot be (target==cat) eq() to a numpy bug\n cat = cat.item()\n\n classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum()\n class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0])\n class_acc[cat,1]+=1\n correct = pred_choice.eq(target.long().data).cpu().sum()\n mean_correct.append(correct.item()/float(points.size()[0]))\n class_acc[:,2] = class_acc[:,0]/ class_acc[:,1]\n class_acc = np.mean(class_acc[:,2])\n instance_acc = np.mean(mean_correct)\n return instance_acc, class_acc\n\n\ndef main(args):\n def log_string(str):\n logger.info(str)\n print(str)\n\n '''HYPER PARAMETER'''\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n '''CREATE DIR'''\n timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))\n experiment_dir = Path('./log/')\n experiment_dir.mkdir(exist_ok=True)\n experiment_dir = experiment_dir.joinpath('classification')\n experiment_dir.mkdir(exist_ok=True)\n if args.log_dir is None:\n experiment_dir = experiment_dir.joinpath(timestr)\n else:\n experiment_dir = experiment_dir.joinpath(args.log_dir)\n experiment_dir.mkdir(exist_ok=True)\n checkpoints_dir = experiment_dir.joinpath('checkpoints/')\n checkpoints_dir.mkdir(exist_ok=True)\n log_dir = experiment_dir.joinpath('logs/')\n log_dir.mkdir(exist_ok=True)\n\n '''LOG'''\n args = parse_args()\n logger = logging.getLogger(\"Model\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - 
%(message)s')\n file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n log_string('PARAMETER ...')\n log_string(args)\n\n '''DATA LOADING'''\n log_string('Load dataset ...')\n DATA_PATH = 'data/modelnet40_normal_resampled/'\n\n TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train',\n normal_channel=args.normal)\n TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test',\n normal_channel=args.normal)\n trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)\n testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)\n\n '''MODEL LOADING'''\n num_class = 40\n MODEL = importlib.import_module(args.model)\n shutil.copy('./models/%s.py' % args.model, str(experiment_dir))\n shutil.copy('./models/pointnet_util.py', str(experiment_dir))\n\n classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()\n criterion = MODEL.get_loss().cuda()\n\n try:\n checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')\n start_epoch = checkpoint['epoch']\n classifier.load_state_dict(checkpoint['model_state_dict'])\n log_string('Use pretrain model')\n except:\n log_string('No existing model, starting training from scratch...')\n start_epoch = 0\n\n\n if args.optimizer == 'Adam':\n optimizer = torch.optim.Adam(\n classifier.parameters(),\n lr=args.learning_rate,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=args.decay_rate\n )\n else:\n optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)\n global_epoch = 0\n global_step = 0\n best_instance_acc = 0.0\n best_class_acc = 0.0\n mean_correct = []\n\n '''TRANING'''\n logger.info('Start training...')\n for epoch in range(start_epoch,args.epoch):\n log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))\n\n scheduler.step()\n for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):\n points, target = data\n points = points.data.numpy()\n points = provider.random_point_dropout(points)\n points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])\n points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])\n points = torch.Tensor(points)\n target = target[:, 0]\n\n points = points.transpose(2, 1)\n points, target = points.cuda(), target.cuda()\n optimizer.zero_grad()\n\n classifier = classifier.train()\n pred, trans_feat = classifier(points)\n loss = criterion(pred, target.long(), trans_feat)\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.long().data).cpu().sum()\n mean_correct.append(correct.item() / float(points.size()[0]))\n loss.backward()\n optimizer.step()\n global_step += 1\n\n train_instance_acc = np.mean(mean_correct)\n log_string('Train Instance Accuracy: %f' % train_instance_acc)\n\n\n with torch.no_grad():\n instance_acc, class_acc = test(classifier.eval(), testDataLoader)\n\n if (instance_acc >= best_instance_acc):\n best_instance_acc = instance_acc\n best_epoch = epoch + 1\n\n if (class_acc >= best_class_acc):\n best_class_acc = class_acc\n log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))\n log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, 
best_class_acc))\n\n if (instance_acc >= best_instance_acc):\n logger.info('Save model...')\n savepath = str(checkpoints_dir) + '/best_model.pth'\n log_string('Saving at %s'% savepath)\n state = {\n 'epoch': best_epoch,\n 'instance_acc': instance_acc,\n 'class_acc': class_acc,\n 'model_state_dict': classifier.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }\n torch.save(state, savepath)\n global_epoch += 1\n\n logger.info('End of training...')\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n"
] |
[
[
"torch.optim.lr_scheduler.StepLR",
"numpy.zeros",
"torch.no_grad",
"torch.save",
"numpy.mean",
"torch.utils.data.DataLoader",
"torch.Tensor"
]
] |
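The training script above saves its best model by bundling the epoch, accuracies, and the model/optimizer state_dicts into a single file, then reloads that file to resume. A minimal sketch of that checkpoint round trip follows; the tiny model, metric values, and file name are placeholders.

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Save everything needed to resume in one dictionary.
state = {
    "epoch": 5,                                  # illustrative values only
    "instance_acc": 0.91,
    "model_state_dict": model.state_dict(),
    "optimizer_state_dict": optimizer.state_dict(),
}
torch.save(state, "best_model.pth")

# Restore the model and optimizer, and pick up the epoch counter.
checkpoint = torch.load("best_model.pth")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
start_epoch = checkpoint["epoch"]
print("resuming from epoch", start_epoch)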
adematti/obiwan
|
[
"c12b427ac5bc0e16ae836ac063fc2c009264ed25"
] |
[
"py/tests/test_batch.py"
] |
[
"import os\nimport io\nimport sys\nimport shutil\nimport importlib\n\nimport numpy as np\nimport fitsio\nimport legacypipe\nfrom legacypipe import runbrick as lprunbrick\n\nfrom obiwan import setup_logging,runbrick,SimCatalog,RunCatalog,find_file\nfrom obiwan.catalog import ListStages,Stages\nfrom obiwan.batch import TaskManager,EnvironmentManager,environment_manager,run_shell,get_pythonpath\nfrom obiwan.scripts import runlist\n\n\nsetup_logging()\n\n\ndef test_task_manager():\n\n with TaskManager(ntasks=1) as tm:\n lit = list(range(10))\n li = []\n for i in tm.iterate(lit):\n li.append(i)\n assert li == lit\n li = tm.map(lambda i: i+1,lit)\n assert li == list(range(1,len(lit)+1))\n\n\ndef test_environment_manager_runlist():\n # here we run legacypipe and obiwan for different configurations, using environment_manager and runlist scripts\n survey_dir = os.path.join(os.path.dirname(__file__), 'testcase3')\n # first create environment variables\n names_environ,shorts_environ = [],[]\n for name,short in EnvironmentManager.shorts_env.items():\n names_environ.append(name)\n shorts_environ.append(short)\n keys_version = ['LEGPIPEV'] + ['VER_%s' % short for short in EnvironmentManager.shorts_stage.values()]\n keys_version.remove('VER_TIMS') # not in < DR9.6.7\n keys_version.remove('VER_WISE') # not run\n assert 'GAIA_CAT_DIR' in names_environ\n assert 'GAIA_CAT' in shorts_environ\n\n def get_environ(nwise=4,rng=None):\n if rng is None: rng = np.random.RandomState()\n toret = {}\n for iname,(name,short) in enumerate(EnvironmentManager.shorts_env.items()):\n toret[name] = '%s_%d' % (name,iname) # fake paths\n keys = []\n for name,key in EnvironmentManager.keys_env.items():\n if name == 'UNWISE_COADDS_DIR':\n tmp = []\n for i in range(nwise):\n keys.append('UNWISD%d' % (i+1))\n tmp.append('%s_%d' % (name,rng.randint(100)))\n toret[name] = ':'.join(tmp) # fake paths\n elif name not in ['UNWISE_MODEL_SKY_DIR']:\n keys.append(key)\n toret[name] = '%s_%d' % (name,rng.randint(100)) # fake paths\n toret['GAIA_CAT_DIR'] = os.path.join(survey_dir, 'gaia')\n return toret,keys\n\n # test versions\n modules = ['legacypipe']\n configs = {}\n configs['run1'] = {}\n configs['run1']['stages'] = [('writecat',{('legacypipe','DR9.6.7')})]\n configs['run1']['environ'] = get_environ(nwise=4)\n configs['run2'] = {}\n configs['run2']['stages'] = [('tims',{('legacypipe','DR9.6.5')}),('writecat',{('legacypipe','DR9.6.5')})]\n configs['run2']['environ'] = get_environ(nwise=2)\n configs['run3'] = {}\n configs['run3']['stages'] = [('halos',{('legacypipe','DR9.6.5')}),('writecat',{('legacypipe','DR9.6.5')})]\n configs['run3']['environ'] = get_environ(nwise=10)\n\n brickname = '2447p120'\n zoom = [1020,1070,2775,2815]\n\n runbrick_args = ['--brick', brickname, '--zoom', *map(str,zoom),\n '--no-wise',\n '--survey-dir', survey_dir,\n '--threads', '1']\n\n module_dir = legacypipe.__file__\n for i in range(4): module_dir = os.path.dirname(module_dir)\n\n legacypipe_dir,pickle_dir,pickle_fn = {},{},{}\n pythonpath_modules = {}\n\n # clear os.environ for pytest\n for run,config in configs.items():\n for key in list(config['environ'][0].keys()) + ['GAIA_CAT_VER']:\n if key in os.environ: del os.environ[key]\n legacypipe_dir[run] = 'out-testcase3-legacypipe-%s' % run\n pickle_dir[run] = 'pickles_%s' % run\n pickle_fn[run] = os.path.join(pickle_dir[run],'runbrick-%(brick)s-%%(stage)s.pickle')\n for stage,versions in config['stages']:\n for module,version in versions:\n if module in ['legacypipe','obiwan']:\n path = 
os.path.join(module_dir,'%s_%s' % (module,version),'py')\n assert os.path.isdir(path)\n pythonpath_modules[(module,version)] = path\n # clear sys.path for pytest\n if path in sys.path: sys.path.remove(path)\n\n environ = os.environ.copy()\n os.environ['GAIA_CAT_VER'] = '2'\n\n # first run legacypipe\n for run,config in configs.items():\n\n shutil.rmtree(pickle_dir[run],ignore_errors=True)\n assert not os.path.isdir(pickle_dir[run])\n\n os.environ.update(config['environ'][0])\n\n for stage,versions in config['stages']:\n for module,version in versions:\n path = pythonpath_modules[(module,version)]\n sys.path.insert(0,path)\n m = importlib.reload(importlib.import_module(module))\n assert m.__file__ == os.path.join(path,module,'__init__.py')\n\n args = runbrick_args + ['--outdir',legacypipe_dir[run],'--pickle',pickle_fn[run]]\n if stage == 'writecat':\n args += ['--no-write']\n else:\n args += ['--stage',stage]\n lprunbrick.main(args)\n\n shutil.rmtree(pickle_dir[run],ignore_errors=True)\n assert not os.path.isdir(pickle_dir[run])\n os.environ = environ.copy()\n\n def get_env(header,keys_environ):\n env = {}\n for key in header:\n if header[key] in shorts_environ:\n env[header[key]] = header[key.replace('DEPNAM','DEPVER')]\n for key in keys_version + keys_environ:\n env[key] = header[key]\n return env\n\n def add_syspath(pythonpath):\n pythonpath = pythonpath.copy()\n for path in sys.path:\n if path not in pythonpath:\n pythonpath.append(path)\n sys.path = pythonpath\n\n # check EnvironmentManager works\n for irun,(run,config) in enumerate(configs.items()):\n\n shutil.rmtree(pickle_dir[run],ignore_errors=True)\n assert not os.path.isdir(pickle_dir[run])\n\n legacypipe_fn = find_file(base_dir=legacypipe_dir[run],filetype='tractor',source='legacypipe',brickname=brickname)\n header_legacypipe = fitsio.read_header(legacypipe_fn)\n #print(header_legacypipe)\n keys_environ = config['environ'][1]\n env_legacypipe = get_env(header_legacypipe,keys_environ=keys_environ)\n assert len(env_legacypipe) == len(shorts_environ) + len(keys_version) + len(keys_environ)\n for stage,versions in config['stages']:\n for module,version in versions:\n if module == 'legacypipe':\n if stage == 'tims':\n assert env_legacypipe['LEGPIPEV'] == version\n else:\n assert env_legacypipe['VER_%s' % EnvironmentManager.shorts_stage[stage]] == version\n tractor_legacypipe = SimCatalog(legacypipe_fn)\n\n output_dirs = []\n for i in range(1,5):\n output_dir = 'out-testcase3-obiwan-%d' % i\n shutil.rmtree(output_dir,ignore_errors=True)\n output_dirs.append(output_dir)\n\n for stage,version in config['stages']:\n\n # with pickle; if irun != 0, try obiwan default option which consists in saving pickle in obiwan file structure\n args = runbrick_args.copy()\n if irun == 0: args += ['--pickle',pickle_fn[run]]\n if stage == 'writecat':\n args += ['--no-write']\n else:\n args += ['--stage',stage]\n\n # environment from legacypipe tractor header\n with EnvironmentManager(base_dir=legacypipe_dir[run],brickname=brickname) as em:\n tmppythonpath = get_pythonpath(module_dir,[(module,em.get_module_version(module,stage=stage)) for module in modules],full=False)\n add_syspath(tmppythonpath)\n importlib.reload(legacypipe)\n runbrick.main(args=args + ['--outdir',output_dirs[0]])\n\n assert os.environ == environ\n\n # environment from obiwan tractor header\n with EnvironmentManager(base_dir=output_dirs[0],brickname=brickname,source='obiwan') as em:\n tmppythonpath = get_pythonpath(module_dir,[(module,em.get_module_version(module,stage=stage)) for 
module in modules],full=True)\n add_syspath(tmppythonpath)\n importlib.reload(legacypipe)\n runbrick.main(args=args + ['--outdir',output_dirs[1]])\n\n assert os.environ == environ\n\n # runbrick environment handling\n runbrick.main(args=args + ['--outdir',output_dirs[2]] + ['--env-header',legacypipe_fn])\n\n assert os.environ == environ\n\n args = ['--module-dir',module_dir,'--outdir',legacypipe_dir[run],'--brick',brickname,'--full-pythonpath']\n if stage != 'writecat': args += ['--stage',stage]\n\n old_stdout = sys.stdout\n sys.stdout = buffer = io.StringIO()\n environment_manager.main(args)\n sys.stdout = old_stdout\n env = buffer.getvalue().split('\\n')[:-1] # last is empty string\n\n env_shell = run_shell(['python',environment_manager.__file__] + args + ['2> /dev/null']).split('\\n')[:-1]\n assert env_shell[1:] == env[1:]\n pythonpath = env[0][len('PYTHONPATH='):].split(':')\n assert pythonpath == tmppythonpath\n\n # remove for pytest\n pythonpath = env_shell[0]\n assert pythonpath.startswith('PYTHONPATH=')\n pythonpath = pythonpath[len('PYTHONPATH='):].split(':')\n assert pythonpath == tmppythonpath\n\n for e in env_shell[1:]:\n key,val = e.split('=')\n assert config['environ'][0][key] == val\n\n shutil.rmtree(pickle_dir[run],ignore_errors=True)\n list_fn = 'runlist.txt'\n try:\n os.remove(list_fn)\n except OSError:\n pass\n assert runlist.main(['--outdir','.']) is None\n runcat = runlist.main(['--outdir',legacypipe_dir[run],'--modules'] + modules)\n assert not os.path.exists(list_fn)\n list_fn = os.path.join(output_dirs[3],'runlist.txt')\n run_shell(['python',runlist.__file__] + ['--outdir',output_dirs[0],'--source','obiwan','--write-list',list_fn,'--modules'] + modules)\n runcat2 = RunCatalog.from_list(list_fn)\n assert runcat2 == runcat\n os.remove(list_fn)\n runcat2 = runlist.main(['--outdir',legacypipe_dir[run],'--source','legacypipe'])\n assert np.all(runcat2.stagesid == 0) and runcat2.get_list_stages() == ListStages([Stages()]) # only writecat, no version\n\n for run in runcat:\n command = []\n for stage,versions in run.stages.items():\n tmppythonpath = 'PYTHONPATH=%s' % get_pythonpath(module_dir,versions,full=True,as_string=True)\n command += [tmppythonpath,'python',runbrick.__file__] + runbrick_args \\\n + ['--outdir',output_dirs[3],'--stage',stage,'--env-header',legacypipe_fn,';']\n #run_shell([tmppythonpath,'python',runbrick.__file__] + runbrick_args \\\n # + ['--outdir',output_dirs[3],'--stage',stage,'--env-header',legacypipe_fn])\n run_shell(command)\n # check same headers\n for iout,output_dir in enumerate(output_dirs):\n\n obiwan_fn = find_file(base_dir=output_dir,filetype='tractor',source='obiwan',brickname=brickname)\n header_obiwan = fitsio.read_header(obiwan_fn)\n env_obiwan = get_env(header_obiwan,keys_environ)\n assert env_legacypipe == env_obiwan\n tractor_obiwan = SimCatalog(obiwan_fn)\n if iout <= 2: assert tractor_obiwan == tractor_legacypipe\n\n #print(header_legacypipe)\n"
] |
[
[
"numpy.all",
"numpy.random.RandomState"
]
] |
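The batch test above repeatedly mutates os.environ and sys.path to run code under different module versions, then restores both so later runs see a clean interpreter state. A minimal sketch of that save/mutate/restore pattern as a context manager is shown here; it is a generic helper, not obiwan's EnvironmentManager, and the path and variable names are placeholders.

import os
import sys
from contextlib import contextmanager


@contextmanager
def temporary_environment(env_updates, extra_path=None):
    saved_environ = os.environ.copy()
    saved_path = list(sys.path)
    try:
        os.environ.update(env_updates)
        if extra_path is not None:
            sys.path.insert(0, extra_path)   # make a specific module version importable
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)     # restore the environment exactly
        sys.path[:] = saved_path             # and the import path


with temporary_environment({"GAIA_CAT_VER": "2"}, extra_path="/tmp/legacypipe_DR9.6.7/py"):
    print(os.environ["GAIA_CAT_VER"], sys.path[0])
# os.environ and sys.path are back to their previous contents here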
Tsaousis/acme
|
[
"14278693bcc5fef0839ac60792d452d3d80acfd7"
] |
[
"acme/agents/jax/ail/networks.py"
] |
[
"# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Networks definitions for the BC agent.\n\nAIRL network architecture follows https://arxiv.org/pdf/1710.11248.pdf.\n\"\"\"\nimport dataclasses\nimport functools\nfrom typing import Any, Callable, Generic, Iterable, Optional\n\nfrom acme import specs\nfrom acme import types\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\nfrom acme.jax.imitation_learning_types import DirectRLNetworks\nimport haiku as hk\nimport jax\nfrom jax import numpy as jnp\nimport numpy as np\n\n# Function from discriminator logit to imitation reward.\nImitationRewardFn = Callable[[networks_lib.Logits], jnp.ndarray]\nState = networks_lib.Params\n\n\n@dataclasses.dataclass\nclass AILNetworks(Generic[DirectRLNetworks]):\n \"\"\"AIL networks data class.\n\n Attributes:\n discriminator_network: Networks which takes as input:\n (observations, actions, next_observations, direct_rl_params)\n to return the logit of the discriminator.\n If the discriminator does not need direct_rl_params you can pass ().\n imitation_reward_fn: Function from logit of the discriminator to imitation\n reward.\n direct_rl_networks: Networks of the direct RL algorithm.\n \"\"\"\n discriminator_network: networks_lib.FeedForwardNetwork\n imitation_reward_fn: ImitationRewardFn\n direct_rl_networks: DirectRLNetworks\n\n\ndef compute_ail_reward(discriminator_params: networks_lib.Params,\n discriminator_state: State,\n policy_params: Optional[networks_lib.Params],\n transitions: types.Transition,\n networks: AILNetworks) -> jnp.ndarray:\n \"\"\"Computes the AIL reward for a given transition.\n\n Args:\n discriminator_params: Parameters of the discriminator network.\n discriminator_state: State of the discriminator network.\n policy_params: Parameters of the direct RL policy.\n transitions: Transitions to compute the reward for.\n networks: AIL networks.\n\n Returns:\n The rewards as an ndarray.\n \"\"\"\n logits, _ = networks.discriminator_network.apply(\n discriminator_params,\n policy_params,\n discriminator_state,\n transitions,\n is_training=False,\n rng=None)\n return networks.imitation_reward_fn(logits)\n\n\nclass SpectralNormalizedLinear(hk.Module):\n \"\"\"SpectralNormalizedLinear module.\n\n This is a Linear layer with a upper-bounded Lipschitz. It is used in iResNet.\n\n Reference:\n Behrmann et al. Invertible Residual Networks. ICML 2019.\n https://arxiv.org/pdf/1811.00995.pdf\n \"\"\"\n\n def __init__(\n self,\n output_size: int,\n lipschitz_coeff: float,\n with_bias: bool = True,\n w_init: Optional[hk.initializers.Initializer] = None,\n b_init: Optional[hk.initializers.Initializer] = None,\n name: Optional[str] = None,\n ):\n \"\"\"Constructs the SpectralNormalizedLinear module.\n\n Args:\n output_size: Output dimensionality.\n lipschitz_coeff: Spectral normalization coefficient.\n with_bias: Whether to add a bias to the output.\n w_init: Optional initializer for weights. 
By default, uses random values\n from truncated normal, with stddev ``1 / sqrt(fan_in)``. See\n https://arxiv.org/abs/1502.03167v3.\n b_init: Optional initializer for bias. By default, zero.\n name: Name of the module.\n \"\"\"\n super().__init__(name=name)\n self.input_size = None\n self.output_size = output_size\n self.with_bias = with_bias\n self.w_init = w_init\n self.b_init = b_init or jnp.zeros\n self.lipschitz_coeff = lipschitz_coeff\n self.num_iterations = 100\n self.eps = 1e-6\n\n def get_normalized_weights(self,\n weights: jnp.ndarray,\n renormalize: bool = False) -> jnp.ndarray:\n\n def _l2_normalize(x, axis=None, eps=1e-12):\n return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps)\n\n output_size = self.output_size\n dtype = weights.dtype\n assert output_size == weights.shape[-1]\n sigma = hk.get_state('sigma', (), init=jnp.ones)\n if renormalize:\n # Power iterations to compute spectral norm V*W*U^T.\n u = hk.get_state(\n 'u', (1, output_size), dtype, init=hk.initializers.RandomNormal())\n for _ in range(self.num_iterations):\n v = _l2_normalize(jnp.matmul(u, weights.transpose()), eps=self.eps)\n u = _l2_normalize(jnp.matmul(v, weights), eps=self.eps)\n u = jax.lax.stop_gradient(u)\n v = jax.lax.stop_gradient(v)\n sigma = jnp.matmul(jnp.matmul(v, weights), jnp.transpose(u))[0, 0]\n hk.set_state('u', u)\n hk.set_state('v', v)\n hk.set_state('sigma', sigma)\n factor = jnp.maximum(1, sigma / self.lipschitz_coeff)\n return weights / factor\n\n def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes a linear transform of the input.\"\"\"\n if not inputs.shape:\n raise ValueError('Input must not be scalar.')\n\n input_size = self.input_size = inputs.shape[-1]\n output_size = self.output_size\n dtype = inputs.dtype\n\n w_init = self.w_init\n if w_init is None:\n stddev = 1. / np.sqrt(self.input_size)\n w_init = hk.initializers.TruncatedNormal(stddev=stddev)\n w = hk.get_parameter('w', [input_size, output_size], dtype, init=w_init)\n w = self.get_normalized_weights(w, renormalize=True)\n\n out = jnp.dot(inputs, w)\n\n if self.with_bias:\n b = hk.get_parameter('b', [self.output_size], dtype, init=self.b_init)\n b = jnp.broadcast_to(b, out.shape)\n out = out + b\n\n return out\n\n\nclass DiscriminatorMLP(hk.Module):\n \"\"\"A multi-layer perceptron module.\"\"\"\n\n def __init__(\n self,\n hidden_layer_sizes: Iterable[int],\n w_init: Optional[hk.initializers.Initializer] = None,\n b_init: Optional[hk.initializers.Initializer] = None,\n with_bias: bool = True,\n activation: Callable[[jnp.ndarray], jnp.ndarray] = jax.nn.relu,\n input_dropout_rate: float = 0.,\n hidden_dropout_rate: float = 0.,\n spectral_normalization_lipschitz_coeff: Optional[float] = None,\n name: Optional[str] = None\n ):\n \"\"\"Constructs an MLP.\n\n Args:\n hidden_layer_sizes: Hiddent layer sizes.\n w_init: Initializer for :class:`~haiku.Linear` weights.\n b_init: Initializer for :class:`~haiku.Linear` bias. Must be ``None`` if\n ``with_bias=False``.\n with_bias: Whether or not to apply a bias in each layer.\n activation: Activation function to apply between :class:`~haiku.Linear`\n layers. 
Defaults to ReLU.\n input_dropout_rate: Dropout on the input.\n hidden_dropout_rate: Dropout on the hidden layer outputs.\n spectral_normalization_lipschitz_coeff: If not None, the network will have\n spectral normalization with the given constant.\n name: Optional name for this module.\n\n Raises:\n ValueError: If ``with_bias`` is ``False`` and ``b_init`` is not ``None``.\n \"\"\"\n if not with_bias and b_init is not None:\n raise ValueError('When with_bias=False b_init must not be set.')\n\n super().__init__(name=name)\n self._activation = activation\n self._input_dropout_rate = input_dropout_rate\n self._hidden_dropout_rate = hidden_dropout_rate\n layer_sizes = list(hidden_layer_sizes) + [1]\n\n if spectral_normalization_lipschitz_coeff is not None:\n layer_lipschitz_coeff = np.power(spectral_normalization_lipschitz_coeff,\n 1. / len(layer_sizes))\n layer_module = functools.partial(\n SpectralNormalizedLinear,\n lipschitz_coeff=layer_lipschitz_coeff,\n w_init=w_init,\n b_init=b_init,\n with_bias=with_bias)\n else:\n layer_module = functools.partial(\n hk.Linear,\n w_init=w_init,\n b_init=b_init,\n with_bias=with_bias)\n\n layers = []\n for index, output_size in enumerate(layer_sizes):\n layers.append(\n layer_module(output_size=output_size, name=f'linear_{index}'))\n self._layers = tuple(layers)\n\n def __call__(\n self,\n inputs: jnp.ndarray,\n is_training: bool,\n rng: Optional[networks_lib.PRNGKey],\n ) -> networks_lib.Logits:\n rng = hk.PRNGSequence(rng) if rng is not None else None\n\n out = inputs\n for i, layer in enumerate(self._layers):\n if is_training:\n dropout_rate = (\n self._input_dropout_rate if i == 0 else self._hidden_dropout_rate)\n out = hk.dropout(next(rng), dropout_rate, out)\n out = layer(out)\n if i < len(self._layers) - 1:\n out = self._activation(out)\n\n return out\n\n\nclass DiscriminatorModule(hk.Module):\n \"\"\"Discriminator module that concatenates its inputs.\"\"\"\n\n def __init__(self,\n environment_spec: specs.EnvironmentSpec,\n use_action: bool,\n use_next_obs: bool,\n network_core: Callable[..., Any],\n observation_embedding: Callable[[networks_lib.Observation],\n jnp.ndarray] = lambda x: x,\n name='discriminator'):\n super().__init__(name=name)\n self._use_action = use_action\n self._environment_spec = environment_spec\n self._use_next_obs = use_next_obs\n self._network_core = network_core\n self._observation_embedding = observation_embedding\n\n def __call__(self, observations: networks_lib.Observation,\n actions: networks_lib.Action,\n next_observations: networks_lib.Observation, is_training: bool,\n rng: networks_lib.PRNGKey) -> networks_lib.Logits:\n observations = self._observation_embedding(observations)\n if self._use_next_obs:\n next_observations = self._observation_embedding(next_observations)\n data = jnp.concatenate([observations, next_observations], axis=-1)\n else:\n data = observations\n if self._use_action:\n action_spec = self._environment_spec.actions\n if isinstance(action_spec, specs.DiscreteArray):\n actions = jax.nn.one_hot(actions,\n action_spec.num_values)\n data = jnp.concatenate([data, actions], axis=-1)\n output = self._network_core(data, is_training, rng)\n output = jnp.squeeze(output, axis=-1)\n return output\n\n\nclass AIRLModule(hk.Module):\n \"\"\"AIRL Module.\"\"\"\n\n def __init__(self,\n environment_spec: specs.EnvironmentSpec,\n use_action: bool,\n use_next_obs: bool,\n discount: float,\n g_core: Callable[..., Any],\n h_core: Callable[..., Any],\n observation_embedding: Callable[[networks_lib.Observation],\n 
jnp.ndarray] = lambda x: x,\n name='airl'):\n super().__init__(name=name)\n self._environment_spec = environment_spec\n self._use_action = use_action\n self._use_next_obs = use_next_obs\n self._discount = discount\n self._g_core = g_core\n self._h_core = h_core\n self._observation_embedding = observation_embedding\n\n def __call__(self, observations: networks_lib.Observation,\n actions: networks_lib.Action,\n next_observations: networks_lib.Observation,\n is_training: bool,\n rng: networks_lib.PRNGKey) -> networks_lib.Logits:\n g_output = DiscriminatorModule(\n environment_spec=self._environment_spec,\n use_action=self._use_action,\n use_next_obs=self._use_next_obs,\n network_core=self._g_core,\n observation_embedding=self._observation_embedding,\n name='airl_g')(observations, actions, next_observations, is_training,\n rng)\n h_module = DiscriminatorModule(\n environment_spec=self._environment_spec,\n use_action=False,\n use_next_obs=False,\n network_core=self._h_core,\n observation_embedding=self._observation_embedding,\n name='airl_h')\n return (g_output + self._discount * h_module(next_observations, (),\n (), is_training, rng) -\n h_module(observations, (), (), is_training, rng))\n\n\n# TODO(eorsini): Manipulate FeedForwardNetworks instead of transforms to\n# increase compatibility with Flax.\ndef make_discriminator(\n environment_spec: specs.EnvironmentSpec,\n discriminator_transformed: hk.TransformedWithState,\n logpi_fn: Optional[Callable[\n [networks_lib.Params, networks_lib.Observation, networks_lib.Action],\n jnp.ndarray]] = None\n) -> networks_lib.FeedForwardNetwork:\n \"\"\"Creates the discriminator network.\n\n Args:\n environment_spec: Environment spec\n discriminator_transformed: Haiku transformed of the discriminator.\n logpi_fn: If the policy logpi function is provided, its output will be\n removed from the discriminator logit.\n\n Returns:\n The network.\n \"\"\"\n\n def apply_fn(params: hk.Params,\n policy_params: networks_lib.Params,\n state: hk.State,\n transitions: types.Transition,\n is_training: bool,\n rng: networks_lib.PRNGKey) -> networks_lib.Logits:\n output, state = discriminator_transformed.apply(\n params, state, transitions.observation, transitions.action,\n transitions.next_observation, is_training, rng)\n if logpi_fn is not None:\n logpi = logpi_fn(policy_params, transitions.observation,\n transitions.action)\n\n # Quick Maths:\n # D = exp(output)/(exp(output) + pi(a|s))\n # logit(D) = log(D/(1-D)) = log(exp(output)/pi(a|s))\n # logit(D) = output - logpi\n return output - logpi, state\n return output, state\n\n dummy_obs = utils.zeros_like(environment_spec.observations)\n dummy_obs = utils.add_batch_dim(dummy_obs)\n dummy_actions = utils.zeros_like(environment_spec.actions)\n dummy_actions = utils.add_batch_dim(dummy_actions)\n\n return networks_lib.FeedForwardNetwork(\n # pylint: disable=g-long-lambda\n init=lambda rng: discriminator_transformed.init(\n rng, dummy_obs, dummy_actions, dummy_obs, False, rng),\n apply=apply_fn)\n"
] |
[
[
"numpy.sqrt"
]
] |
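The SpectralNormalizedLinear layer in the row above bounds the layer's Lipschitz constant by estimating the spectral norm of its weight matrix with power iteration and dividing the weights by max(1, sigma / lipschitz_coeff). Below is a minimal NumPy sketch of that same computation, with the Haiku parameter/state handling omitted; the function name spectrally_normalize and the example matrix are illustrative only, not part of the acme API.

import numpy as np

def _l2_normalize(x, eps=1e-12):
    # Same normalization as the layer's helper: divide by the vector's L2 norm.
    return x / np.sqrt((x * x).sum() + eps)

def spectrally_normalize(weights, lipschitz_coeff, num_iterations=100, seed=0):
    """Rescale weights so their spectral norm does not exceed lipschitz_coeff."""
    rng = np.random.default_rng(seed)
    # Power-iteration vectors for the top singular value of `weights`.
    u = rng.normal(size=(1, weights.shape[-1]))
    for _ in range(num_iterations):
        v = _l2_normalize(u @ weights.T)   # shape (1, fan_in)
        u = _l2_normalize(v @ weights)     # shape (1, fan_out)
    sigma = (v @ weights @ u.T).item()     # spectral-norm estimate
    factor = max(1.0, sigma / lipschitz_coeff)  # only ever shrinks the weights
    return weights / factor

w = np.random.default_rng(1).normal(size=(4, 3))        # fan_in=4, fan_out=3
w_sn = spectrally_normalize(w, lipschitz_coeff=0.9)
print(np.linalg.norm(w_sn, ord=2))                      # roughly <= 0.9

The max(1, ·) factor means layers whose spectral norm is already below the target coefficient are left untouched, which matches the behavior of get_normalized_weights above.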
FabianKamp/neurolib
|
[
"970bcb02c54e635dcba21f3635c12ccf5df9d36b"
] |
[
"neurolib/optimize/exploration/exploration.py"
] |
[
"import copy\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport pathlib\n\nimport numpy as np\nimport pandas as pd\nimport psutil\nimport pypet\nimport tqdm\nimport xarray as xr\n\nfrom ...utils import paths\nfrom ...utils import pypetUtils as pu\nfrom ...utils.collections import dotdict, flat_dict_to_nested, flatten_nested_dict, unwrap_star_dotdict\n\n\nclass BoxSearch:\n \"\"\"\n Paremter box search for a given model and a range of parameters.\n \"\"\"\n\n def __init__(self, model=None, parameterSpace=None, evalFunction=None, filename=None, saveAllModelOutputs=False):\n \"\"\"Either a model has to be passed, or an evalFunction. If an evalFunction\n is passed, then the evalFunction will be called and the model is accessible to the\n evalFunction via `self.getModelFromTraj(traj)`. The parameters of the current\n run are accible via `self.getParametersFromTraj(traj)`.\n\n If no evaluation function is passed, then the model is simulated using `Model.run()`\n for every parameter.\n\n :param model: Model to run for each parameter (or model to pass to the evaluation funciton if an evaluation\n function is used), defaults to None\n :type model: `neurolib.models.model.Model`, optional\n :param parameterSpace: Parameter space to explore, defaults to None\n :type parameterSpace: `neurolib.utils.parameterSpace.ParameterSpace`, optional\n :param evalFunction: Evaluation function to call for each run., defaults to None\n :type evalFunction: function, optional\n :param filename: HDF5 storage file name, if left empty, defaults to ``exploration.hdf``\n :type filename: str\n :param saveAllModelOutputs: If True, save all outputs of model, else only default output of the model will be\n saved. Note: if saveAllModelOutputs==False and the model's parameter model.params['bold']==True, then BOLD\n output will be saved as well, defaults to False\n :type saveAllModelOutputs: bool\n \"\"\"\n self.model = model\n if evalFunction is None and model is not None:\n self.evalFunction = self._runModel\n elif evalFunction is not None:\n self.evalFunction = evalFunction\n\n assert (evalFunction is not None) or (\n model is not None\n ), \"Either a model has to be specified or an evalFunction.\"\n\n assert parameterSpace is not None, \"No parameters to explore.\"\n\n self.parameterSpace = parameterSpace\n self.exploreParameters = parameterSpace.dict()\n\n # TODO: use random ICs for every explored point or rather reuse the ones that are generated at model\n # initialization\n self.useRandomICs = False\n\n filename = filename or \"exploration.hdf\"\n self.filename = filename\n\n self.saveAllModelOutputs = saveAllModelOutputs\n\n # bool to check whether pypet was initialized properly\n self.initialized = False\n self._initializeExploration(self.filename)\n\n self.results = None\n\n def _initializeExploration(self, filename=\"exploration.hdf\"):\n \"\"\"Initialize the pypet environment\n\n :param filename: hdf filename to store the results in , defaults to \"exploration.hdf\"\n :type filename: str, optional\n \"\"\"\n # create hdf file path if it does not exist yet\n pathlib.Path(paths.HDF_DIR).mkdir(parents=True, exist_ok=True)\n\n # set default hdf filename\n self.HDF_FILE = os.path.join(paths.HDF_DIR, filename)\n\n # initialize pypet environment\n trajectoryName = \"results\" + datetime.datetime.now().strftime(\"-%Y-%m-%d-%HH-%MM-%SS\")\n trajectoryfilename = self.HDF_FILE\n\n nprocesses = multiprocessing.cpu_count()\n logging.info(\"Number of processes: {}\".format(nprocesses))\n\n # set up the 
pypet environment\n env = pypet.Environment(\n trajectory=trajectoryName,\n filename=trajectoryfilename,\n multiproc=True,\n ncores=nprocesses,\n complevel=9,\n log_config=paths.PYPET_LOGGING_CONFIG,\n )\n self.env = env\n # Get the trajectory from the environment\n self.traj = env.trajectory\n self.trajectoryName = self.traj.v_name\n\n # Add all parameters to the pypet trajectory\n if self.model is not None:\n # if a model is specified, use the default parameter of the\n # model to initialize pypet\n self._addParametersToPypet(self.traj, self.model.params)\n else:\n # else, use a random parameter of the parameter space\n self._addParametersToPypet(self.traj, self.parameterSpace.getRandom(safe=True))\n\n # Tell pypet which parameters to explore\n self.pypetParametrization = pypet.cartesian_product(self.exploreParameters)\n # explicitely add all parameters within star notation, hence unwrap star notation into actual params names\n if self.parameterSpace.star:\n assert self.model is not None, \"With star notation, model cannot be None\"\n self.pypetParametrization = unwrap_star_dotdict(self.pypetParametrization, self.model)\n self.nRuns = len(self.pypetParametrization[list(self.pypetParametrization.keys())[0]])\n logging.info(f\"Number of parameter configurations: {self.nRuns}\")\n\n self.traj.f_explore(self.pypetParametrization)\n\n # initialization done\n logging.info(\"BoxSearch: Environment initialized.\")\n self.initialized = True\n\n def _addParametersToPypet(self, traj, params):\n \"\"\"This function registers the parameters of the model to Pypet.\n Parameters can be nested dictionaries. They are unpacked and stored recursively.\n\n :param traj: Pypet trajectory to store the parameters in\n :type traj: `pypet.trajectory.Trajectory`\n :param params: Parameter dictionary\n :type params: dict, dict[dict,]\n \"\"\"\n\n def addParametersRecursively(traj, params, current_level):\n # make dummy list if just string\n if isinstance(current_level, str):\n current_level = [current_level]\n # iterate dict\n for key, value in params.items():\n # if another dict - recurse and increase level\n if isinstance(value, dict):\n addParametersRecursively(traj, value, current_level + [key])\n else:\n param_address = \".\".join(current_level + [key])\n value = \"None\" if value is None else value\n traj.f_add_parameter(param_address, value)\n\n addParametersRecursively(traj, params, [])\n\n def saveToPypet(self, outputs, traj):\n \"\"\"This function takes simulation results in the form of a nested dictionary\n and stores all data into the pypet hdf file.\n\n :param outputs: Simulation outputs as a dictionary.\n :type outputs: dict\n :param traj: Pypet trajectory\n :type traj: `pypet.trajectory.Trajectory`\n \"\"\"\n\n def makeSaveStringForPypet(value, savestr):\n \"\"\"Builds the pypet-style results string from the results\n dictionary's keys.\n \"\"\"\n for k, v in value.items():\n if isinstance(v, dict):\n _savestr = savestr + k + \".\"\n makeSaveStringForPypet(v, _savestr)\n else:\n _savestr = savestr + k\n self.traj.f_add_result(_savestr, v)\n\n assert isinstance(outputs, dict), \"Outputs must be an instance of dict.\"\n value = outputs\n savestr = \"results.$.\"\n makeSaveStringForPypet(value, savestr)\n\n def _runModel(self, traj):\n \"\"\"If not evaluation function is given, we assume that a model will be simulated.\n This function will be called by pypet directly and therefore wants a pypet trajectory as an argument\n\n :param traj: Pypet trajectory\n :type traj: `pypet.trajectory.Trajectory`\n 
\"\"\"\n if self.useRandomICs:\n logging.warn(\"Random initial conditions not implemented yet\")\n # get parameters of this run from pypet trajectory\n runParams = self.getParametersFromTraj(traj)\n if self.parameterSpace.star:\n runParams = flatten_nested_dict(flat_dict_to_nested(runParams)[\"parameters\"])\n\n # set the parameters for the model\n self.model.params.update(runParams)\n\n # get kwargs from Exploration.run()\n runKwargs = {}\n if hasattr(self, \"runKwargs\"):\n runKwargs = self.runKwargs\n # run it\n self.model.run(**runKwargs)\n # save outputs\n self._saveModelOutputsToPypet(traj)\n\n def _saveModelOutputsToPypet(self, traj):\n # save all data to the pypet trajectory\n if self.saveAllModelOutputs:\n # save all results from exploration\n self.saveToPypet(self.model.outputs, traj)\n else:\n # save only the default output\n self.saveToPypet(\n {\n self.model.default_output: self.model.output,\n \"t\": self.model.outputs[\"t\"],\n },\n traj,\n )\n # save BOLD output\n # if \"bold\" in self.model.params:\n # if self.model.params[\"bold\"] and \"BOLD\" in self.model.outputs:\n # self.saveToPypet(self.model.outputs[\"BOLD\"], traj)\n if \"BOLD\" in self.model.outputs:\n self.saveToPypet(self.model.outputs[\"BOLD\"], traj)\n\n def _validatePypetParameters(self, runParams):\n \"\"\"Helper to handle None's in pypet parameters\n (used for random number generator seed)\n\n :param runParams: parameters as returned by traj.parameters.f_to_dict()\n :type runParams: dict of pypet.parameter.Parameter\n \"\"\"\n\n # fix rng seed, which is saved as a string if None\n if \"seed\" in runParams:\n if runParams[\"seed\"] == \"None\":\n runParams[\"seed\"] = None\n return runParams\n\n def getParametersFromTraj(self, traj):\n \"\"\"Returns the parameters of the current run as a (dot.able) dictionary\n\n :param traj: Pypet trajectory\n :type traj: `pypet.trajectory.Trajectory`\n :return: Parameter set of the current run\n :rtype: dict\n \"\"\"\n # DO NOT use short names for star notation dicts\n runParams = self.traj.parameters.f_to_dict(short_names=not self.parameterSpace.star, fast_access=True)\n runParams = self._validatePypetParameters(runParams)\n return dotdict(runParams)\n\n def getModelFromTraj(self, traj):\n \"\"\"Return the appropriate model with parameters for this run\n :params traj: Pypet trajectory of current run\n\n :returns model: Model with the parameters of this run.\n \"\"\"\n model = self.model\n runParams = self.getParametersFromTraj(traj)\n\n model.params.update(runParams)\n return model\n\n def run(self, **kwargs):\n \"\"\"\n Call this function to run the exploration\n \"\"\"\n self.runKwargs = kwargs\n assert self.initialized, \"Pypet environment not initialized yet.\"\n self._t_start_exploration = datetime.datetime.now()\n self.env.run(self.evalFunction)\n self._t_end_exploration = datetime.datetime.now()\n\n def loadResults(self, all=True, filename=None, trajectoryName=None, pypetShortNames=True, memory_cap=95.0):\n \"\"\"Load results from a hdf file of a previous simulation.\n\n :param all: Load all simulated results into memory, which will be available as the `.results` attribute. Can\n use a lot of RAM if your simulation is large, please use this with caution. 
, defaults to True\n :type all: bool, optional\n :param filename: hdf file name in which results are stored, defaults to None\n :type filename: str, optional\n :param trajectoryName: Name of the trajectory inside the hdf file, newest will be used if left empty, defaults\n to None\n :type trajectoryName: str, optional\n :param pypetShortNames: Use pypet short names as keys for the results dictionary. Use if you are experiencing\n errors due to natural naming collisions.\n :type pypetShortNames: bool\n :param memory_cap: Percentage memory cap between 0 and 100. If `all=True` is used, a memory cap can be set to\n avoid filling up the available RAM. Example: use `memory_cap = 95` to avoid loading more data if memory is\n at 95% use, defaults to 95\n :type memory_cap: float, int, optional\n \"\"\"\n\n self.loadDfResults(filename, trajectoryName)\n\n # make a list of dictionaries with results\n self.results = dotdict({})\n if all:\n logging.info(\"Loading all results to `results` dictionary ...\")\n for rInd in tqdm.tqdm(range(self.nResults), total=self.nResults):\n\n # check if enough memory is available\n if memory_cap:\n assert isinstance(memory_cap, (int, float)), \"`memory_cap` must be float.\"\n assert (memory_cap > 0) and (memory_cap < 100), \"`memory_cap` must be between 0 and 100\"\n # check ram usage with psutil\n used_memory_percent = psutil.virtual_memory()[2]\n if used_memory_percent > memory_cap:\n raise MemoryError(\n f\"Memory use is at {used_memory_percent}% and capped at {memory_cap}. Aborting.\"\n )\n\n self.pypetTrajectory.results[rInd].f_load()\n result = self.pypetTrajectory.results[rInd].f_to_dict(fast_access=True, short_names=pypetShortNames)\n result = dotdict(result)\n self.pypetTrajectory.results[rInd].f_remove()\n self.results[rInd] = copy.deepcopy(result)\n\n # Postprocess result keys if pypet short names aren't used\n # Before: results.run_00000001.outputs.rates_inh\n # After: outputs.rates_inh\n if not pypetShortNames:\n for i, r in self.results.items():\n new_dict = dotdict({})\n for key, value in r.items():\n new_key = \"\".join(key.split(\".\", 2)[2:])\n new_dict[new_key] = r[key]\n self.results[i] = copy.deepcopy(new_dict)\n\n self.aggregateResultsToDfResults()\n\n logging.info(\"All results loaded.\")\n\n def aggregateResultsToDfResults(self, arrays=True, fillna=False):\n \"\"\"Aggregate all results in to dfResults dataframe.\n\n :param arrays: Load array results (like timeseries) if True. 
If False, only load scalar results, defaults to\n True\n :type arrays: bool, optional\n :param fillna: Fill nan results (for example if they're not returned in a subset of runs) with zeros, default\n to False\n :type fillna: bool, optional\n \"\"\"\n nan_value = np.nan\n logging.info(\"Aggregating results to `dfResults` ...\")\n # for i, result in tqdm.tqdm(self.results.items()):\n\n for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)):\n # if the results were previously loaded into memory, use them\n if hasattr(self, \"results\"):\n # only if the length matches the number of results\n if len(self.results) == len(self.dfResults):\n result = self.results[runId]\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n\n for key, value in result.items():\n # only save floats, ints and arrays\n if isinstance(value, (float, int, np.ndarray)):\n # save 1-dim arrays\n if isinstance(value, np.ndarray) and arrays:\n # to save a numpy array, convert column to object type\n if key not in self.dfResults:\n self.dfResults[key] = None\n self.dfResults[key] = self.dfResults[key].astype(object)\n self.dfResults.at[runId, key] = value\n elif isinstance(value, (float, int)):\n # save numbers\n self.dfResults.loc[runId, key] = value\n else:\n self.dfResults.loc[runId, key] = nan_value\n # drop nan columns\n self.dfResults = self.dfResults.dropna(axis=\"columns\", how=\"all\")\n\n if fillna:\n self.dfResults = self.dfResults.fillna(0)\n\n def loadDfResults(self, filename=None, trajectoryName=None):\n \"\"\"Load results from a previous simulation.\n\n :param filename: hdf file name in which results are stored, defaults to None\n :type filename: str, optional\n :param trajectoryName: Name of the trajectory inside the hdf file, newest will be used if left empty, defaults\n to None\n :type trajectoryName: str, optional\n \"\"\"\n # chose HDF file to load\n filename = filename or self.HDF_FILE\n self.pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n self.nResults = len(self.pypetTrajectory.f_get_run_names())\n\n exploredParameters = self.pypetTrajectory.f_get_explored_parameters()\n\n # create pandas dataframe of all runs with parameters as keys\n logging.info(\"Creating `dfResults` dataframe ...\")\n niceParKeys = [p[11:] for p in exploredParameters.keys()]\n if not self.parameterSpace:\n niceParKeys = [p.split(\".\")[-1] for p in niceParKeys]\n self.dfResults = pd.DataFrame(columns=niceParKeys, dtype=object)\n for nicep, p in zip(niceParKeys, exploredParameters.keys()):\n self.dfResults[nicep] = exploredParameters[p].f_get_range()\n\n @staticmethod\n def _filterDictionaryBold(filt_dict, bold):\n \"\"\"Filters result dictionary: either keeps ONLY BOLD results, or remove\n BOLD results.\n\n :param filt_dict: dictionary to filter for BOLD keys\n :type filt_dict: dict\n :param bold: whether to remove BOLD keys (bold=False) or keep only BOLD\n keys (bold=True)\n :return: filtered dict, without or only BOLD keys\n :rtype: dict\n \"\"\"\n filt_dict = copy.deepcopy(filt_dict)\n if bold:\n return {k: v for k, v in filt_dict.items() if \"BOLD\" in k}\n else:\n return {k: v for k, v in filt_dict.items() if \"BOLD\" not in k}\n\n def _getCoordsFromRun(self, run_dict, bold=False):\n \"\"\"Find coordinates of a single run - time, output and space dimensions.\n\n :param run_dict: dictionary with run results\n :type run_dict: dict\n :param bold: whether 
to do only BOLD or without BOLD results\n :type bold: bool\n :return: dictionary of coordinates for xarray\n :rtype: dict\n \"\"\"\n run_dict = copy.deepcopy(run_dict)\n run_dict = self._filterDictionaryBold(run_dict, bold=bold)\n timeDictKey = \"\"\n if \"t\" in run_dict:\n timeDictKey = \"t\"\n else:\n for k in run_dict:\n if k.startswith(\"t\"):\n timeDictKey = k\n logging.info(f\"Assuming {k} to be the time axis.\")\n break\n assert len(timeDictKey) > 0, \"No time array found (starting with t) in model output.\"\n t = run_dict[timeDictKey].copy()\n del run_dict[timeDictKey]\n return timeDictKey, {\n \"output\": list(run_dict.keys()),\n \"space\": list(range(next(iter(run_dict.values())).shape[0])),\n \"time\": t,\n }\n\n def xr(self, bold=False):\n \"\"\"\n Return `xr.Dataset` from the exploration results.\n\n :param bold: if True, will load and return only BOLD output\n :type bold: bool\n \"\"\"\n assert self.results is not None, \"Run `loadResults()` first to populate the results\"\n assert len(self.results) == len(self.dfResults)\n # create intrisinsic dims for one run\n timeDictKey, run_coords = self._getCoordsFromRun(self.results[0], bold=bold)\n dataarrays = []\n orig_search_coords = pypet.cartesian_product(self.exploreParameters)\n for runId, run_result in self.results.items():\n # take exploration coordinates for this run\n expl_coords = {k: v[runId] for k, v in orig_search_coords.items()}\n outputs = []\n run_result = self._filterDictionaryBold(run_result, bold=bold)\n for key, value in run_result.items():\n if key == timeDictKey:\n continue\n outputs.append(value)\n # create DataArray for run only - we need to add exploration coordinates\n data_temp = xr.DataArray(\n np.stack(outputs), dims=[\"output\", \"space\", \"time\"], coords=run_coords, name=\"exploration\"\n )\n expand_coords = {}\n # iterate exploration coordinates\n for k, v in expl_coords.items():\n # if single values, just assing\n if isinstance(v, (str, float, int)):\n expand_coords[k] = [v]\n # if arrays, check whether they can be sqeezed into one value\n elif isinstance(v, np.ndarray):\n if np.unique(v).size == 1:\n # if yes, just assing that one value\n expand_coords[k] = [float(np.unique(v))]\n else:\n # if no, sorry - coordinates cannot be array\n raise ValueError(\"Cannot squeeze coordinates\")\n # assing exploration coordinates to the DataArray\n dataarrays.append(data_temp.expand_dims(expand_coords))\n\n # finally, combine all arrays into one\n combined = xr.combine_by_coords(dataarrays)[\"exploration\"]\n if self.parameterSpace.star:\n combined.attrs = {k: list(self.model.params[k].keys()) for k in orig_search_coords.keys()}\n\n return combined\n\n def getRun(self, runId, filename=None, trajectoryName=None, pypetShortNames=True):\n \"\"\"Load the simulated data of a run and its parameters from a pypetTrajectory.\n\n :param runId: ID of the run\n :type runId: int\n\n :return: Dictionary with simulated data and parameters of the run.\n :type return: dict\n \"\"\"\n # chose HDF file to load\n filename = self.HDF_FILE or filename\n\n # either use loaded pypetTrajectory or load from HDF file if it isn't available\n pypetTrajectory = (\n self.pypetTrajectory\n if hasattr(self, \"pypetTrajectory\")\n else pu.loadPypetTrajectory(filename, trajectoryName)\n )\n\n # # if there was no pypetTrajectory loaded before\n # if pypetTrajectory is None:\n # # chose HDF file to load\n # filename = self.HDF_FILE or filename\n # pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n\n return pu.getRun(runId, 
pypetTrajectory, pypetShortNames=pypetShortNames)\n\n def getResult(self, runId):\n \"\"\"Returns either a loaded result or reads from disk.\n\n :param runId: runId of result\n :type runId: int\n :return: result\n :rtype: dict\n \"\"\"\n # if hasattr(self, \"results\"):\n # # load result from either the preloaded .result attribute (from .loadResults)\n # result = self.results[runId]\n # else:\n # # or from disk if results haven't been loaded yet\n # result = self.getRun(runId)\n\n # load result from either the preloaded .result attribute (from .loadResults)\n # or from disk if results haven't been loaded yet\n # result = self.results[runId] if hasattr(self, \"results\") else self.getRun(runId)\n return self.results[runId] if hasattr(self, \"results\") else self.getRun(runId)\n\n def info(self):\n \"\"\"Print info about the current search.\"\"\"\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%HH-%MM-%SS\")\n print(f\"Exploration info ({now})\")\n print(f\"HDF name: {self.HDF_FILE}\")\n print(f\"Trajectory name: {self.trajectoryName}\")\n if self.model is not None:\n print(f\"Model: {self.model.name}\")\n if hasattr(self, \"nRuns\"):\n print(f\"Number of runs {self.nRuns}\")\n print(f\"Explored parameters: {self.exploreParameters.keys()}\")\n if hasattr(self, \"_t_end_exploration\") and hasattr(self, \"_t_start_exploration\"):\n print(f\"Duration of exploration: {self._t_end_exploration-self._t_start_exploration}\")\n"
] |
[
[
"pandas.DataFrame",
"numpy.stack",
"numpy.unique"
]
] |
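For context on how the BoxSearch class in the row above is typically driven, here is a hedged usage sketch with a custom evaluation function. The import paths follow the file's docstrings and upstream neurolib (neurolib.utils.parameterSpace.ParameterSpace, neurolib.optimize.exploration.BoxSearch) and may differ in this fork; the parameter names a and b, the evaluate function, and the "distance" result key are hypothetical.

import numpy as np
from neurolib.utils.parameterSpace import ParameterSpace
from neurolib.optimize.exploration import BoxSearch

# Grid over two hypothetical scalar parameters.
parameters = ParameterSpace({"a": np.linspace(0.0, 1.0, 5).tolist(),
                             "b": [0.1, 0.2, 0.3]})

def evaluate(traj):
    # Called by pypet once per parameter combination; the current parameters
    # are recovered through the search object (see getParametersFromTraj).
    params = search.getParametersFromTraj(traj)
    distance = (params["a"] - 0.5) ** 2 + params["b"]   # toy scalar result
    search.saveToPypet({"distance": distance}, traj)

search = BoxSearch(evalFunction=evaluate, parameterSpace=parameters,
                   filename="toy_exploration.hdf")
search.run()            # pypet explores the cartesian product of the parameters
search.loadResults()    # populates search.results and search.dfResults
print(search.dfResults.head())

Passing evalFunction instead of a model is the code path documented in the constructor docstring: pypet calls the function once per explored point, and saveToPypet stores whatever dictionary it produces under the "results.$." prefix in the HDF file.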
marian2js/agents
|
[
"114c5252fb8d0dd4d739f01ffac36ceae76eff5f",
"114c5252fb8d0dd4d739f01ffac36ceae76eff5f"
] |
[
"tf_agents/environments/wrappers_test.py",
"tf_agents/agents/dqn/examples/v1/train_eval_atari.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for tf_agents.environments.wrappers.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nimport collections\nimport cProfile\nimport math\nimport pstats\n\nfrom absl.testing import parameterized\nfrom absl.testing.absltest import mock\n\nimport gym\nimport gym.spaces\nimport numpy as np\n\nfrom tf_agents.environments import gym_wrapper\nfrom tf_agents.environments import random_py_environment\nfrom tf_agents.environments import test_envs\nfrom tf_agents.environments import wrappers\nfrom tf_agents.specs import array_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import test_utils\n\n\nclass PyEnvironmentBaseWrapperTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'scalar',\n 'batch_size': None\n },\n {\n 'testcase_name': 'batched',\n 'batch_size': 2\n },\n )\n def test_batch_properties(self, batch_size):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((1,), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec,\n action_spec,\n reward_fn=lambda *_: np.array([1.0]),\n batch_size=batch_size)\n wrap_env = wrappers.PyEnvironmentBaseWrapper(env)\n self.assertEqual(wrap_env.batched, env.batched)\n self.assertEqual(wrap_env.batch_size, env.batch_size)\n\n def test_default_batch_properties(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n self.assertFalse(env.batched)\n self.assertEqual(env.batch_size, None)\n wrap_env = wrappers.PyEnvironmentBaseWrapper(env)\n self.assertEqual(wrap_env.batched, env.batched)\n self.assertEqual(wrap_env.batch_size, env.batch_size)\n\n def test_wrapped_method_propagation(self):\n mock_env = mock.MagicMock()\n env = wrappers.PyEnvironmentBaseWrapper(mock_env)\n env.reset()\n self.assertEqual(1, mock_env.reset.call_count)\n env.step(0)\n self.assertEqual(1, mock_env.step.call_count)\n mock_env.step.assert_called_with(0)\n env.seed(0)\n self.assertEqual(1, mock_env.seed.call_count)\n mock_env.seed.assert_called_with(0)\n env.render()\n self.assertEqual(1, mock_env.render.call_count)\n env.close()\n self.assertEqual(1, mock_env.close.call_count)\n\n\nclass TimeLimitWrapperTest(test_utils.TestCase):\n\n def test_limit_duration_wrapped_env_forwards_calls(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.TimeLimit(env, 10)\n\n action_spec = env.action_spec()\n self.assertEqual((), action_spec.shape)\n self.assertEqual(0, action_spec.minimum)\n self.assertEqual(1, action_spec.maximum)\n\n observation_spec = env.observation_spec()\n self.assertEqual((4,), observation_spec.shape)\n high = np.array([\n 4.8,\n np.finfo(np.float32).max, 2 / 15.0 * math.pi,\n np.finfo(np.float32).max\n ])\n 
np.testing.assert_array_almost_equal(-high, observation_spec.minimum)\n np.testing.assert_array_almost_equal(high, observation_spec.maximum)\n\n def test_limit_duration_stops_after_duration(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.TimeLimit(env, 2)\n\n env.reset()\n env.step(np.array(0, dtype=np.int32))\n time_step = env.step(np.array(0, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n self.assertNotEqual(None, time_step.discount)\n self.assertNotEqual(0.0, time_step.discount)\n\n def test_extra_env_methods_work(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.TimeLimit(env, 2)\n\n self.assertEqual(None, env.get_info())\n env.reset()\n env.step(np.array(0, dtype=np.int32))\n self.assertEqual({}, env.get_info())\n\n def test_automatic_reset(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.TimeLimit(env, 2)\n\n # Episode 1\n first_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(first_time_step.is_first())\n mid_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(mid_time_step.is_mid())\n last_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(last_time_step.is_last())\n\n # Episode 2\n first_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(first_time_step.is_first())\n mid_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(mid_time_step.is_mid())\n last_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(last_time_step.is_last())\n\n def test_duration_applied_after_episode_terminates_early(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.TimeLimit(env, 10000)\n\n # Episode 1 stepped until termination occurs.\n time_step = env.step(np.array(1, dtype=np.int32))\n while not time_step.is_last():\n time_step = env.step(np.array(1, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n env._duration = 2\n\n # Episode 2 short duration hits step limit.\n first_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(first_time_step.is_first())\n mid_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(mid_time_step.is_mid())\n last_time_step = env.step(np.array(0, dtype=np.int32))\n self.assertTrue(last_time_step.is_last())\n\n\nclass ActionRepeatWrapperTest(test_utils.TestCase):\n\n def _get_mock_env_episode(self):\n mock_env = mock.MagicMock()\n mock_env.step.side_effect = [\n # In practice, the first reward would be 0, but test with a reward of 1.\n ts.TimeStep(ts.StepType.FIRST, 1, 1, [0]),\n ts.TimeStep(ts.StepType.MID, 2, 1, [1]),\n ts.TimeStep(ts.StepType.MID, 3, 1, [2]),\n ts.TimeStep(ts.StepType.MID, 5, 1, [3]),\n ts.TimeStep(ts.StepType.LAST, 7, 1, [4]),\n ]\n return mock_env\n\n def test_action_stops_on_first(self):\n mock_env = self._get_mock_env_episode()\n env = wrappers.ActionRepeat(mock_env, 3)\n env.reset()\n\n time_step = env.step([2])\n mock_env.step.assert_has_calls([mock.call([2])])\n\n self.assertEqual(1, time_step.reward)\n self.assertEqual([0], time_step.observation)\n\n def test_action_repeated(self):\n mock_env = self._get_mock_env_episode()\n env = wrappers.ActionRepeat(mock_env, 3)\n env.reset()\n\n env.step([2])\n env.step([3])\n mock_env.step.assert_has_calls([mock.call([2])] +\n [mock.call([3])] * 3)\n\n def test_action_stops_on_last(self):\n mock_env = 
self._get_mock_env_episode()\n env = wrappers.ActionRepeat(mock_env, 3)\n env.reset()\n\n env.step([2])\n env.step([3])\n time_step = env.step([4])\n mock_env.step.assert_has_calls([mock.call([2])] +\n [mock.call([3])] * 3 +\n [mock.call([4])])\n\n self.assertEqual(7, time_step.reward)\n self.assertEqual([4], time_step.observation)\n\n def test_checks_times_param(self):\n mock_env = mock.MagicMock()\n with self.assertRaises(ValueError):\n wrappers.ActionRepeat(mock_env, 1)\n\n def test_accumulates_reward(self):\n mock_env = self._get_mock_env_episode()\n env = wrappers.ActionRepeat(mock_env, 3)\n env.reset()\n\n env.step(0)\n time_step = env.step(0)\n\n mock_env.step.assert_called_with(0)\n self.assertEqual(10, time_step.reward)\n self.assertEqual([3], time_step.observation)\n\n\nclass ObservationFilterWrapperTest(test_utils.TestCase):\n\n def _get_mock_env_step(self):\n mock_env = mock.MagicMock()\n mock_env.observation_spec.side_effect = [\n array_spec.BoundedArraySpec((3,), np.int32, -10, 10),\n array_spec.BoundedArraySpec((3,), np.int32, -10, 10),\n array_spec.BoundedArraySpec((3,), np.int32, -10, 10),\n ]\n mock_env.reset.side_effect = [ts.TimeStep(ts.StepType.MID, 5, 1, [3, 5, 2])]\n mock_env.step.side_effect = [ts.TimeStep(ts.StepType.MID, 5, 1, [1, 2, 3])]\n return mock_env\n\n def test_filtered_obs_spec(self):\n mock_env = self._get_mock_env_step()\n env = wrappers.ObservationFilterWrapper(mock_env, [1])\n\n self.assertEqual((1,), env.observation_spec().shape)\n\n def test_obs_filtered_reset(self):\n mock_env = self._get_mock_env_step()\n env = wrappers.ObservationFilterWrapper(mock_env, [0])\n time_step = env.reset()\n\n self.assertLen(time_step.observation, 1)\n self.assertEqual([3], time_step.observation)\n\n def test_obs_filtered_step(self):\n mock_env = self._get_mock_env_step()\n env = wrappers.ObservationFilterWrapper(mock_env, [0, 2])\n env.reset()\n time_step = env.step(0)\n\n self.assertLen(time_step.observation, 2)\n self.assertAllEqual([1, 3], time_step.observation)\n\n def test_checks_nested_obs(self):\n mock_env = self._get_mock_env_step()\n mock_env.observation_spec.side_effect = [\n [array_spec.BoundedArraySpec((2,), np.int32, -10, 10),\n array_spec.BoundedArraySpec((2,), np.int32, -10, 10)]\n ]\n with self.assertRaises(ValueError):\n _ = wrappers.ObservationFilterWrapper(mock_env, [0])\n\n def test_checks_multidim_idx(self):\n mock_env = self._get_mock_env_step()\n with self.assertRaises(ValueError):\n _ = wrappers.ObservationFilterWrapper(mock_env, [[0]])\n\n def test_checks_idx_provided(self):\n mock_env = self._get_mock_env_step()\n with self.assertRaises(ValueError):\n _ = wrappers.ObservationFilterWrapper(mock_env, [])\n\n def test_checks_idx_outofbounds(self):\n mock_env = self._get_mock_env_step()\n with self.assertRaises(ValueError):\n _ = wrappers.ObservationFilterWrapper(mock_env, [5])\n\n\nclass RunStatsWrapperTest(test_utils.TestCase):\n\n def test_episode_count(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.RunStats(env)\n\n self.assertEqual(0, env.episodes)\n time_step = env.reset()\n self.assertEqual(0, env.episodes)\n\n for episode_num in range(1, 4):\n while not time_step.is_last():\n time_step = env.step(np.array(1, dtype=np.int32))\n self.assertEqual(episode_num, env.episodes)\n time_step = env.step(np.array(1, dtype=np.int32))\n\n def test_episode_count_with_time_limit(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = 
wrappers.TimeLimit(env, 2)\n env = wrappers.RunStats(env)\n\n env.reset()\n self.assertEqual(0, env.episodes)\n\n env.step(np.array(0, dtype=np.int32))\n time_step = env.step(np.array(0, dtype=np.int32))\n\n self.assertTrue(time_step.is_last())\n self.assertEqual(1, env.episodes)\n\n def test_step_count(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.RunStats(env)\n\n self.assertEqual(0, env.episodes)\n time_step = env.reset()\n self.assertEqual(0, env.episodes)\n\n steps = 0\n for _ in range(0, 4):\n while not time_step.is_last():\n self.assertEqual(steps, env.total_steps)\n time_step = env.step(np.array(1, dtype=np.int32))\n steps += 1\n time_step = env.step(np.array(1, dtype=np.int32))\n\n def test_resets_count(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n env = wrappers.RunStats(env)\n\n self.assertEqual(0, env.resets)\n time_step = env.reset()\n self.assertEqual(1, env.resets)\n\n resets = 1\n for _ in range(0, 4):\n while not time_step.is_last():\n self.assertEqual(resets, env.resets)\n time_step = env.step(np.array(1, dtype=np.int32))\n time_step = env.step(np.array(1, dtype=np.int32))\n resets += 1\n\n\nclass ActionDiscretizeWrapper(test_utils.TestCase):\n\n def test_discrete_spec_scalar_limit(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((), np.float32, -10, 10)\n limits = 3\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n\n expected_spec = array_spec.BoundedArraySpec((), np.int32, 0,\n np.asarray(limits) - 1)\n self.assertEqual(expected_spec, env.action_spec())\n\n def test_discrete_spec_1d(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2,), np.float32, -10, 10)\n limits = [5, 3]\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n\n expected_spec = array_spec.BoundedArraySpec((2,), np.int32, 0,\n np.asarray(limits) - 1)\n self.assertEqual(expected_spec, env.action_spec())\n\n def test_discrete_spec_nd(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n limits = np.array([[2, 4], [3, 2]])\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n\n expected_spec = array_spec.BoundedArraySpec((2, 2), np.int32, 0, limits - 1)\n self.assertEqual(expected_spec, env.action_spec())\n\n def test_action_mapping_1d(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((), np.float32, -10, 10)\n limits = np.array(5)\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n\n action = env.step(2)\n np.testing.assert_array_almost_equal(0.0, action)\n action = env.step(4)\n np.testing.assert_array_almost_equal(10.0, action)\n\n def test_action_mapping_nd(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n 
action_spec = array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n limits = np.array([[2, 5], [3, 2]])\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n\n action = env.step([[0, 2], [1, 1]])\n np.testing.assert_array_almost_equal([[-10.0, 0.0], [0.0, 10.0]], action)\n\n def test_shapes_broadcast(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n limits = np.array([[2, 5]])\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n\n action = env.step([[0, 2], [1, 4]])\n np.testing.assert_array_almost_equal([[-10.0, 0.0], [10.0, 10.0]], action)\n\n def test_check_limits(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n limits = np.array([[1, 5], [2, 2]])\n\n with self.assertRaisesRegexp(ValueError, '.*size 2.'):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n\n def test_check_action_shape(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n limits = np.array([[2, 5], [2, 2]])\n\n with self.assertRaisesRegexp(ValueError, '.*incorrect shape.*'):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n env.step([0, 0])\n\n def test_check_array_bounds(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2,), np.float32, [-10, 0], 10)\n limits = np.array([2, 5])\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n\n action = env.step([0, 0])\n np.testing.assert_array_almost_equal([-10.0, 0.0], action)\n\n action = env.step([1, 4])\n np.testing.assert_array_almost_equal([10.0, 10.0], action)\n\n action = env.step([0, 2])\n np.testing.assert_array_almost_equal([-10.0, 5.0], action)\n\n def test_action_nest(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = {\n 'action1': array_spec.BoundedArraySpec((2, 2), np.float32, -10, 10)\n }\n limits = np.array([[2, 5]])\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionDiscretizeWrapper(env, limits)\n env.reset()\n\n action = env.step(np.array([[0, 2], [1, 4]]))\n 
np.testing.assert_array_almost_equal([[-10.0, 0.0], [10.0, 10.0]],\n action['action1'])\n\n\nclass ActionClipWrapper(test_utils.TestCase):\n\n def test_clip(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2,), np.float32, [-1, 0], 1)\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionClipWrapper(env)\n env.reset()\n\n # actions within bounds, use NumPy action\n action = env.step(np.array([0, 0]))\n np.testing.assert_array_almost_equal([0.0, 0.0], action)\n\n # action 1 outside bounds, use list action\n action = env.step([-4, 0])\n np.testing.assert_array_almost_equal([-1.0, 0.0], action)\n\n # action 2 outside bounds, use NumPy action\n action = env.step(np.array([0, -4]))\n np.testing.assert_array_almost_equal([0.0, 0.0], action)\n\n # actions outside bounds, use list action\n action = env.step([4, 4])\n action = env.step(np.array([4, 4]))\n np.testing.assert_array_almost_equal([1.0, 1.0], action)\n\n def test_nested(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = [\n array_spec.BoundedArraySpec((2,), np.float32, -1, 1), [\n array_spec.BoundedArraySpec((2,), np.float32, -2, 2),\n array_spec.BoundedArraySpec((2,), np.float32, -3, 3)\n ]\n ]\n\n def mock_step(_, action):\n return action\n\n with mock.patch.object(\n random_py_environment.RandomPyEnvironment,\n '_step',\n side_effect=mock_step,\n autospec=True,\n ):\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.ActionClipWrapper(env)\n env.reset()\n\n # use NumPy action\n action = [np.array([10, -10]), [np.array([10, -10]), np.array([10, -10])]]\n action = env.step(action)\n np.testing.assert_array_almost_equal([1, -1], action[0])\n np.testing.assert_array_almost_equal([2, -2], action[1][0])\n np.testing.assert_array_almost_equal([3, -3], action[1][1])\n\n # use list action\n action = [[10, -10], [[10, -10], [10, -10]]]\n action = env.step(action)\n np.testing.assert_array_almost_equal([1, -1], action[0])\n np.testing.assert_array_almost_equal([2, -2], action[1][0])\n np.testing.assert_array_almost_equal([3, -3], action[1][1])\n\n\nclass ActionOffsetWrapperTest(test_utils.TestCase):\n\n def test_nested(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = [\n array_spec.BoundedArraySpec((2,), np.int32, -1, 1), [\n array_spec.BoundedArraySpec((2,), np.int32, -2, 2),\n array_spec.BoundedArraySpec((2,), np.int32, -3, 3)\n ]\n ]\n with self.assertRaisesRegexp(ValueError, 'single-array action specs'):\n env = random_py_environment.RandomPyEnvironment(obs_spec, action_spec)\n env = wrappers.ActionOffsetWrapper(env)\n\n def test_unbounded(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.ArraySpec((2,), np.int32)\n with self.assertRaisesRegexp(ValueError, 'bounded action specs'):\n env = random_py_environment.RandomPyEnvironment(obs_spec, action_spec)\n env = wrappers.ActionOffsetWrapper(env)\n\n def test_continuous(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2,), np.float32, -1, 1)\n with self.assertRaisesRegexp(ValueError, 'discrete action specs'):\n env = 
random_py_environment.RandomPyEnvironment(obs_spec, action_spec)\n env = wrappers.ActionOffsetWrapper(env)\n\n def test_action_spec(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((3,), np.int32, -1, 1)\n env = random_py_environment.RandomPyEnvironment(obs_spec, action_spec)\n env = wrappers.ActionOffsetWrapper(env)\n self.assertEqual(array_spec.BoundedArraySpec((3,), np.int32, 0, 2),\n env.action_spec())\n\n def test_step(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((3,), np.int32, -1, 1)\n mock_env = mock.Mock(\n wraps=random_py_environment.RandomPyEnvironment(obs_spec, action_spec))\n env = wrappers.ActionOffsetWrapper(mock_env)\n env.reset()\n\n env.step(np.array([0, 1, 2]))\n self.assertTrue(mock_env.step.called)\n np.testing.assert_array_equal(np.array([-1, 0, 1]),\n mock_env.step.call_args[0][0])\n\n\nclass FlattenObservationsWrapper(parameterized.TestCase):\n\n @parameterized.parameters((['obs1', 'obs2'], [(4,), (5,)], np.int32),\n (['obs1', 'obs2', 'obs3'], [(1,), (1,),\n (4,)], np.float32),\n ((['obs1', 'obs2'], [(5, 2), (3, 3)], np.float32)))\n def test_with_varying_observation_specs(\n self, observation_keys, observation_shapes, observation_dtypes):\n \"\"\"Vary the observation spec and step the environment.\"\"\"\n obs_spec = collections.OrderedDict()\n for idx, key in enumerate(observation_keys):\n obs_spec[key] = array_spec.ArraySpec(observation_shapes[idx],\n observation_dtypes)\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = wrappers.FlattenObservationsWrapper(env)\n time_step = env.step(\n array_spec.sample_bounded_spec(action_spec, np.random.RandomState()))\n # Check that all observations returned from environment is packed into one\n # dimension.\n expected_shape = self._get_expected_shape(obs_spec, obs_spec.keys())\n self.assertEqual(time_step.observation.shape, expected_shape)\n self.assertEqual(\n env.observation_spec(),\n array_spec.ArraySpec(\n shape=expected_shape,\n dtype=observation_dtypes,\n name='packed_observations'))\n\n @parameterized.parameters((('obs1'),), (('obs1', 'obs3'),))\n def test_with_varying_observation_filters(self, observations_to_keep):\n \"\"\"Vary the observations to save from the environment.\"\"\"\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.int32),\n 'obs3': array_spec.ArraySpec((3,), np.int32)\n })\n\n observations_to_keep = np.array([observations_to_keep]).flatten()\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n # Create the wrapper with list of observations to keep before packing it\n # into one dimension.\n env = wrappers.FlattenObservationsWrapper(\n env, observations_whitelist=observations_to_keep)\n time_step = env.step(\n array_spec.sample_bounded_spec(action_spec, np.random.RandomState()))\n # The expected shape is the sum of observation lengths in the observation\n # spec that has been filtered by the observations_to_keep list.\n expected_shape = self._get_expected_shape(obs_spec, observations_to_keep)\n # Test the expected shape of observations returned from stepping the\n # environment and additionally, check the environment spec.\n self.assertEqual(time_step.observation.shape, 
expected_shape)\n self.assertEqual(\n env.observation_spec(),\n array_spec.ArraySpec(\n shape=expected_shape, dtype=np.int32, name='packed_observations'))\n\n def test_env_reset(self):\n \"\"\"Test the observations returned after an environment reset.\"\"\"\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.int32),\n 'obs3': array_spec.ArraySpec((3,), np.int32)\n })\n\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n # Create the wrapper with list of observations to keep before packing it\n # into one dimension.\n env = wrappers.FlattenObservationsWrapper(env)\n time_step = env.reset()\n expected_shape = self._get_expected_shape(obs_spec, obs_spec.keys())\n self.assertEqual(time_step.observation.shape, expected_shape)\n self.assertEqual(\n env.observation_spec(),\n array_spec.ArraySpec(\n shape=expected_shape, dtype=np.int32, name='packed_observations'))\n\n @parameterized.parameters(([array_spec.ArraySpec((1,), np.int32)],),\n array_spec.ArraySpec((1,), np.int32))\n def test_observations_wrong_spec_for_whitelist(self, observation_spec):\n \"\"\"Test the Wrapper has ValueError if the observation spec is invalid.\"\"\"\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n env = random_py_environment.RandomPyEnvironment(\n observation_spec, action_spec=action_spec)\n # Create the wrapper with list of observations to keep before packing it\n # into one dimension.\n with self.assertRaises(ValueError):\n env = wrappers.FlattenObservationsWrapper(\n env, observations_whitelist=['obs1'])\n\n def test_observations_unknown_whitelist(self):\n \"\"\"Test the Wrapper has ValueError if given unknown keys.\"\"\"\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.int32),\n 'obs3': array_spec.ArraySpec((3,), np.int32)\n })\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n\n whitelist_unknown_keys = ['obs1', 'obs4']\n\n with self.assertRaises(ValueError):\n env = wrappers.FlattenObservationsWrapper(\n env, observations_whitelist=whitelist_unknown_keys)\n\n def test_observations_multiple_dtypes(self):\n \"\"\"Test the Wrapper has ValueError if given unknown keys.\"\"\"\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.float32),\n })\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n\n with self.assertRaises(ValueError):\n env = wrappers.FlattenObservationsWrapper(env)\n\n def test_batch_env(self):\n \"\"\"Vary the observation spec and step the environment.\"\"\"\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.int32),\n })\n\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n # Generate a randomy py environment with batch size.\n batch_size = 4\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec, batch_size=batch_size)\n\n env = wrappers.FlattenObservationsWrapper(env)\n time_step = env.step(\n array_spec.sample_bounded_spec(action_spec, np.random.RandomState()))\n\n expected_shape = 
self._get_expected_shape(obs_spec, obs_spec.keys())\n self.assertEqual(time_step.observation.shape,\n (batch_size, expected_shape[0]))\n self.assertEqual(\n env.observation_spec(),\n array_spec.ArraySpec(\n shape=expected_shape, dtype=np.int32, name='packed_observations'))\n\n def _get_expected_shape(self, observation, observations_to_keep):\n \"\"\"Gets the expected shape of a flattened observation nest.\"\"\"\n # The expected shape is the sum of observation lengths in the observation\n # spec. For a multi-dimensional observation, it is flattened, thus the\n # length is the product of its shape, i.e. Two arrays ([3, 3], [2, 3])\n # result in a len-9 and len-6 observation, with total length of 15.\n expected_shape = 0\n for obs in observations_to_keep:\n expected_shape += np.prod(observation[obs].shape)\n return (expected_shape,)\n\n\nclass MockGoalReplayEnvWrapper(wrappers.GoalReplayEnvWrapper):\n \"\"\"Mock environment specific implementation of GoalReplayEnvWrapper.\"\"\"\n\n def get_trajectory_with_goal(self, trajectory, goal):\n # In this mock environment, 'obs1' is the goal\n trajectory.observation.update({'obs1': goal})\n return trajectory\n\n def get_goal_from_trajectory(self, trajectory):\n return trajectory.observation['obs1']\n\n\nclass GoalReplayEnvWrapperTest(parameterized.TestCase):\n\n @parameterized.parameters((['obs1', 'obs2'], [(4,), (5,)], np.int32),\n (['obs1', 'obs2', 'obs3'], [(1,), (1,),\n (4,)], np.float32),\n ((['obs1', 'obs2'], [(5, 2), (3, 3)], np.float32)))\n def test_with_varying_observation_specs(\n self, observation_keys, observation_shapes, observation_dtypes):\n \"\"\"Vary the observation spec and step the environment.\"\"\"\n obs_spec = collections.OrderedDict()\n for idx, key in enumerate(observation_keys):\n obs_spec[key] = array_spec.ArraySpec(observation_shapes[idx],\n observation_dtypes)\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n env = MockGoalReplayEnvWrapper(env)\n random_action = array_spec.sample_bounded_spec(action_spec,\n np.random.RandomState())\n time_step = env.step(random_action)\n self.assertIsInstance(time_step.observation, dict)\n self.assertEqual(time_step.observation.keys(),\n env.observation_spec().keys())\n time_step = env.reset()\n self.assertIsInstance(time_step.observation, dict)\n self.assertEqual(time_step.observation.keys(),\n env.observation_spec().keys())\n\n def test_batch_env(self):\n \"\"\"Test batched version of the environment.\"\"\"\n obs_spec = collections.OrderedDict({\n 'obs1': array_spec.ArraySpec((1,), np.int32),\n 'obs2': array_spec.ArraySpec((2,), np.int32),\n })\n action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)\n\n # Generate a randomy py environment with batch size.\n batch_size = 4\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec, batch_size=batch_size)\n env = MockGoalReplayEnvWrapper(env)\n random_action = array_spec.sample_bounded_spec(action_spec,\n np.random.RandomState())\n\n time_step = env.step(random_action)\n self.assertIsInstance(time_step.observation, dict)\n self.assertEqual(time_step.observation.keys(),\n env.observation_spec().keys())\n time_step = env.reset()\n self.assertIsInstance(time_step.observation, dict)\n self.assertEqual(time_step.observation.keys(),\n env.observation_spec().keys())\n\n\nclass HistoryWrapperTest(test_utils.TestCase):\n\n def test_observation_spec_changed(self):\n cartpole_env = 
gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n obs_shape = env.observation_spec().shape\n\n history_env = wrappers.HistoryWrapper(env, 3)\n self.assertEqual((3,) + obs_shape, history_env.observation_spec().shape)\n\n def test_observation_spec_changed_with_action(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n obs_shape = env.observation_spec().shape\n action_shape = env.action_spec().shape\n\n history_env = wrappers.HistoryWrapper(env, 3, include_actions=True)\n self.assertEqual((3,) + obs_shape,\n history_env.observation_spec()['observation'].shape)\n self.assertEqual((3,) + action_shape,\n history_env.observation_spec()['action'].shape)\n\n def test_observation_stacked(self):\n env = test_envs.CountingEnv()\n history_env = wrappers.HistoryWrapper(env, 3)\n time_step = history_env.reset()\n self.assertEqual([0, 0, 0], time_step.observation.tolist())\n\n time_step = history_env.step(0)\n self.assertEqual([0, 0, 1], time_step.observation.tolist())\n\n time_step = history_env.step(0)\n self.assertEqual([0, 1, 2], time_step.observation.tolist())\n\n time_step = history_env.step(0)\n self.assertEqual([1, 2, 3], time_step.observation.tolist())\n\n def test_observation_and_action_stacked(self):\n env = test_envs.CountingEnv()\n history_env = wrappers.HistoryWrapper(env, 3, include_actions=True)\n time_step = history_env.reset()\n self.assertEqual([0, 0, 0], time_step.observation['observation'].tolist())\n self.assertEqual([0, 0, 0], time_step.observation['action'].tolist())\n\n time_step = history_env.step(5)\n self.assertEqual([0, 0, 1], time_step.observation['observation'].tolist())\n self.assertEqual([0, 0, 5], time_step.observation['action'].tolist())\n\n time_step = history_env.step(6)\n self.assertEqual([0, 1, 2], time_step.observation['observation'].tolist())\n self.assertEqual([0, 5, 6], time_step.observation['action'].tolist())\n\n time_step = history_env.step(7)\n self.assertEqual([1, 2, 3], time_step.observation['observation'].tolist())\n self.assertEqual([5, 6, 7], time_step.observation['action'].tolist())\n\n\nclass PerformanceProfilerWrapperTest(test_utils.TestCase):\n\n def test_profiling(self):\n cartpole_env = gym.make('CartPole-v1')\n env = gym_wrapper.GymWrapper(cartpole_env)\n profile = [None]\n def profile_fn(p):\n self.assertIsInstance(p, cProfile.Profile)\n profile[0] = p\n\n env = wrappers.PerformanceProfiler(\n env, process_profile_fn=profile_fn,\n process_steps=2)\n\n env.reset()\n\n # Resets are also profiled.\n s = pstats.Stats(env._profile)\n self.assertGreater(s.total_calls, 0) # pytype: disable=attribute-error\n\n for _ in range(2):\n env.step(np.array(1, dtype=np.int32))\n\n self.assertIsNotNone(profile[0])\n previous_profile = profile[0]\n\n updated_s = pstats.Stats(profile[0])\n self.assertGreater(updated_s.total_calls, s.total_calls) # pytype: disable=attribute-error\n\n for _ in range(2):\n env.step(np.array(1, dtype=np.int32))\n\n self.assertIsNotNone(profile[0])\n # We saw a new profile.\n self.assertNotEqual(profile[0], previous_profile)\n\n\nclass OneHotActionWrapperTest(test_utils.TestCase):\n\n def testActionSpec(self):\n cartpole_env = gym.spec('CartPole-v1').make()\n env = gym_wrapper.GymWrapper(cartpole_env)\n one_hot_action_wrapper = wrappers.OneHotActionWrapper(env)\n expected_spec = array_spec.BoundedArraySpec(\n shape=(2,),\n dtype=np.int64,\n minimum=0,\n maximum=1,\n name='one_hot_action_spec')\n self.assertEqual(one_hot_action_wrapper.action_spec(), 
expected_spec)\n\n def testStepDiscrete(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((1,), np.int32, 1, 3)\n mock_env = mock.Mock(\n wraps=random_py_environment.RandomPyEnvironment(obs_spec, action_spec))\n one_hot_action_wrapper = wrappers.OneHotActionWrapper(mock_env)\n one_hot_action_wrapper.reset()\n\n one_hot_action_wrapper.step(np.array([[0, 1, 0]]).astype(np.int32))\n self.assertTrue(mock_env.step.called)\n np.testing.assert_array_equal(\n np.array([2]).astype(np.int32), mock_env.step.call_args[0][0])\n\n def testStepContinuous(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.ArraySpec((2,), np.float32)\n mock_env = mock.Mock(\n wraps=random_py_environment.RandomPyEnvironment(obs_spec, action_spec))\n one_hot_action_wrapper = wrappers.OneHotActionWrapper(mock_env)\n one_hot_action_wrapper.reset()\n\n one_hot_action_wrapper.step(np.array([0.5, 0.3]).astype(np.float32))\n self.assertTrue(mock_env.step.called)\n np.testing.assert_array_equal(np.array([0.5, 0.3]).astype(np.float32),\n mock_env.step.call_args[0][0])\n\n def testStepHybrid(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = {\n 'discrete':\n array_spec.BoundedArraySpec((1,), np.int32, 1, 3),\n 'continuous':\n array_spec.ArraySpec((2,), np.float32)\n }\n mock_env = mock.Mock(\n wraps=random_py_environment.RandomPyEnvironment(obs_spec, action_spec))\n one_hot_action_wrapper = wrappers.OneHotActionWrapper(mock_env)\n one_hot_action_wrapper.reset()\n\n action = {\n 'discrete':\n np.array([[0, 1, 0]]).astype(np.int32),\n 'continuous':\n np.array([0.5, 0.3]).astype(np.float32)\n }\n\n one_hot_action_wrapper.step(action)\n self.assertTrue(mock_env.step.called)\n\n expected_action = {\n 'discrete':\n np.array([2]),\n 'continuous':\n np.array([0.5, 0.3])\n }\n np.testing.assert_array_almost_equal(\n expected_action['discrete'], mock_env.step.call_args[0][0]['discrete'])\n np.testing.assert_array_almost_equal(\n expected_action['continuous'],\n mock_env.step.call_args[0][0]['continuous'])\n\n\nif __name__ == '__main__':\n test_utils.main()\n",
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Train and Eval DQN on Atari environments.\n\nTraining and evaluation proceeds alternately in iterations, where each\niteration consists of a 1M frame training phase followed by a 500K frame\nevaluation phase. In the literature, some papers report averages of the train\nphases, while others report averages of the eval phases.\n\nThis example is configured to use dopamine.atari.preprocessing, which, among\nother things, repeats every action it receives for 4 frames, and then returns\nthe max-pool over the last 2 frames in the group. In this example, when we\nrefer to \"ALE frames\" we refer to the frames before the max-pooling step (i.e.\nthe raw data available for processing). Because of this, many of the\nconfiguration parameters (like initial_collect_steps) are divided by 4 in the\nbody of the trainer (e.g. if you want to evaluate with 400 frames in the\ninitial collection, you actually only need to .step the environment 100 times).\n\nFor a good survey of training on Atari, see Machado, et al. 2017:\nhttps://arxiv.org/pdf/1709.06009.pdf.\n\nTo run:\n\n```bash\ntf_agents/agents/dqn/examples/v1/train_eval_atari \\\n --root_dir=$HOME/atari/pong \\\n --atari_roms_path=/tmp\n --alsologtostderr\n```\n\nAdditional flags are available such as `--replay_buffer_capacity` and\n`--n_step_update`.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.agents.dqn import dqn_agent\nfrom tf_agents.environments import batched_py_environment\nfrom tf_agents.environments import suite_atari\nfrom tf_agents.eval import metric_utils\nfrom tf_agents.metrics import py_metric\nfrom tf_agents.metrics import py_metrics\nfrom tf_agents.networks import network\nfrom tf_agents.networks import q_network\nfrom tf_agents.policies import epsilon_greedy_policy\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.policies import py_tf_policy\nfrom tf_agents.policies import random_py_policy\nfrom tf_agents.replay_buffers import py_hashed_replay_buffer\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.utils import common\nfrom tf_agents.utils import timer\n\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_string('environment_name', None,\n 'Full name of Atari game to run, ex. 
PongNoFrameskip-v4.')\nflags.DEFINE_string('game_name', 'Pong', 'Name of Atari game to run.')\n\nflags.DEFINE_integer('num_iterations', None,\n 'Number of train/eval iterations to run.')\nflags.DEFINE_integer('initial_collect_steps', None,\n 'Number of frames to ALE frames to process before '\n 'beginning to train. Since this is in ALE frames, there '\n 'will be initial_collect_steps/4 items in the replay '\n 'buffer when training starts.')\nflags.DEFINE_integer('replay_buffer_capacity', None,\n 'Maximum number of items to store in the replay buffer.')\nflags.DEFINE_integer('train_steps_per_iteration', None,\n 'Number of ALE frames to run through for each iteration '\n 'of training.')\nflags.DEFINE_integer('n_step_update', None, 'The number of steps to consider '\n 'when computing TD error and TD loss.')\nflags.DEFINE_integer('eval_steps_per_iteration', None,\n 'Number of ALE frames to run through for each iteration '\n 'of evaluation.')\nFLAGS = flags.FLAGS\n\n# AtariPreprocessing runs 4 frames at a time, max-pooling over the last 2\n# frames. We need to account for this when computing things like update\n# intervals.\nATARI_FRAME_SKIP = 4\n\n\nclass AtariQNetwork(network.Network):\n \"\"\"QNetwork subclass that divides observations by 255.\"\"\"\n\n def __init__(self, input_tensor_spec, action_spec, **kwargs):\n super(AtariQNetwork, self).__init__(input_tensor_spec, state_spec=())\n input_tensor_spec = tf.TensorSpec(\n dtype=tf.float32, shape=input_tensor_spec.shape)\n self._q_network = q_network.QNetwork(input_tensor_spec, action_spec,\n **kwargs)\n\n def call(self,\n observation,\n step_type=None,\n network_state=(),\n training=False):\n state = tf.cast(observation, tf.float32)\n # We divide the grayscale pixel values by 255 here rather than storing\n # normalized values beause uint8s are 4x cheaper to store than float32s.\n state = state / 255\n return self._q_network(\n state,\n step_type=step_type,\n network_state=network_state,\n training=training)\n\n\ndef log_metric(metric, prefix):\n tag = common.join_scope(prefix, metric.name)\n logging.info('%s', '{0} = {1}'.format(tag, metric.result()))\n\n\n@gin.configurable\nclass TrainEval(object):\n \"\"\"Train and evaluate DQN on Atari.\"\"\"\n\n def __init__(\n self,\n root_dir,\n env_name,\n num_iterations=200,\n max_episode_frames=108000, # ALE frames\n terminal_on_life_loss=False,\n conv_layer_params=((32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1)),\n fc_layer_params=(512,),\n # Params for collect\n initial_collect_steps=80000, # ALE frames\n epsilon_greedy=0.01,\n epsilon_decay_period=1000000, # ALE frames\n replay_buffer_capacity=1000000,\n # Params for train\n train_steps_per_iteration=1000000, # ALE frames\n update_period=16, # ALE frames\n target_update_tau=1.0,\n target_update_period=32000, # ALE frames\n batch_size=32,\n learning_rate=2.5e-4,\n n_step_update=1,\n gamma=0.99,\n reward_scale_factor=1.0,\n gradient_clipping=None,\n # Params for eval\n do_eval=True,\n eval_steps_per_iteration=500000, # ALE frames\n eval_epsilon_greedy=0.001,\n # Params for checkpoints, summaries, and logging\n log_interval=1000,\n summary_interval=1000,\n summaries_flush_secs=10,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n eval_metrics_callback=None):\n \"\"\"A simple Atari train and eval for DQN.\n\n Args:\n root_dir: Directory to write log files to.\n env_name: Fully-qualified name of the Atari environment (i.e. 
Pong-v0).\n num_iterations: Number of train/eval iterations to run.\n max_episode_frames: Maximum length of a single episode, in ALE frames.\n terminal_on_life_loss: Whether to simulate an episode termination when a\n life is lost.\n conv_layer_params: Params for convolutional layers of QNetwork.\n fc_layer_params: Params for fully connected layers of QNetwork.\n initial_collect_steps: Number of frames to ALE frames to process before\n beginning to train. Since this is in ALE frames, there will be\n initial_collect_steps/4 items in the replay buffer when training starts.\n epsilon_greedy: Final epsilon value to decay to for training.\n epsilon_decay_period: Period over which to decay epsilon, from 1.0 to\n epsilon_greedy (defined above).\n replay_buffer_capacity: Maximum number of items to store in the replay\n buffer.\n train_steps_per_iteration: Number of ALE frames to run through for each\n iteration of training.\n update_period: Run a train operation every update_period ALE frames.\n target_update_tau: Coeffecient for soft target network updates (1.0 ==\n hard updates).\n target_update_period: Period, in ALE frames, to copy the live network to\n the target network.\n batch_size: Number of frames to include in each training batch.\n learning_rate: RMS optimizer learning rate.\n n_step_update: The number of steps to consider when computing TD error and\n TD loss. Applies standard single-step updates when set to 1.\n gamma: Discount for future rewards.\n reward_scale_factor: Scaling factor for rewards.\n gradient_clipping: Norm length to clip gradients.\n do_eval: If True, run an eval every iteration. If False, skip eval.\n eval_steps_per_iteration: Number of ALE frames to run through for each\n iteration of evaluation.\n eval_epsilon_greedy: Epsilon value to use for the evaluation policy (0 ==\n totally greedy policy).\n log_interval: Log stats to the terminal every log_interval training\n steps.\n summary_interval: Write TF summaries every summary_interval training\n steps.\n summaries_flush_secs: Flush summaries to disk every summaries_flush_secs\n seconds.\n debug_summaries: If True, write additional summaries for debugging (see\n dqn_agent for which summaries are written).\n summarize_grads_and_vars: Include gradients in summaries.\n eval_metrics_callback: A callback function that takes (metric_dict,\n global_step) as parameters. 
Called after every eval with the results of\n the evaluation.\n \"\"\"\n self._update_period = update_period / ATARI_FRAME_SKIP\n self._train_steps_per_iteration = (train_steps_per_iteration\n / ATARI_FRAME_SKIP)\n self._do_eval = do_eval\n self._eval_steps_per_iteration = eval_steps_per_iteration / ATARI_FRAME_SKIP\n self._eval_epsilon_greedy = eval_epsilon_greedy\n self._initial_collect_steps = initial_collect_steps / ATARI_FRAME_SKIP\n self._summary_interval = summary_interval\n self._num_iterations = num_iterations\n self._log_interval = log_interval\n self._eval_metrics_callback = eval_metrics_callback\n\n with gin.unlock_config():\n gin.bind_parameter('AtariPreprocessing.terminal_on_life_loss',\n terminal_on_life_loss)\n\n root_dir = os.path.expanduser(root_dir)\n train_dir = os.path.join(root_dir, 'train')\n eval_dir = os.path.join(root_dir, 'eval')\n\n train_summary_writer = tf.compat.v2.summary.create_file_writer(\n train_dir, flush_millis=summaries_flush_secs * 1000)\n train_summary_writer.set_as_default()\n self._train_summary_writer = train_summary_writer\n\n self._eval_summary_writer = None\n if self._do_eval:\n self._eval_summary_writer = tf.compat.v2.summary.create_file_writer(\n eval_dir, flush_millis=summaries_flush_secs * 1000)\n self._eval_metrics = [\n py_metrics.AverageReturnMetric(\n name='PhaseAverageReturn', buffer_size=np.inf),\n py_metrics.AverageEpisodeLengthMetric(\n name='PhaseAverageEpisodeLength', buffer_size=np.inf),\n ]\n\n self._global_step = tf.compat.v1.train.get_or_create_global_step()\n with tf.compat.v2.summary.record_if(\n lambda: tf.math.equal(self._global_step % self._summary_interval, 0)):\n self._env = suite_atari.load(\n env_name,\n max_episode_steps=max_episode_frames / ATARI_FRAME_SKIP,\n gym_env_wrappers=suite_atari.DEFAULT_ATARI_GYM_WRAPPERS_WITH_STACKING)\n self._env = batched_py_environment.BatchedPyEnvironment([self._env])\n\n observation_spec = tensor_spec.from_spec(self._env.observation_spec())\n time_step_spec = ts.time_step_spec(observation_spec)\n action_spec = tensor_spec.from_spec(self._env.action_spec())\n\n with tf.device('/cpu:0'):\n epsilon = tf.compat.v1.train.polynomial_decay(\n 1.0,\n self._global_step,\n epsilon_decay_period / ATARI_FRAME_SKIP / self._update_period,\n end_learning_rate=epsilon_greedy)\n\n with tf.device('/gpu:0'):\n optimizer = tf.compat.v1.train.RMSPropOptimizer(\n learning_rate=learning_rate,\n decay=0.95,\n momentum=0.0,\n epsilon=0.00001,\n centered=True)\n q_net = AtariQNetwork(\n observation_spec,\n action_spec,\n conv_layer_params=conv_layer_params,\n fc_layer_params=fc_layer_params)\n agent = dqn_agent.DqnAgent(\n time_step_spec,\n action_spec,\n q_network=q_net,\n optimizer=optimizer,\n epsilon_greedy=epsilon,\n n_step_update=n_step_update,\n target_update_tau=target_update_tau,\n target_update_period=(\n target_update_period / ATARI_FRAME_SKIP / self._update_period),\n td_errors_loss_fn=common.element_wise_huber_loss,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=gradient_clipping,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=self._global_step)\n\n self._collect_policy = py_tf_policy.PyTFPolicy(agent.collect_policy)\n\n if self._do_eval:\n self._eval_policy = py_tf_policy.PyTFPolicy(\n epsilon_greedy_policy.EpsilonGreedyPolicy(\n policy=agent.policy,\n epsilon=self._eval_epsilon_greedy))\n\n py_observation_spec = self._env.observation_spec()\n py_time_step_spec = ts.time_step_spec(py_observation_spec)\n 
py_action_spec = policy_step.PolicyStep(self._env.action_spec())\n data_spec = trajectory.from_transition(\n py_time_step_spec, py_action_spec, py_time_step_spec)\n self._replay_buffer = py_hashed_replay_buffer.PyHashedReplayBuffer(\n data_spec=data_spec, capacity=replay_buffer_capacity)\n\n with tf.device('/cpu:0'):\n ds = self._replay_buffer.as_dataset(\n sample_batch_size=batch_size, num_steps=n_step_update + 1)\n ds = ds.prefetch(4)\n ds = ds.apply(tf.data.experimental.prefetch_to_device('/gpu:0'))\n\n with tf.device('/gpu:0'):\n self._ds_itr = tf.compat.v1.data.make_one_shot_iterator(ds)\n experience = self._ds_itr.get_next()\n self._train_op = agent.train(experience)\n\n self._env_steps_metric = py_metrics.EnvironmentSteps()\n self._step_metrics = [\n py_metrics.NumberOfEpisodes(),\n self._env_steps_metric,\n ]\n self._train_metrics = self._step_metrics + [\n py_metrics.AverageReturnMetric(buffer_size=10),\n py_metrics.AverageEpisodeLengthMetric(buffer_size=10),\n ]\n # The _train_phase_metrics average over an entire train iteration,\n # rather than the rolling average of the last 10 episodes.\n self._train_phase_metrics = [\n py_metrics.AverageReturnMetric(\n name='PhaseAverageReturn', buffer_size=np.inf),\n py_metrics.AverageEpisodeLengthMetric(\n name='PhaseAverageEpisodeLength', buffer_size=np.inf),\n ]\n self._iteration_metric = py_metrics.CounterMetric(name='Iteration')\n\n # Summaries written from python should run every time they are\n # generated.\n with tf.compat.v2.summary.record_if(True):\n self._steps_per_second_ph = tf.compat.v1.placeholder(\n tf.float32, shape=(), name='steps_per_sec_ph')\n self._steps_per_second_summary = tf.compat.v2.summary.scalar(\n name='global_steps_per_sec', data=self._steps_per_second_ph,\n step=self._global_step)\n\n for metric in self._train_metrics:\n metric.tf_summaries(\n train_step=self._global_step, step_metrics=self._step_metrics)\n\n for metric in self._train_phase_metrics:\n metric.tf_summaries(\n train_step=self._global_step,\n step_metrics=(self._iteration_metric,))\n self._iteration_metric.tf_summaries(train_step=self._global_step)\n\n if self._do_eval:\n with self._eval_summary_writer.as_default():\n for metric in self._eval_metrics:\n metric.tf_summaries(\n train_step=self._global_step,\n step_metrics=(self._iteration_metric,))\n\n self._train_dir = train_dir\n self._policy_exporter = policy_saver.PolicySaver(\n agent.policy, train_step=self._global_step)\n self._train_checkpointer = common.Checkpointer(\n ckpt_dir=train_dir,\n agent=agent,\n global_step=self._global_step,\n optimizer=optimizer,\n metrics=metric_utils.MetricsGroup(\n self._train_metrics + self._train_phase_metrics +\n [self._iteration_metric], 'train_metrics'))\n self._policy_checkpointer = common.Checkpointer(\n ckpt_dir=os.path.join(train_dir, 'policy'),\n policy=agent.policy,\n global_step=self._global_step)\n self._rb_checkpointer = common.Checkpointer(\n ckpt_dir=os.path.join(train_dir, 'replay_buffer'),\n max_to_keep=1,\n replay_buffer=self._replay_buffer)\n\n self._init_agent_op = agent.initialize()\n\n def game_over(self):\n return self._env.envs[0].game_over\n\n def run(self):\n \"\"\"Execute the train/eval loop.\"\"\"\n with tf.compat.v1.Session(\n config=tf.compat.v1.ConfigProto(allow_soft_placement=True)) as sess:\n # Initialize the graph.\n self._initialize_graph(sess)\n\n # Initial collect\n self._initial_collect()\n\n while self._iteration_metric.result() < self._num_iterations:\n # Train phase\n env_steps = 0\n for metric in 
self._train_phase_metrics:\n metric.reset()\n while env_steps < self._train_steps_per_iteration:\n env_steps += self._run_episode(\n sess, self._train_metrics + self._train_phase_metrics, train=True)\n for metric in self._train_phase_metrics:\n log_metric(metric, prefix='Train/Metrics')\n py_metric.run_summaries(\n self._train_phase_metrics + [self._iteration_metric])\n\n global_step_val = sess.run(self._global_step)\n\n if self._do_eval:\n # Eval phase\n env_steps = 0\n for metric in self._eval_metrics:\n metric.reset()\n while env_steps < self._eval_steps_per_iteration:\n env_steps += self._run_episode(\n sess, self._eval_metrics, train=False)\n\n py_metric.run_summaries(self._eval_metrics + [self._iteration_metric])\n if self._eval_metrics_callback:\n results = dict((metric.name, metric.result())\n for metric in self._eval_metrics)\n self._eval_metrics_callback(results, global_step_val)\n for metric in self._eval_metrics:\n log_metric(metric, prefix='Eval/Metrics')\n\n self._iteration_metric()\n\n self._train_checkpointer.save(global_step=global_step_val)\n self._policy_checkpointer.save(global_step=global_step_val)\n self._rb_checkpointer.save(global_step=global_step_val)\n\n export_dir = os.path.join(self._train_dir, 'saved_policy',\n 'step_' + ('%d' % global_step_val).zfill(8))\n self._policy_exporter.save(export_dir)\n common.save_spec(self._collect_policy.trajectory_spec,\n os.path.join(export_dir, 'trajectory_spec'))\n\n def _initialize_graph(self, sess):\n \"\"\"Initialize the graph for sess.\"\"\"\n self._train_checkpointer.initialize_or_restore(sess)\n self._rb_checkpointer.initialize_or_restore(sess)\n common.initialize_uninitialized_variables(sess)\n\n sess.run(self._init_agent_op)\n\n self._train_step_call = sess.make_callable(self._train_op)\n\n self._collect_timer = timer.Timer()\n self._train_timer = timer.Timer()\n self._action_timer = timer.Timer()\n self._step_timer = timer.Timer()\n self._observer_timer = timer.Timer()\n\n global_step_val = sess.run(self._global_step)\n self._timed_at_step = global_step_val\n\n # Call save to initialize the save_counter (need to do this before\n # finalizing the graph).\n self._train_checkpointer.save(global_step=global_step_val)\n self._policy_checkpointer.save(global_step=global_step_val)\n self._rb_checkpointer.save(global_step=global_step_val)\n sess.run(self._train_summary_writer.init())\n\n if self._do_eval:\n sess.run(self._eval_summary_writer.init())\n\n def _initial_collect(self):\n \"\"\"Collect initial experience before training begins.\"\"\"\n logging.info('Collecting initial experience...')\n time_step_spec = ts.time_step_spec(self._env.observation_spec())\n random_policy = random_py_policy.RandomPyPolicy(\n time_step_spec, self._env.action_spec())\n time_step = self._env.reset()\n while self._replay_buffer.size < self._initial_collect_steps:\n if self.game_over():\n time_step = self._env.reset()\n action_step = random_policy.action(time_step)\n next_time_step = self._env.step(action_step.action)\n self._replay_buffer.add_batch(trajectory.from_transition(\n time_step, action_step, next_time_step))\n time_step = next_time_step\n logging.info('Done.')\n\n def _run_episode(self, sess, metric_observers, train=False):\n \"\"\"Run a single episode.\"\"\"\n env_steps = 0\n time_step = self._env.reset()\n while True:\n with self._collect_timer:\n time_step = self._collect_step(\n time_step,\n metric_observers,\n train=train)\n env_steps += 1\n\n if self.game_over():\n break\n elif train and self._env_steps_metric.result() % 
self._update_period == 0:\n with self._train_timer:\n total_loss = self._train_step_call()\n global_step_val = sess.run(self._global_step)\n self._maybe_log(sess, global_step_val, total_loss)\n self._maybe_record_summaries(global_step_val)\n\n return env_steps\n\n def _observe(self, metric_observers, traj):\n with self._observer_timer:\n for observer in metric_observers:\n observer(traj)\n\n def _store_to_rb(self, traj):\n # Clip the reward to (-1, 1) to normalize rewards in training.\n traj = traj._replace(\n reward=np.asarray(np.clip(traj.reward, -1, 1)))\n self._replay_buffer.add_batch(traj)\n\n def _collect_step(self, time_step, metric_observers, train=False):\n \"\"\"Run a single step (or 2 steps on life loss) in the environment.\"\"\"\n if train:\n policy = self._collect_policy\n else:\n policy = self._eval_policy\n\n with self._action_timer:\n action_step = policy.action(time_step)\n with self._step_timer:\n next_time_step = self._env.step(action_step.action)\n traj = trajectory.from_transition(time_step, action_step, next_time_step)\n\n if next_time_step.is_last() and not self.game_over():\n traj = traj._replace(discount=np.array([1.0], dtype=np.float32))\n\n if train:\n self._store_to_rb(traj)\n\n # When AtariPreprocessing.terminal_on_life_loss is True, we receive LAST\n # time_steps when lives are lost but the game is not over. In this mode, the\n # replay buffer and agent's policy must see the life loss as a LAST step\n # and the subsequent step as a FIRST step. However, we do not want to\n # actually terminate the episode and metrics should be computed as if all\n # steps were MID steps, since life loss is not actually a terminal event\n # (it is mostly a trick to make it easier to propagate rewards backwards by\n # shortening episode durations from the agent's perspective).\n if next_time_step.is_last() and not self.game_over():\n # Update metrics as if this is a mid-episode step.\n next_time_step = ts.transition(\n next_time_step.observation, next_time_step.reward)\n self._observe(metric_observers, trajectory.from_transition(\n time_step, action_step, next_time_step))\n\n # Produce the next step as if this is the first step of an episode and\n # store to RB as such. 
The next_time_step will be a MID time step.\n reward = time_step.reward\n time_step = ts.restart(next_time_step.observation)\n with self._action_timer:\n action_step = policy.action(time_step)\n with self._step_timer:\n next_time_step = self._env.step(action_step.action)\n if train:\n self._store_to_rb(trajectory.from_transition(\n time_step, action_step, next_time_step))\n\n # Update metrics as if this is a mid-episode step.\n time_step = ts.transition(time_step.observation, reward)\n traj = trajectory.from_transition(time_step, action_step, next_time_step)\n\n self._observe(metric_observers, traj)\n\n return next_time_step\n\n def _maybe_record_summaries(self, global_step_val):\n \"\"\"Record summaries if global_step_val is a multiple of summary_interval.\"\"\"\n if global_step_val % self._summary_interval == 0:\n py_metric.run_summaries(self._train_metrics)\n\n def _maybe_log(self, sess, global_step_val, total_loss):\n \"\"\"Log some stats if global_step_val is a multiple of log_interval.\"\"\"\n if global_step_val % self._log_interval == 0:\n logging.info('step = %d, loss = %f', global_step_val, total_loss.loss)\n logging.info('%s', 'action_time = {}'.format(self._action_timer.value()))\n logging.info('%s', 'step_time = {}'.format(self._step_timer.value()))\n logging.info('%s', 'observer_time = {}'.format(\n self._observer_timer.value()))\n steps_per_sec = ((global_step_val - self._timed_at_step) /\n (self._collect_timer.value()\n + self._train_timer.value()))\n sess.run(self._steps_per_second_summary,\n feed_dict={self._steps_per_second_ph: steps_per_sec})\n logging.info('%.3f steps/sec', steps_per_sec)\n logging.info('%s', 'collect_time = {}, train_time = {}'.format(\n self._collect_timer.value(), self._train_timer.value()))\n for metric in self._train_metrics:\n log_metric(metric, prefix='Train/Metrics')\n self._timed_at_step = global_step_val\n self._collect_timer.reset()\n self._train_timer.reset()\n self._action_timer.reset()\n self._step_timer.reset()\n self._observer_timer.reset()\n\n\ndef get_run_args():\n \"\"\"Builds a dict of run arguments from flags.\"\"\"\n run_args = {}\n if FLAGS.num_iterations:\n run_args['num_iterations'] = FLAGS.num_iterations\n if FLAGS.initial_collect_steps:\n run_args['initial_collect_steps'] = FLAGS.initial_collect_steps\n if FLAGS.replay_buffer_capacity:\n run_args['replay_buffer_capacity'] = FLAGS.replay_buffer_capacity\n if FLAGS.train_steps_per_iteration:\n run_args['train_steps_per_iteration'] = FLAGS.train_steps_per_iteration\n if FLAGS.n_step_update:\n run_args['n_step_update'] = FLAGS.n_step_update\n if FLAGS.eval_steps_per_iteration:\n run_args['eval_steps_per_iteration'] = FLAGS.eval_steps_per_iteration\n return run_args\n\n\ndef main(_):\n logging.set_verbosity(logging.INFO)\n tf.compat.v1.enable_resource_variables()\n environment_name = FLAGS.environment_name\n if environment_name is None:\n environment_name = suite_atari.game(name=FLAGS.game_name)\n TrainEval(FLAGS.root_dir, environment_name, **get_run_args()).run()\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal",
"numpy.finfo",
"numpy.prod"
],
[
"tensorflow.compat.v1.placeholder",
"tensorflow.TensorSpec",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.compat.v2.summary.scalar",
"numpy.clip",
"tensorflow.compat.v1.train.RMSPropOptimizer",
"numpy.array",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.data.experimental.prefetch_to_device",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v2.summary.record_if",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.device",
"tensorflow.compat.v1.train.polynomial_decay",
"tensorflow.compat.v1.enable_resource_variables",
"tensorflow.math.equal",
"tensorflow.cast"
]
] |
cHemingway/MRCG_python |
[
"341f4ce646f5b1e25e3f4b9c16b56d27e1da13ff"
] |
[
"test_MRCG.py"
] |
[
"# Unit tests for MRCG, comparing against reference implementation\n\n# Chris Hemingway 2019, MIT License\n# See LICENSE file for details\n\nimport os\nimport sys\nimport unittest\nimport cProfile\nimport argparse\n\nimport scipy.io.wavfile, scipy.io.matlab\nimport numpy as np\nfrom matplotlib import transforms, pyplot as plt\nimport MRCG\n\nTEST_FILE = 'test_data/SNR103F3MIC021002_ch01'\n\n\nclass Test_mrcg(object):\n ''' Base class for testing MRCG '''\n\n # Args to set tolerance for np.testing.assert_allclose\n tolerance_kwargs = {\n 'rtol': 1e-7, \n 'atol': 0 # Don't check absolute tolerance, only relative\n }\n\n def setUp(self):\n script_path = os.path.dirname(os.path.abspath(__file__))\n # Load audio\n wav = os.path.join(script_path, TEST_FILE + '.wav')\n sr, audio = scipy.io.wavfile.read(wav)\n self.sampFreq = sr\n self.sig = audio.astype(float) / 32767 # Convert to range -1 to 1\n # Load matlab .mat file\n mat = os.path.join(script_path, TEST_FILE + '.mat')\n mat_dict = scipy.io.matlab.loadmat(mat)\n self.mat_dict = mat_dict\n self.mrcg = self.mat_dict['mrcg']\n\n # Define some constants\n # Each cochleogram is 64 long, and we have 4 of them, so 4 * 64 = 256\n # Note they are still 393 wide, which we do not explicitly state\n self.all_coch_len = 256\n\n\nclass Test_gammatone(Test_mrcg, unittest.TestCase):\n def test_value(self):\n ''' Compare gammatone value against MATLAB implementation '''\n known_g = self.mat_dict['g']\n\n # Scale using beta as recommended\n sig = self.sig\n beta = MRCG.get_beta(sig)\n sig = sig*beta\n sig = sig.reshape(len(sig), 1)\n our_g = MRCG.gammatone(sig, 64, self.sampFreq)\n\n # Check shape\n self.assertEqual(our_g.shape, known_g.shape)\n\n # Check values are close\n np.testing.assert_allclose(\n our_g, known_g, **Test_mrcg.tolerance_kwargs)\n\n def test_numChan(self):\n ''' Check channel count is correct '''\n sig = np.random.randn(10000)\n for num_chan in (32, 64, 128, 256, 255):\n g = MRCG.gammatone(sig, num_chan)\n self.assertEqual(num_chan, g.shape[0])\n\n\nclass Test_beta(Test_mrcg, unittest.TestCase):\n def test_value(self):\n ''' Compare beta value against MATLAB implementation '''\n good_beta = self.mat_dict['beta']\n our_beta = MRCG.get_beta(self.sig)\n # FIXME high tolerance of 0.1%, why?\n tolerance_kwargs = Test_mrcg.tolerance_kwargs\n tolerance_kwargs['rtol'] = 1e-04\n np.testing.assert_allclose(good_beta, our_beta, **tolerance_kwargs)\n\n\nclass Test_all_cochleagrams(Test_mrcg, unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n sig = self.sig\n beta = MRCG.get_beta(sig)\n sig = sig*beta\n sig = sig.reshape(len(sig), 1)\n self.g = MRCG.gammatone(sig, 64, self.sampFreq)\n\n def test_values(self):\n ''' Test all cochleagrams match MATLAB implementation '''\n # Get all cochleagrams and flatten\n c1, c2, c3, c4 = MRCG.all_cochleagrams(self.g, self.sampFreq)\n # Get what MATLAB generated\n good_all_cochleas = self.mrcg[0:self.all_coch_len]\n # Compare each individually. 
Each are 64 wide\n i = 0\n errors = []\n for c in [c1, c2, c3, c4]:\n try:\n np.testing.assert_allclose(c, good_all_cochleas[i:i+64],\n err_msg = f\"c{i//64 + 1}\",\n verbose=False)\n except AssertionError as e:\n errors.append(e)\n i += 64\n # Check if we got any errors\n self.assertEqual(len(errors), 0, \n msg=\"mismatch\" + \"\\n\".join( [ str(e) for e in errors] ))\n\n def test_concat(self):\n ''' Test all_cochs are correctly concatanated into MRCG '''\n # Could also have put this in Test_mrcg_extract instead\n c1, c2, c3, c4 = MRCG.all_cochleagrams(self.g, self.sampFreq)\n all_cochleas = np.concatenate([c1, c2, c3, c4], 0)\n # Get MRCG, should be [all_cochleas; delta; delta2]\n samp_mrcg = MRCG.mrcg_extract(self.sig, self.sampFreq)\n # Check they are _exactly_ equal, as concatanation should not modify\n np.testing.assert_equal(all_cochleas, samp_mrcg[0:self.all_coch_len])\n\n\nclass Test_mrcg_extract(Test_mrcg, unittest.TestCase):\n\n def test_extract(self):\n ''' Test final MRCG matches MATLAB implementation '''\n samp_mrcg = MRCG.mrcg_extract(self.sig, self.sampFreq)\n # Plot for reference\n self.plot_mrcg(samp_mrcg)\n # Check the type\n self.assertIsNotNone(samp_mrcg)\n self.assertIsInstance(samp_mrcg, np.ndarray)\n # Check size and values against original MATLAB code result\n self.assertEqual(self.mrcg.shape, samp_mrcg.shape)\n np.testing.assert_allclose(samp_mrcg, self.mrcg, **Test_mrcg.tolerance_kwargs)\n\n\n def test_all_cochleas(self):\n ''' Test cochleagrams in output are correct '''\n samp_mrcg = MRCG.mrcg_extract(self.sig, self.sampFreq)\n good_all_cochleas = self.mrcg[0:self.all_coch_len]\n our_all_cochleas = samp_mrcg[0:self.all_coch_len]\n\n # Compare\n np.testing.assert_allclose(our_all_cochleas, good_all_cochleas,\n **Test_mrcg.tolerance_kwargs)\n\n\n\n def plot_mrcg(self, mrcg, filename='mrcg_comparison.png'):\n ''' Utility function to save plot of our MRCG to a file '''\n fig, (ref_ax, our_ax, diff_ax) = plt.subplots(nrows=1, ncols=3, \n sharey=True)\n fig.set_size_inches(10, 7)\n format_kwargs = {\n 'cmap':'jet', # Use full range color map for clarity \n }\n \n ref_im = ref_ax.imshow(self.mrcg, **format_kwargs)\n ref_ax.set_title(\"MATLAB\")\n our_ax.imshow(mrcg, **format_kwargs)\n our_ax.set_title(\"Python\")\n \n # Plot relative difference\n diff = np.abs(self.mrcg - mrcg)\n diff_im = diff_ax.imshow(diff, **format_kwargs)\n diff_ax.set_title(\"abs(MATLAB - Python)\")\n\n # Add colorbar to difference\n diff_cbar = plt.colorbar(diff_im, ax=diff_ax, orientation='horizontal')\n diff_cbar.set_label(\"Difference\")\n\n # Add colorbar for total value\n cbar = plt.colorbar(ref_im, ax=[ref_ax,our_ax], orientation='horizontal')\n cbar.set_label(\"Value\")\n\n # Save figure, minimal padding/border\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.5)\n\n\nif __name__ == \"__main__\":\n # If we call python -m cProfile test_MRCG.py, we get no tests!\n # See https://stackoverflow.com/q/11645285\n # So instead we include profiling in the script directly. 
Not ideal\n\n # To make the off by default, we parse the args to look if profiling is \n # enabled _before_ we call unittest.main(), and hide the arg from it\n # See https://stackoverflow.com/a/44255084 for this trick\n parser = argparse.ArgumentParser()\n parser.add_argument('--profile', action='store_true', default=False)\n parser.add_argument('unittest_args', nargs='*')\n args = parser.parse_args()\n sys.argv[1:] = args.unittest_args # Remove any args not for unittest\n\n if args.profile:\n pr = cProfile.Profile()\n print(\"Running profiler on unit tests\")\n pr.enable()\n try: # Wrap in try so we still save stats on exception\n unittest.main()\n finally: # We don't want to _catch_ the exception as that would hide it\n pr.disable()\n pr.dump_stats(__file__ + \".prof\")\n else:\n unittest.main()\n"
] |
[
[
"numpy.concatenate",
"numpy.testing.assert_allclose",
"matplotlib.pyplot.colorbar",
"numpy.testing.assert_equal",
"matplotlib.pyplot.savefig",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"numpy.abs"
]
] |
Marcel-Rodekamp/qcdanalysistools |
[
"945c8201337ba0d52bc37267198d367bbe3e75e3"
] |
[
"test/Fitting/t_DiagonalLS_initialization.py"
] |
[
"import numpy as np\nimport qcdanalysistools.fitting as fitting\nimport qcdanalysistools.analysis as ana\n\n# number of data points i.e. gauge configurations\nN = 212\n\n# dimension i.e. size of temporal dimension\nD = 48\n\n# abscissa\nx = np.array([x for x in range(D)])\n\n# ordinate data\ny = np.array( [[ *x ] for _ in range(N)] )\n\n# bootstrap params\nbst_param = ana.BootstrapParams(N,100)\n# jackknife params\njkn_param = ana.JackknifeParams(N)\n# blocking params\nblk_param = ana.BlockingParams(N,50)\n\n# ordinate with plain\no_p = np.average(y,axis=0)\n# ordinate with bootstrap\no_b = ana.Bootstrap.est(y,bst_param,axis=0)\n# ordinate with jackknife\no_j = ana.Jackknife.est(y,jkn_param,axis=0)\n# ordinate with blocking\no_bl = ana.Blocking.est(y,blk_param,axis=0)\n\n# ordinate variance with plain\nov_p = np.var(y,axis=0)\n# ordinate variance with bootstrap\nov_b = ana.Bootstrap.var(y,bst_param,axis=0)\n# ordinate variance with jackknife\nov_j = ana.Jackknife.var(y,jkn_param,axis=0)\n# ordinate varince with blocking\nov_bl = ana.Blocking.var(y,blk_param,axis=0)\n\nprint(f\"Ordinate data shape = {y.shape}\")\nprint(f\"Abscissa shape = {x.shape}\\n\")\n\nprint(f\"Ordinate plain shape = {o_p.shape}\")\nprint(f\"Ordinate bootstrap shape = {o_b.shape}\")\nprint(f\"Ordinate jackknife shape = {o_j.shape}\")\nprint(f\"Ordinate blocking shape = {o_bl.shape}\\n\")\n\nprint(f\"Ordinate variance plain shape = {o_p.shape}\")\nprint(f\"Ordinate variance bootstrap shape = {o_b.shape}\")\nprint(f\"Ordinate variance jackknife shape = {o_j.shape}\")\nprint(f\"Ordinate variance blocking shape = {o_bl.shape}\\n\")\n\n# model\nm = fitting.MonomialModel(t_A0=0,t_order=1)\n\nprint(\"Creating fitting base with data and plain average\", end=\"... \")\nf = fitting.DiagonalLeastSquare(m,x,t_data=y)\nif not np.allclose( f.ordinate,o_p ):\n raise RuntimeError(f\"Ordinate computation failed \\n {f.ordinate} \\n {o_p}\")\nif not np.allclose( f.ordinate_var,ov_p ):\n raise RuntimeError(f\"Ordinate variance computation failed \\n {f.ordinate_var} \\n {ov_p}\")\nprint(\"worked\")\n\nprint(\"Creating fitting base with data and bootstrap average\", end=\"... \")\nf = fitting.DiagonalLeastSquare(m,x,t_data=y,t_analysis_params=bst_param)\nif not np.allclose( f.ordinate,o_b ):\n raise RuntimeError(f\"Ordinate computation failed \\n {f.ordinate} \\n {o_b}\")\nif not np.allclose( f.ordinate_var,ov_b ):\n raise RuntimeError(f\"Ordinate variance computation failed \\n {f.ordinate_var} \\n {ov_b}\")\nprint(\"worked\")\n\nprint(\"Creating fitting base with data and Jackknife average\", end=\"... \")\nf = fitting.DiagonalLeastSquare(m,x,t_data=y,t_analysis_params=jkn_param)\nif not np.allclose( f.ordinate,o_j ):\n raise RuntimeError(f\"Ordinate computation failed \\n {f.ordinate} \\n {o_j}\")\nif not np.allclose( f.ordinate_var,ov_j ):\n raise RuntimeError(f\"Ordinate variance computation failed \\n {f.ordinate_var} \\n {ov_j}\")\nprint(\"worked\")\n\nprint(\"Creating fitting base with data and blocking average\", end=\"... \")\nf = fitting.DiagonalLeastSquare(m,x,t_data=y,t_analysis_params=blk_param)\nif not np.allclose( f.ordinate,o_bl ):\n raise RuntimeError(f\"Ordinate computation failed \\n {f.ordinate} \\n {o_bl}\")\nif not np.allclose( f.ordinate_var,ov_bl ):\n raise RuntimeError(f\"Ordinate variance computation failed \\n {f.ordinate_var} \\n {ov_bl}\")\nprint(\"worked\")\n"
] |
[
[
"numpy.average",
"numpy.allclose",
"numpy.var"
]
] |
PaperCodeStorage/MLVSNet |
[
"967692542b73affb46715ba97728825ce541068c"
] |
[
"searchspace.py"
] |
[
"import numpy as np\nfrom pomegranate import MultivariateGaussianDistribution, GeneralMixtureModel\nimport logging\n\n\nclass SearchSpace(object):\n\n def reset(self):\n raise NotImplementedError\n\n def sample(self):\n raise NotImplementedError\n\n def addData(self, data, score):\n return\n\n\nclass ExhaustiveSearch(SearchSpace):\n\n def __init__(self,\n search_space=[[-3.0, 3.0], [-3.0, 3.0], [-10.0, 10.0]],\n search_dims=[7, 7, 3]):\n\n x_space = np.linspace(\n search_space[0][0], search_space[0][1],\n search_dims[0])\n\n y_space = np.linspace(\n search_space[1][0], search_space[1][1],\n search_dims[1])\n\n a_space = np.linspace(\n search_space[2][0], search_space[2][1],\n search_dims[2])\n\n X, Y, A = np.meshgrid(x_space, y_space, a_space) # create mesh grid\n\n self.search_grid = np.array([X.flatten(), Y.flatten(), A.flatten()]).T\n\n self.reset()\n\n def reset(self):\n return\n\n def sample(self, n=0):\n return self.search_grid\n\n\nclass ParticleFiltering(SearchSpace):\n def __init__(self, bnd=[1, 1, 10]):\n self.bnd = bnd\n self.reset()\n\n def sample(self, n=10):\n samples = []\n for i in range(n):\n if len(self.data) > 0:\n i_mean = np.random.choice(\n list(range(len(self.data))),\n p=self.score / np.linalg.norm(self.score, ord=1))\n sample = np.random.multivariate_normal(\n mean=self.data[i_mean], cov=np.diag(np.array(self.bnd)))\n else:\n sample = np.random.multivariate_normal(\n mean=np.zeros(len(self.bnd)),\n cov=np.diag(np.array(self.bnd) * 3))\n\n samples.append(sample)\n return np.array(samples)\n\n def addData(self, data, score):\n score = score.clip(min=1e-5) # prevent sum=0 in case of bad scores\n self.data = data\n self.score = score\n\n def reset(self):\n if len(self.bnd) == 2:\n self.data = np.array([[], []]).T\n else:\n self.data = np.array([[], [], []]).T\n self.score = np.ones(np.shape(self.data)[0])\n self.score = self.score / np.linalg.norm(self.score, ord=1)\n\n\nclass KalmanFiltering(SearchSpace):\n def __init__(self, bnd=[1, 1, 10]):\n self.bnd = bnd\n self.reset()\n\n def sample(self, n=10):\n return np.random.multivariate_normal(self.mean, self.cov, size=n)\n\n def addData(self, data, score):\n score = score.clip(min=1e-5) # prevent sum=0 in case of bad scores\n self.data = np.concatenate((self.data, data))\n self.score = np.concatenate((self.score, score))\n self.mean = np.average(self.data, weights=self.score, axis=0)\n self.cov = np.cov(self.data.T, ddof=0, aweights=self.score)\n\n def reset(self):\n self.mean = np.zeros(len(self.bnd))\n self.cov = np.diag(self.bnd)\n if len(self.bnd) == 2:\n self.data = np.array([[], []]).T\n else:\n self.data = np.array([[], [], []]).T\n self.score = np.array([])\n\n\nclass GaussianMixtureModel(SearchSpace):\n\n def __init__(self, n_comp=5, dim=3):\n self.dim = dim\n self.reset(n_comp)\n\n def sample(self, n=10):\n try:\n X1 = np.stack(self.model.sample(int(np.round(0.8 * n))))\n if self.dim == 2:\n mean = np.mean(X1, axis=0)\n std = np.diag([1.0, 1.0])\n gmm = MultivariateGaussianDistribution(mean, std)\n X2 = np.stack(gmm.sample(int(np.round(0.1 * n))))\n\n mean = np.mean(X1, axis=0)\n std = np.diag([1e-3, 1e-3])\n gmm = MultivariateGaussianDistribution(mean, std)\n X3 = np.stack(gmm.sample(int(np.round(0.1 * n))))\n\n else:\n mean = np.mean(X1, axis=0)\n std = np.diag([1.0, 1.0, 1e-3])\n gmm = MultivariateGaussianDistribution(mean, std)\n X2 = np.stack(gmm.sample(int(np.round(0.1 * n))))\n\n mean = np.mean(X1, axis=0)\n std = np.diag([1e-3, 1e-3, 10.0])\n gmm = MultivariateGaussianDistribution(mean, std)\n X3 = 
np.stack(gmm.sample(int(np.round(0.1 * n))))\n\n X = np.concatenate((X1, X2, X3))\n\n except ValueError:\n print(\"exception caught on sampling\")\n if self.dim == 2:\n mean = np.zeros(self.dim)\n std = np.diag([1.0, 1.0])\n gmm = MultivariateGaussianDistribution(mean, std)\n X = gmm.sample(int(n))\n else:\n mean = np.zeros(self.dim)\n std = np.diag([1.0, 1.0, 5.0])\n gmm = MultivariateGaussianDistribution(mean, std)\n X = gmm.sample(int(n))\n return X\n\n def addData(self, data, score):\n score = score.clip(min=1e-5)\n self.data = data\n self.score = score\n\n score_normed = self.score / np.linalg.norm(self.score, ord=1)\n try:\n model = GeneralMixtureModel.from_samples(\n MultivariateGaussianDistribution,\n n_components=self.n_comp,\n X=self.data,\n weights=score_normed)\n self.model = model\n except:\n logging.info(\"catched an exception\")\n\n def reset(self, n_comp=5):\n self.n_comp = n_comp\n\n if self.dim == 2:\n self.data = np.array([[], []]).T\n else:\n self.data = np.array([[], [], []]).T\n self.score = np.ones(np.shape(self.data)[0])\n self.score = self.score / np.linalg.norm(self.score, ord=1)\n if self.dim == 2:\n self.model = MultivariateGaussianDistribution(\n np.zeros(self.dim), np.diag([1.0, 1.0]))\n else:\n self.model = MultivariateGaussianDistribution(\n np.zeros(self.dim), np.diag([1.0, 1.0, 5.0]))\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.cov",
"numpy.zeros",
"numpy.round",
"numpy.shape",
"numpy.mean",
"numpy.random.multivariate_normal",
"numpy.diag",
"numpy.average",
"numpy.linspace",
"numpy.meshgrid"
]
] |
frankinspace/l2ss-py |
[
"29b88621c0fabe5d696aeffcc8f7039f108ff20b"
] |
[
"podaac/subsetter/xarray_enhancements.py"
] |
[
"# Copyright 2019, by the California Institute of Technology.\n# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.\n# Any commercial use must be negotiated with the Office of Technology\n# Transfer at the California Institute of Technology.\n#\n# This software may be subject to U.S. export control laws. By accepting\n# this software, the user agrees to comply with all applicable U.S. export\n# laws and regulations. User has the responsibility to obtain export\n# licenses, or other export authority as may be required before exporting\n# such information to foreign countries or providing access to foreign\n# persons.\n\n\"\"\"\n======================\nxarray_enhancements.py\n======================\n\nFunctions which improve upon existing xarray functionality, optimized\nfor this specific use-case.\n\"\"\"\n\nimport logging\nimport numpy as np\nimport xarray as xr\n\n\ndef get_indexers_from_1d(cond):\n \"\"\"\n Get indexers from a dataset with 1 dimension.\n\n Parameters\n ----------\n cond : xarray.Dataset\n Contains the result of the initial lat lon condition.\n\n Returns\n -------\n dict\n Indexer dictionary for the provided condition.\n \"\"\"\n cols = cond.values\n\n if not cols.any():\n logging.info(\"No data within the given bounding box.\")\n\n indexers = {\n cond.dims[0]: np.where(cols)[0]\n }\n return indexers\n\n\ndef get_indexers_from_nd(cond, cut):\n \"\"\"\n Get indexers from a dataset with more than 1 dimensions.\n\n Parameters\n ----------\n cond : xarray.Dataset\n Contains the result of the initial lat lon condition.\n cut : bool\n True if the scanline should be cut.\n\n Returns\n -------\n dict\n Indexer dictionary for the provided condition.\n \"\"\"\n\n rows = np.any(cond.values.squeeze(), axis=1)\n if cut:\n cols = np.any(cond.values.squeeze(), axis=0)\n else:\n cols = np.ones(len(cond.values[0]))\n\n # If the subsetted area is equal to the original area\n if np.all(rows) & np.all(cols):\n logging.info(\"Subsetted area equal to the original granule.\")\n\n # If the subsetted area is empty\n if not np.any(rows) | np.any(cols):\n logging.info(\"No data within the given bounding box.\")\n\n cond_shape_list = list(cond.shape)\n cond_list = list(cond.dims)\n output = [idx for idx, element in enumerate(cond_shape_list) if cond_shape_list[idx] == 1]\n for i in output:\n cond_list.pop(i)\n\n indexers = {\n cond_list[0]: np.where(rows)[0],\n cond_list[1]: np.where(cols)[0]\n }\n\n return indexers\n\n\ndef copy_empty_dataset(dataset):\n \"\"\"\n Copy an dataset into a new, empty dataset. 
This dataset should:\n * Contain the same structure as the input dataset (only include\n requested variables, if variable subset)\n * Contain the same global metadata as the input dataset\n * Contain a history field which describes this subset operation.\n\n Parameters\n ----------\n dataset: xarray.Dataset\n The dataset to copy into a empty dataset.\n\n Returns\n -------\n xarray.Dataset\n The new dataset which has no data.\n \"\"\"\n empty_dataset = xr.Dataset()\n for variable_name, variable in dataset.data_vars.items():\n empty_dataset[variable_name] = []\n empty_dataset[variable_name].attrs = variable.attrs\n # Copy global metadata\n empty_dataset.attrs = dataset.attrs\n return empty_dataset\n\n\ndef cast_type(var, var_type):\n \"\"\"\n Type cast a variable into a var type.\n\n Parameters\n ----------\n var: xarray.core.dataarray.DataArray\n The dataarray to be type casted.\n var_type: string\n New type the variable will be type casted to.\n Returns\n -------\n xarray.core.dataarray.DataArray\n The newly type casted variable.\n \"\"\"\n\n return var.astype(var_type)\n\n\ndef where(dataset, cond, cut):\n \"\"\"\n Return a dataset which meets the given condition.\n\n This is a modification of the existing xarray 'where' function.\n https://github.com/pydata/xarray/blob/master/xarray/core/common.py#L999\n\n Parameters\n ----------\n dataset : xarray.Dataset\n The dataset to filter and return.\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n cut : boolean\n True if the scanline should be cut, False if the scanline should\n not be cut.\n\n Returns\n -------\n xarray.Dataset\n The filtered Dataset\n\n Notes\n -----\n The `cond` variable contains a boolean mask of valid data indices.\n However in that mask, True represents valid data and False\n represents invalid data.\n \"\"\"\n if cond.values.ndim == 1:\n indexers = get_indexers_from_1d(cond)\n else:\n indexers = get_indexers_from_nd(cond, cut)\n # If any of the indexer dimensions are empty, return an empty dataset\n if not all(len(value) > 0 for value in indexers.values()):\n return copy_empty_dataset(dataset)\n\n indexed_cond = cond.isel(**indexers)\n indexed_ds = dataset.isel(**indexers)\n new_dataset = indexed_ds.where(indexed_cond)\n\n # Cast all variables to their original type\n for variable_name, variable in new_dataset.data_vars.items():\n original_type = indexed_ds[variable_name].dtype\n new_type = variable.dtype\n\n # Check if variable has no _FillValue. If so, use original data\n if '_FillValue' not in variable.attrs:\n\n if original_type != new_type:\n new_dataset[variable_name] = xr.apply_ufunc(cast_type, variable,\n str(original_type), dask='allowed',\n keep_attrs=True)\n\n # Replace nans with values from original dataset. 
If the\n # variable has more than one dimension, copy the entire\n # variable over, otherwise use a NaN mask to copy over the\n # relevant values.\n if len(variable.shape) > 1:\n new_dataset[variable_name] = indexed_ds[variable_name]\n else:\n nan_mask = np.isnan(variable.data)\n if nan_mask.any():\n variable.data[nan_mask] = indexed_ds[variable_name][nan_mask]\n\n new_dataset[variable_name].attrs = indexed_ds[variable_name].attrs\n variable.attrs = indexed_ds[variable_name].attrs\n new_dataset[variable_name].encoding['_FillValue'] = None\n variable.encoding['_FillValue'] = None\n\n else:\n # Manually replace nans with FillValue\n variable.data[np.isnan(variable.data)] = variable.attrs.get(\"_FillValue\")\n\n if original_type != new_type:\n new_dataset[variable_name] = xr.apply_ufunc(cast_type, variable,\n str(original_type), dask='allowed',\n keep_attrs=True)\n\n return new_dataset\n"
] |
[
[
"numpy.all",
"numpy.where",
"numpy.isnan",
"numpy.any"
]
] |
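Illustrative note (not part of the dataset row above): the `where`/`cast_type` code in the preceding row casts variables back to their original dtype because xarray's masking promotes integers to float so the masked entries can hold NaN, and it either casts back (when there is no `_FillValue`) or substitutes the fill value. A minimal standalone sketch of that behavior, assuming only standard xarray/numpy calls; the `-999` fill value is a hypothetical example.

import numpy as np
import xarray as xr

# An int16 variable and a boolean mask of "valid" entries.
da = xr.DataArray(np.array([1, 2, 3, 4], dtype="int16"), dims="x")
mask = xr.DataArray(np.array([True, True, False, True]), dims="x")

masked = da.where(mask)
print(masked.dtype)   # float64 -- promoted so the masked entry can hold NaN

# Mirroring the _FillValue branch above: replace NaN, then cast back.
filled = masked.fillna(-999).astype(da.dtype)
print(filled.dtype)   # int16
print(filled.values)  # [   1    2 -999    4]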
kylebarron/tablefill
|
[
"5f56a54c63939e03538d7530f5a64ede2b2455be"
] |
[
"tablefill/tablefill.py"
] |
[
"#!/usr/bin/env python2\n# encoding: utf-8\n\n\"\"\"Fill LaTeX template files with external inputs\n\nDescription\n-----------\n\ntablefill is a python module designed to fill LaTeX and Lyx tables\nwith output from text files (usually output from Stata or Matlab). The\noriginal tablefill does the same for LyX files only, and has fewer\nerror checks. Note this is intended both for command line _AND_ script\nusage. Hence both the following are valid\n\n>>> from tablefill import tablefill\n\n$ tablefill --help\n\nUsage\n-----\n\ntablefill [-h] [-v] [FLAGS] [-i [INPUT [INPUT ...]]] [-o OUTPUT]\n [--pvals [PVALS [PVALS ...]]] [--stars [STARS [STARS ...]]]\n [--xml-tables [INPUT [INPUT ...]]] [-t {auto,lyx,tex,md}]\n TEMPLATE\n\nFill tagged tables in LaTeX, LyX, and Markdown files with external text tables\n\npositional arguments:\n TEMPLATE Code template\n\noptional arguments:\n -h, --help show this help message and exit\n -v, --version Show current version\n -i [INPUT [INPUT ...]], --input [INPUT [INPUT ...]]\n Input files with tables (default: TEMPLATE_table)\n -o OUTPUT, --output OUTPUT\n Processed template file (default: TEMPLATE_filled)\n -t {auto,lyx,tex,md}, --type {auto,lyx,tex,md}\n Template file type (default: auto)\n --pvals [PVALS [PVALS ...]]\n Significance thresholds\n --stars [STARS [STARS ...]]\n Stars for sig thresholds (enclose each entry in quotes)\n --xml-tables [INPUT [INPUT ...]]\n Files with custom xml combinations.\n\nflags:\n -f, --force Name input/output automatically\n -c, --compile Compile output\n -b, --bibtex Run bibtex on .aux file and re-compile\n -fc, --fill-comments Fill in commented out placeholders.\n --numpy-syntax Numpy syntax for custom XML tables.\n --use-floats Force floats when passing objects to custom XML python.\n --ignore-xml Ignore XML in template comments.\n --verbose Verbose printing (for debugging)\n --silent Try to say nothing\n\nFor details on the files and the replace engine, see the online documentation.\n\n https://mcaceresb.github.io/tablefill/getting-started.html\n\nWARNING\n-------\n\nThe program currently does not handle trailing comments. If a line\ndoesn't start with a comment, it will replace everything in that line,\neven if there is a comment halfway through.\n\nExamples\n--------\n\nIf you installed the program via PIP, then simply run\n\n$ ls\ntest.tex\ntest_table.txt\n$ tablefill test.tex --force --silent\n\nIf you have a copy of `tablefill.py`, then run\n\n$ ls\ntablefill.py\ntest.tex\ntest_table.txt\ntest_filled.txt\n$ python tablefill.py test.tex -i test_table.txt -o output.tex --verbose\n\nNotes\n-----\n\nSeveral try-catch pairs and error checks are redundant because right\nnow this may also be run from python and not just from the command line\n(done for backwards compatibility's sake).\n\nI also specify python 2 because I use python 3 on my local machine (as\neveryone should) but am forced to use python 2 over ssh (as MIT servers\navailable to me come with python 2.6).\n\"\"\"\n\n# NOTE: For all my personal projects I import the print function from\n# the future. You should do that also. 
Seriously (:\n\nfrom __future__ import division, print_function\nfrom os import linesep, path, access, W_OK, system, chdir, remove\nfrom decimal import Decimal, ROUND_HALF_UP\nfrom collections import Iterable as Iter\nfrom traceback import format_exc\nfrom operator import itemgetter\nfrom sys import exit as sysexit\nfrom sys import version_info\nfrom tempfile import mktemp\nimport xml.etree.ElementTree as xml\nimport argparse\nimport re\ntry:\n import numpy\n numpyok = True\nexcept:\n numpyok = False\n\n__program__ = \"tablefill.py\"\n__usage__ = \"\"\"[-h] [-v] [FLAGS] [-i [INPUT [INPUT ...]]] [-o OUTPUT]\n [--pvals [PVALS [PVALS ...]]] [--stars [STARS [STARS ...]]]\n [--xml-tables [INPUT [INPUT ...]]] [-t {auto,lyx,tex,md}]\n TEMPLATE\"\"\"\n__purpose__ = \"Fill tagged tables in LaTeX files with external text tables\"\n__author__ = \"Mauricio Caceres <caceres@nber.org>\"\n__created__ = \"Thu Jun 18, 2015\"\n__updated__ = \"Tue Feb 26, 2019\"\n__version__ = __program__ + \" version 0.9.3 updated \" + __updated__\n\n# Define basestring in a backwards-compatible way\ntry:\n \"\" is basestring\nexcept NameError:\n basestring = str\n\n\ndef main():\n \"\"\"\n WARNING: This function expects command-line inputs to exist.\n \"\"\"\n\n fill = tablefill_internals_cliparse()\n fill.get_input_parser()\n fill.get_parsed_arguments()\n fill.get_argument_strings()\n fill.get_file_type()\n print_verbose(fill.verbose, \"Arguments look OK. Will run tablefill.\")\n\n exit, exit_msg = tablefill(template = fill.template,\n input = fill.input,\n output = fill.output,\n filetype = fill.ext,\n verbose = fill.verbose,\n silent = fill.silent,\n pvals = fill.pvals,\n stars = fill.stars,\n fillc = fill.fillc,\n legacy_parsing = fill.legacy_parsing,\n numpy_syntax = fill.numpy_syntax,\n use_floats = fill.use_floats,\n ignore_xml = fill.ignore_xml,\n xml_tables = fill.xml_tables)\n\n if exit == 'SUCCESS':\n fill.get_compiled()\n sysexit(0)\n elif exit == 'WARNING':\n print_silent(fill.silent, \"Exit status came with a warning\")\n print_silent(fill.silent, \"Output might not be as expected!\")\n print_silent(fill.silent, \"Rerun program with --verbose option.\")\n fill.get_compiled()\n sysexit(-1)\n elif exit == 'ERROR':\n fillerror_msg = 'ERROR while filling table.'\n fillerror_msg += ' Check function call.' 
+ linesep\n print_silent(fill.silent, fillerror_msg)\n fill.parser.print_usage()\n sysexit(1)\n\n\n# Backwards-compatible file concatenation\ndef concat_files(flist):\n if version_info >= (3, 0):\n readlist = [open(fn, 'r', newline = None).readlines() for fn in flist]\n else:\n readlist = [open(fn, 'rU').readlines() for fn in flist]\n\n return sum(readlist, [])\n\n\n# Backwards-compatible string formatting\ndef compat_format(x):\n if version_info >= (2, 7):\n return format(x, ',d')\n else:\n import locale\n locale.setlocale(locale.LC_ALL, 'en_US')\n return locale.format(\"%d\", x, grouping = True)\n\n\n# Backwards-compatible list flattening\n# http://stackoverflow.com/questions/2158395/\ndef flatten(l):\n if version_info >= (3, 0):\n for el in l:\n if isinstance(el, Iter) and not isinstance(el, (str, bytes)):\n for sub in flatten(el):\n yield sub\n else:\n yield el\n else:\n for el in l:\n if isinstance(el, Iter) and not isinstance(el, basestring):\n for sub in flatten(el):\n yield sub\n else:\n yield el\n\n\ndef tolist(anything):\n return anything if isinstance(anything, list) else [anything]\n\n\ndef tolist2(anything):\n return list(anything) if hasattr(anything, '__iter__') else [anything]\n\n\ndef print_verbose(prints, stuff):\n if prints:\n print(stuff)\n\n\ndef print_silent(silence, stuff):\n if not silence:\n print(stuff)\n\n\ndef custom_convert(x, func):\n if isinstance(x, func):\n return x\n else:\n try:\n return func(x)\n except:\n return None\n\n\ndef nested_convert(item, func):\n if hasattr(item, '__iter__') and not isinstance(item, basestring):\n return [nested_convert(x, func) for x in item]\n\n return custom_convert(item, func)\n\n\n# ---------------------------------------------------------------------\n# tablefill\n\ndef tablefill(silent = False,\n verbose = True,\n filetype = 'auto',\n pvals = [0.1, 0.05, 0.01],\n stars = ['*', '**', '***'],\n fillc = False,\n legacy_parsing = False,\n numpy_syntax = False,\n use_floats = False,\n ignore_xml = False,\n xml_tables = None,\n **kwargs):\n \"\"\"Fill LaTeX, LyX, or Markdown template files with external inputs\n\n Description\n -----------\n\n tablefill is a python function designed to fill LaTeX, LyX, or Markdown\n tables with output from text files (usually output from Stata or Matlab).\n The original tablefill.py does the same but only for LyX files, and\n has fewer error checks. 
The regexps are also slightly different.\n\n Required Input\n --------------\n\n template : str\n Name of user-written document to use as basis for update\n input : str\n Space-separated list of files with tables to be used in update.\n output : str\n Filled template to be produced.\n\n For details on the files and the replace engine, see the online documentation.\n\n https://mcaceresb.github.io/tablefill/getting-started.html\n\n Optional Input\n --------------\n verbose : bool\n print a lot of info\n silent : bool\n try to print nothing at all\n filetype : str\n auto, lyx, tex, or md\n\n Output\n ------\n exit : str\n One of SUCCESS, WARNING, ERROR\n exit_msg : str\n Details on the exit status\n\n\n Usage\n -----\n exit, exit_msg = tablefill(template = 'template_file',\n input = 'input_file(s)',\n output = 'output_file')\n \"\"\"\n try:\n verbose = verbose and not silent\n logmsg = \"Parsing arguments...\"\n print_verbose(verbose, logmsg)\n fill_engine = tablefill_internals_engine(filetype,\n verbose,\n silent,\n pvals,\n stars,\n fillc,\n legacy_parsing,\n numpy_syntax,\n use_floats,\n ignore_xml,\n xml_tables)\n\n fill_engine.get_parsed_arguments(kwargs)\n fill_engine.get_file_type()\n fill_engine.get_regexps()\n\n logmsg = \"Parsing tables in into dictionary:\" + linesep + '\\t'\n logmsg += (linesep + '\\t').join(tolist(fill_engine.input))\n print_verbose(verbose, logmsg)\n fill_engine.get_parsed_tables()\n\n logmsg = \"Searching for labels in template:\" + linesep + '\\t'\n logmsg += (linesep + '\\t').join(tolist(fill_engine.template))\n print_verbose(verbose, logmsg + linesep)\n fill_engine.get_filled_template()\n\n logmsg = \"Adding warning that this was automatically generated...\"\n print_verbose(verbose, logmsg)\n fill_engine.get_notification_message()\n\n logmsg = \"Writing to output file '%s'\" % fill_engine.output\n print_verbose(verbose, logmsg)\n fill_engine.write_to_output(fill_engine.filled_template)\n\n logmsg = \"Wrapping up...\" + linesep\n print_verbose(verbose, logmsg)\n fill_engine.get_exit_message()\n print_silent(silent, fill_engine.exit + '!')\n print_silent(silent, fill_engine.exit_msg)\n return fill_engine.exit, fill_engine.exit_msg\n except:\n exit_msg = format_exc()\n exit = 'ERROR'\n print_silent(silent, exit + '!')\n print_silent(silent, exit_msg)\n return exit, exit_msg\n\n# ---------------------------------------------------------------------\n# tablefill_internals_cliparse\n\n\nclass tablefill_internals_cliparse:\n \"\"\"\n WARNING: Internal class to parse arguments to pass to tablefill\n \"\"\"\n def __init__(self):\n self.compiler = {'tex': \"xelatex \",\n 'lyx': \"lyx -e pdf2 \",\n 'md': \"pandoc -i \"}\n self.bibtex = {'tex': \"bibtex \",\n 'lyx': \"echo Not sure how to run BiBTeX via LyX on \",\n 'md': \"echo Not sure how to run BiBTeX via pandoc on \"}\n\n def get_input_parser(self):\n \"\"\"\n Parse command-line arguments using argparse; return parser\n \"\"\"\n parser_desc = __purpose__\n parser_prog = __program__\n # parser_use = __program__ + ' ' + __usage__\n parser_version = __version__\n parser = argparse.ArgumentParser(prog = parser_prog,\n description = parser_desc)\n parser.add_argument('-v', '--version',\n action = 'version',\n version = parser_version,\n help = \"Show current version\")\n parser.add_argument('template',\n nargs = 1,\n type = str,\n metavar = 'TEMPLATE',\n help = \"Code template\")\n parser.add_argument('-i', '--input',\n dest = 'input',\n type = str,\n nargs = '*',\n metavar = 'INPUT',\n default = None,\n help = \"Input 
files with tables\"\n \" (default: INPUT_table)\",\n required = False)\n parser.add_argument('-o', '--output',\n dest = 'output',\n type = str,\n nargs = 1,\n metavar = 'OUTPUT',\n default = None,\n help = \"Processed template file\"\n \" (default: INPUT_filled)\",\n required = False)\n parser.add_argument('-t', '--type',\n dest = 'filetype',\n type = str,\n nargs = 1,\n choices = ['auto', 'lyx', 'tex', 'md'],\n default = ['auto'],\n help = \"Template file type (default: auto)\",\n required = False)\n parser.add_argument('--pvals',\n dest = 'pvals',\n type = str,\n nargs = '*',\n default = ['0.1', '0.05', '0.01'],\n help = \"Significance thresholds\",\n required = False)\n parser.add_argument('--stars',\n dest = 'stars',\n type = str,\n nargs = '*',\n default = ['*', '**', '***'],\n help = \"Stars for sig thresholds \"\n \"(enclose each in quotes)\",\n required = False)\n parser.add_argument('-f', '--force',\n dest = 'force',\n action = 'store_true',\n help = \"Name input/output automatically\",\n required = False)\n parser.add_argument('-c', '--compile',\n dest = 'compile',\n action = 'store_true',\n help = \"Compile output\",\n required = False)\n parser.add_argument('-b', '--bibtex',\n dest = 'bibtex',\n action = 'store_true',\n help = \"Compile BiBTeX\",\n required = False)\n parser.add_argument('-fc', '--fill-comments',\n dest = 'fill_comments',\n action = 'store_true',\n help = \"Fill placeholders in comments\",\n required = False)\n parser.add_argument('--ignore-xml',\n dest = 'ignore_xml',\n action = 'store_true',\n help = \"Ignore XML in template comments.\",\n required = False)\n parser.add_argument('--legacy-parsing',\n dest = 'legacy_parsing',\n action = 'store_true',\n help = \"Legacy parsing for XML tables.\",\n required = False)\n parser.add_argument('--numpy-syntax',\n dest = 'numpy_syntax',\n action = 'store_true',\n help = \"Numpy syntax for custom XML tables.\",\n required = False)\n parser.add_argument('--use-floats',\n dest = 'use_floats',\n action = 'store_true',\n help = \"Use floats for custom XML python.\",\n required = False)\n parser.add_argument('--xml-tables',\n dest = 'xml_tables',\n type = str,\n nargs = '*',\n metavar = 'INPUT',\n default = None,\n help = \"Files with custom XML combinations.\",\n required = False),\n parser.add_argument('--verbose',\n dest = 'verbose',\n action = 'store_true',\n help = \"Verbose printing\",\n required = False)\n parser.add_argument('--silent',\n dest = 'silent',\n action = 'store_true',\n help = \"No printing\",\n required = False)\n self.parser = parser\n\n def get_parsed_arguments(self):\n \"\"\"\n Get arguments; if input and output names are missing, guess them\n (only guess with the --force option, otherwise don't run).\n \"\"\"\n args = self.parser.parse_args()\n missing_args = []\n missing_args += ['INPUT'] if args.input is None else []\n missing_args += ['OUTPUT'] if args.output is None else []\n if missing_args != []:\n if not args.force:\n isare = ' is ' if len(missing_args) == 1 else ' are '\n missing_args_msg = ' and '.join(missing_args)\n missing_args_msg += isare + 'missing without --force option.'\n raise KeyError(missing_args_msg)\n else:\n template_name = path.basename(args.template[0])\n if 'INPUT' in missing_args:\n args.input = self.rename_file(template_name,\n '_table', 'txt')\n if 'OUTPUT' in missing_args:\n args.output = self.rename_file(template_name,\n '_filled')\n\n self.args = args\n\n def rename_file(self, base, add, ext = None):\n out = path.splitext(base)\n add += out[-1] if ext is None else '.' 
+ ext\n return [out[0] + add]\n\n def get_argument_strings(self):\n \"\"\"\n Get arguments as strings to pass to tablefill\n \"\"\"\n self.template = path.abspath(self.args.template[0])\n self.input = ' '.join([path.abspath(f) for f in self.args.input])\n self.output = path.abspath(self.args.output[0])\n self.silent = self.args.silent\n self.verbose = self.args.verbose and not self.args.silent\n self.stars = self.args.stars\n self.fillc = self.args.fill_comments\n self.legacy_parsing = self.args.legacy_parsing\n self.numpy_syntax = self.args.numpy_syntax\n self.use_floats = self.args.use_floats\n self.ignore_xml = self.args.ignore_xml\n self.xml_tables = self.args.xml_tables\n try:\n self.pvals = [float(p) for p in self.args.pvals]\n assert all([(0 < p < 1) for p in self.pvals])\n except:\n raise ValueError(\"--pvals only takes numbers between 0 and 1\")\n\n args_msg = linesep + \"I found these arguments:\"\n args_msg += linesep + \"template = %s\" % self.template\n args_msg += linesep + \"input = %s\" % self.input\n args_msg += linesep + \"output = %s\" % self.output\n args_msg += linesep\n print_verbose(self.verbose, args_msg)\n\n def get_file_type(self):\n fname = path.basename(self.template)\n ext = path.splitext(fname)[-1].lower().strip('. ')\n inext = self.args.filetype[0].lower()\n if inext not in ['auto', 'tex', 'lyx', 'md', 'markdown']:\n unknown_type = \"Type '%s' not allowed. Expected {auto,lyx,tex}.\"\n unknown_type = unknown_type % inext\n raise KeyError(unknown_type)\n elif inext == 'auto':\n if ext not in ['tex', 'lyx', 'md', 'markdown']:\n unknown_type = \"File type '%s' not known.\"\n unknown_type += \" Expecting .lyx, .tex, or .md file.\"\n unknown_type = unknown_type % ext\n raise KeyError(unknown_type)\n else:\n if ext in ['md', 'markdown']:\n self.ext = 'md'\n else:\n self.ext = ext.lower()\n logmsg = \"NOTE: Automatically detected input type as %s\" % ext\n print_verbose(self.verbose, logmsg)\n else:\n self.ext = inext\n if ext != inext:\n mismatch_msg = \"NOTE: Provided template type '%s' \"\n mismatch_msg += \"does not match detected template type '%s'. \"\n mismatch_msg += linesep + \"Using program associated with '%s'\"\n mismatch_msg = mismatch_msg % (inext, ext, inext)\n print_verbose(self.verbose, mismatch_msg + linesep)\n\n def get_compiled(self):\n \"\"\"\n Compile the filled template with the corresponding program.\n \"\"\"\n\n if not self.args.compile and self.args.bibtex:\n print(\"NOTE: Cannot run BiBTeX without compiling.\" + linesep)\n\n if self.args.compile:\n chdir(path.dirname(path.abspath(self.output)))\n compile_program = self.compiler[self.ext]\n compile_program += ' ' + self.output\n\n bibtex_auxfile = path.splitext(path.basename(self.output))[0]\n bibtex_program = self.bibtex[self.ext]\n bibtex_program += ' ' + bibtex_auxfile + '.aux'\n\n logmsg = \"Compiling in beta! Use with caution. 
Running\"\n print_verbose(self.verbose, logmsg)\n print_verbose(self.verbose, compile_program + linesep)\n system(compile_program + linesep)\n if self.args.bibtex:\n system(bibtex_program + linesep)\n system(compile_program + linesep)\n system(compile_program + linesep)\n\n\n# ---------------------------------------------------------------------\n# tablefill_internals_engine\n\nclass tablefill_internals_engine:\n \"\"\"\n WARNING: Internal class used by tablefill_tex\n \"\"\"\n def __init__(self,\n filetype = 'auto',\n verbose = True,\n silent = False,\n pvals = [0.1, 0.05, 0.01],\n stars = ['*', '**', '***'],\n fillc = False,\n legacy_parsing = False,\n numpy_syntax = False,\n use_floats = False,\n ignore_xml = False,\n xml_tables = None):\n\n # Get file type\n self.filetype = filetype.lower()\n if self.filetype not in ['auto', 'lyx', 'tex', 'md']:\n unknown_type = \"File type '%s' not known.\"\n unknown_type += \" Expecting 'auto' or a .lyx, .tex, or .md file.\"\n unknown_type = unknown_type % filetype\n raise KeyError(unknown_type)\n\n self.warn_msg = {'nomatch': '',\n 'notable': '',\n 'nolabel': '',\n 'toolong': ''}\n self.warnings = {'nomatch': [],\n 'notable': [],\n 'nolabel': [],\n 'toolong': []}\n self.warn_pre = \"\"\n self.verbose = verbose and not silent\n self.silent = silent\n\n while len(pvals) > len(stars):\n i = 1\n while '*' * i in stars:\n i += 1\n stars += ['*' * i]\n\n stars = stars[:len(pvals)]\n starlist = [(p, s) for (p, s) in zip(pvals, stars)]\n starlist.sort(key = lambda p: p[0], reverse = True)\n self.pvals = [p for (p, s) in starlist]\n self.stars = [s for (p, s) in starlist]\n self.fillc = fillc\n self.legacy_parsing = legacy_parsing\n self.numpy_syntax = numpy_syntax\n self.use_floats = use_floats\n self.ignore_xml = ignore_xml\n self.xml_tables = xml_tables\n\n def get_parsed_arguments(self, kwargs):\n \"\"\"\n Gets template, input, and output from kwargs with checks for\n - All arguments are there as strings\n - All files exist\n - Output directory exists and is writable\n \"\"\"\n args = ['input', 'template', 'output']\n\n # XX\n missing_args = list(filter(lambda arg: arg not in kwargs.keys(), args))\n if missing_args != []:\n isare = \" is \" if len(missing_args) == 1 else \" are \"\n missing_args_msg = \" and \".join(missing_args)\n missing_args_msg += isare + \"missing. 
Check function call.\"\n raise KeyError(missing_args_msg)\n\n # XX\n m = filter(lambda t: not isinstance(t[1], basestring), kwargs.items())\n mismatched_types = list(m)\n if mismatched_types != []:\n msg = \"Expected str for '%s' but got type '%s'\"\n msg = [msg % (k, v.__class__.__name__)\n for k, v in mismatched_types]\n mismatched_msg = linesep.join(msg)\n raise TypeError(mismatched_msg)\n\n self.template = path.abspath(kwargs['template'])\n self.output = path.abspath(kwargs['output'])\n self.input = [path.abspath(ins) for ins in kwargs['input'].split()]\n\n infiles = [self.template] + self.input\n missing_files = list(filter(lambda f: not path.isfile(f), infiles))\n if missing_files != []:\n missing_files_msg = \"Please check the following are available:\"\n missing_files_msg += linesep + linesep.join(missing_files)\n raise IOError(missing_files_msg)\n\n outdir = path.split(self.output)[0]\n missing_path = not path.isdir(outdir)\n if missing_path:\n missing_outdir_msg = \"Please check the directory exists:\"\n missing_outdir_msg += outdir\n raise IOError(missing_outdir_msg)\n\n cannot_write = not access(outdir, W_OK)\n if cannot_write:\n cannot_write_msg = \"Please check you have write access to: \"\n cannot_write_msg += outdir\n raise IOError(cannot_write_msg)\n\n def get_file_type(self):\n \"\"\"\n Get file type and check if it matches the compilation type that\n was requested, if one was requested.\n \"\"\"\n fname = path.basename(self.template)\n ext = path.splitext(fname)[-1].lower().strip('. ')\n inext = self.filetype\n if inext == 'auto':\n if ext not in ['tex', 'lyx', 'md', 'markdown']:\n unknown_type = \"Option filetype = 'auto' detected type '%s'\"\n unknown_type += \" but was expecting a .lyx or .tex file.\"\n unknown_type = unknown_type % ext\n raise KeyError(unknown_type)\n else:\n if ext in ['md', 'markdown']:\n self.filetype = 'md'\n else:\n self.filetype = ext.lower()\n logmsg = \"NOTE: Automatically detected input type as %s\" % ext\n print_verbose(self.verbose, logmsg)\n elif ext != inext:\n mismatch_msg = \"NOTE: Provided template type '%s' \"\n mismatch_msg += \"does not match detected template type '%s'\"\n mismatch_msg += linesep + \"Will use program associated with '%s'\"\n mismatch_msg = mismatch_msg % (inext, ext, inext)\n print_verbose(self.verbose, mismatch_msg + linesep)\n\n def get_regexps(self):\n \"\"\"\n Define the regular expressions to use to find a token to fill,\n the start/end of a table, etc. 
based on the file type.\n \"\"\"\n # The regexes are looking for\n # - matche: strings to escape (&, %)\n # - match0: either matcha or matchb\n # - matcha: ### for non-numeric matches or #*# for p-val parsing\n # - matchb: numeric matches (#\\d+%?#, \\#\\d+,?\\#)\n # - matchc: (-?)integer(.decimal)?\n # - matchd: absolute value\n # - comments: comment\n self.tags = '^<Tab:(.+)>' + linesep\n self.matche = r'[^\\\\](%|&)'\n self.match0 = r'\\\\?#\\|?((\\d+)(,?|\\\\?%)?|\\\\?(#|\\*))\\|?\\\\?#'\n self.matcha = r'\\\\?#\\\\?(#|\\*)\\\\?#'\n self.matchb = r'\\\\?#\\|?(\\d+)(,?|\\\\?%)\\|?\\\\?#'\n self.matchc = '(-?\\d+)(\\.?\\d*)'\n self.matchd = r'\\\\?#\\|.{1,4}\\|\\\\?#'\n self.comments = '^\\s*%'\n\n # TODO: Allow custom regexes!\n dictRegexes = {\n 'tex': {\n 'begin': r'.*\\\\begin{table}.*',\n 'end': r'.*\\\\end{table}.*',\n 'label': r'.*\\\\label{tab:(.+)}'\n },\n 'lyx': {\n 'begin': r'.*\\\\begin_inset Float table.*',\n 'end': r'</lyxtabular>',\n 'label': r'name \"tab:(.+)\"'\n },\n 'md': {\n 'begin': r'(^<!--.*tablefill:start.*-->$)|(^\\s*\\\\begin{table}.*)',\n 'end': r'(^<!--.*tablefill:end.*-->$)|(.*\\\\end{table}.*)',\n 'label': r'(?:^<!--.*\\b|.*\\\\label{)tab:(.+)(?:\\b.*-->$|})'\n }\n # 'md': {\n # 'begin': r'^<!--.*tablefill:start.*-->$',\n # 'end': r'^<!--.*tablefill:end.*-->$',\n # 'label': r'^<!--.*\\btab:(.+)\\b.*-->$'\n # }\n }\n\n if self.filetype == 'tex':\n self.begin = dictRegexes['tex']['begin']\n self.end = dictRegexes['tex']['end']\n self.label = dictRegexes['tex']['label']\n elif self.filetype == 'lyx':\n self.begin = dictRegexes['lyx']['begin']\n self.end = dictRegexes['lyx']['end']\n self.label = dictRegexes['lyx']['label']\n elif self.filetype == 'md':\n self.begin = dictRegexes['md']['begin']\n self.end = dictRegexes['md']['end']\n self.label = dictRegexes['md']['label']\n\n def get_parsed_tables(self):\n \"\"\"\n Parse table file(s) into a dictionary with tags as keys and\n lists of table entries as values\n \"\"\"\n\n # Read in all the tables\n parse_data = concat_files(self.input)\n ctables = {}\n for row in parse_data:\n if re.match(self.tags, row, flags = re.IGNORECASE):\n tag = re.findall(self.tags, row, flags = re.IGNORECASE)\n tag = tag[0].lower()\n ctables[tag] = []\n else:\n clean_row_entries = [e.strip() for e in row.split('\\t')]\n ctables[tag] += [clean_row_entries]\n\n if self.xml_tables is None and not self.ignore_xml:\n if self.legacy_parsing:\n self.parse_xml_file_legacy(ctables,\n self.template,\n prefix = '^%\\s*')\n else:\n self.parse_xml_file(ctables, self.template, prefix = '^%\\s*')\n else:\n if self.legacy_parsing:\n self.parse_xml_file_legacy(ctables,\n self.xml_tables,\n prefix = '')\n else:\n self.parse_xml_file(ctables, self.xml_tables, prefix = '')\n\n # Read in actual and custom tables\n # self.tables = {k: self.filter_missing(v) for k, v in tables.items()}\n self.tables = dict((k, self.filter_missing(list(flatten(v))))\n for (k, v) in ctables.items())\n\n def parse_xml_file(self, ctables, xml_input, prefix = ''):\n \"\"\"Parse custom tabs in comments/XML files\n\n Note that the parsing here is VERY crude (you will note it uses\n a combination of XML parsing and regexes). This is more or less\n intentional, since I want to be inflexible when using this\n feature as it is VERY experimental. 
As it becomes stable the\n function may move to proper XML parsing.\n\n Args:\n ctables (dict): Dictionary with input tables\n xml_input (list): Template or XML files with custom tags\n\n Kwargs:\n prefix (str): regex with prefix for XML parsing (if parsing\n from template comments, this should be a LaTeX\n comment, e.g. '^%\\s*', as the tables would be\n commented out in the file).\n\n Returns: Dictionary with resulting custom tables\n\n \"\"\"\n\n # Read in all the custom tables\n xml_list = tolist(xml_input)\n xml_toparse = concat_files(xml_list)\n\n xml_regex = prefix\n xml_regex += \"<tablefill-python\\s+tag\\s*=\\s*['\\\"](.+)\\s*['\\\"]\"\n\n # Figure out where the custom XML tags are\n i = 0\n custom = []\n for line in xml_toparse:\n s = re.search(xml_regex, line)\n if s:\n j = i\n search = True\n while search and j <= len(xml_toparse):\n if re.search('</\\s*tablefill-python\\s*>', xml_toparse[j]):\n search = False\n j += 1\n\n if not search:\n custom += [range(i, j)]\n\n i += 1\n\n # Prase each custom XMl tag into a dictionary\n cdict = {}\n for c in custom:\n chtml = []\n cobj = itemgetter(*c)(xml_toparse)\n for obj in cobj:\n chtml += [re.sub('^%\\s*', '', obj)]\n\n try:\n cxml = xml.fromstringlist(chtml)\n except:\n xml_parse_msg = \"Could not parse custom XML in lines %d-%d.\"\n raise Warning('\\t' + xml_parse_msg % (c[0], c[-1]))\n\n t = cxml.get('tag')\n cdict[t] = cxml\n\n # Get temporary string and numeric dictionaries\n strdict = ctables\n numdict = {}\n for tag, table in ctables.items():\n numdict[tag] = nested_convert(table, float)\n\n numpy_strdict = {}\n numpy_numdict = {}\n if numpyok:\n for tag, table in strdict.items():\n numpy_strdict[tag] = numpy.asmatrix(table)\n\n for tag, table in numdict.items():\n numpy_numdict[tag] = numpy.asmatrix(table)\n\n # Create all the custom tables using python/numpy slicing\n for tag, cxml in cdict.items():\n print_verbose(self.verbose, \"\\tcreating custom tab:%s\" % (tag))\n\n csyntax = cxml.get('syntax')\n if csyntax not in [None, 'python', 'numpy']:\n xml_syntax_msg = \"Custom table '%s' requested unknown syntax\"\n xml_syntax_msg += \" '%s'. Specify 'python' or 'numpy'.\"\n raise Warning('\\t' + xml_syntax_msg % (tag, csyntax))\n\n usenumpy = self.numpy_syntax and not csyntax == 'python'\n usenumpy = usenumpy or (csyntax == 'numpy')\n if usenumpy and not numpyok:\n xml_numpy_msg = \"Custom table '%s' requested syntax 'numpy'\"\n xml_numpy_msg += \" but python failed to import numpy.\"\n raise Warning('\\t' + xml_numpy_msg % tag)\n\n usetype = 'float' if self.use_floats else cxml.get('type')\n if usetype not in [None, 'float', 'numeric', 'str', 'string']:\n xml_usetype_msg = \"Custom table '%s' asked unknown type\"\n xml_usetype_msg += \" '%s'. 
Specify 'float' or 'str'.\"\n raise Warning('\\t' + xml_usetype_msg % (tag, usetype))\n\n if usetype in ['float', 'numeric']:\n usedict = numpy_numdict if numpyok and usenumpy else numdict\n else:\n usedict = numpy_strdict if numpyok and usenumpy else strdict\n\n try:\n clean_text = re.subn('\\s|' + linesep, '', cxml.text)[0]\n ceval = eval(clean_text, usedict)\n if numpyok and usenumpy:\n if usetype in ['float', 'numeric']:\n numpy_numdict[tag] = ceval\n else:\n numpy_strdict[tag] = ceval\n\n ceval = tolist2(ceval)\n toadd = list(flatten([numpy.array(l) for l in ceval]))\n strdict[tag] = nested_convert(toadd, str)\n numdict[tag] = nested_convert(toadd, float)\n else:\n ceval = tolist2(ceval)\n toadd = list(flatten(ceval))\n strdict[tag] = nested_convert(ceval, str)\n numdict[tag] = nested_convert(ceval, float)\n if numpyok:\n numpy_strdict[tag] = numpy.asmatrix(strdict[tag])\n numpy_numdict[tag] = numpy.asmatrix(numdict[tag])\n\n except:\n warn_custom = \"custom 'tab:%s' failed to parse.\" % tag\n print_verbose(self.verbose, '\\t' + warn_custom)\n continue\n\n ctables[tag] = list(nested_convert(toadd, str))\n\n def parse_xml_file_legacy(self, ctables, xml_input, prefix = ''):\n \"\"\"Parse custom tabs in comments/XML files\n\n Note that the parsing here is VERY crude (you will note it uses\n a combination of XML parsing and regexes). This is more or less\n intentional, since I want to be inflexible when using this\n feature as it is VERY experimental. As it becomes stable the\n function may move to proper XML parsing.\n\n Args:\n ctables (dict): Dictionary with input tables\n xml_input (list): Template or XML files with custom tags\n\n Kwargs:\n prefix (str): regex with prefix for XML parsing (if parsing\n from template comments, this should be a LaTeX\n comment, e.g. '^%\\s*', as the tables would be\n commented out in the file).\n\n Returns: Dictionary with resulting custom tables\n\n \"\"\"\n\n # Read in all the custom tables\n xml_list = tolist(xml_input)\n xml_toparse = concat_files(xml_list)\n\n xml_regex = prefix\n xml_regex += \"<tablefill-(custom|python)\\s+tag\\s*=\\s*['\\\"](.+)\\s*['\\\"]\"\n\n # Figure out where the custom XML tags are\n i = 0\n custom = []\n todo = []\n for line in xml_toparse:\n s = re.search(xml_regex, line)\n if s:\n j = i\n w = s.groups()[0]\n todo += [w]\n search = True\n while search and j <= len(xml_toparse):\n if re.search('</\\s*tablefill-%s\\s*>' % w, xml_toparse[j]):\n search = False\n j += 1\n\n if not search:\n custom += [range(i, j)]\n\n i += 1\n\n # Put them into a dictionary\n cdict = {}\n edict = {}\n for c, e in zip(custom, todo):\n chtml = []\n cobj = itemgetter(*c)(xml_toparse)\n for obj in cobj:\n chtml += [re.sub('^%\\s*', '', obj)]\n\n try:\n cxml = xml.fromstringlist(chtml)\n except:\n xml_parse_msg = \"Could not parse custom XML in lines %d-%d.\"\n raise Warning('\\t' + xml_parse_msg % (c[0], c[-1]))\n\n t = cxml.get('tag')\n cdict[t] = cxml\n edict[t] = e\n\n # Create all the custom tables using python/numpy slicing\n for tag, cxml in cdict.items():\n print_verbose(self.verbose, \"\\tcreating custom tab:%s\" % (tag))\n\n csyntax = cxml.get('syntax')\n if csyntax not in [None, 'python', 'numpy']:\n xml_syntax_msg = \"Custom table '%s' requested unknown syntax\"\n xml_syntax_msg += \" '%s'. 
Specify 'python' or 'numpy'.\"\n raise Warning('\\t' + xml_syntax_msg % (tag, csyntax))\n\n usenumpy = self.numpy_syntax and not csyntax == 'python'\n usenumpy = usenumpy or (csyntax == 'numpy')\n if usenumpy and not numpyok:\n xml_numpy_msg = \"Custom table '%s' requested syntax 'numpy'\"\n xml_numpy_msg += \" but python failed to import numpy.\"\n raise Warning('\\t' + xml_numpy_msg % tag)\n\n convert = 'float' if self.use_floats else cxml.get('convert')\n if convert not in [None, 'float', 'str']:\n xml_convert_msg = \"Custom table '%s' asked unknown conversion\"\n xml_convert_msg += \" '%s'. Specify 'float' or 'str'.\"\n raise Warning('\\t' + xml_convert_msg % (tag, convert))\n\n if edict[tag] == 'python':\n inputs = [l.strip() for l in cxml.get('inputs').split(',')]\n inputs = list(filter(lambda a: a != '', inputs))\n xmlexec = mktemp()\n with open(xmlexec, \"w+\") as tmp:\n tmp.writelines(cxml.text)\n\n python = {}\n for table in inputs:\n ptable = tolist(ctables[table])\n if convert == 'float':\n ptable = nested_convert(ptable, float)\n\n if numpyok and usenumpy:\n ptable = numpy.asmatrix(ptable)\n\n python[table] = ptable\n\n try:\n execfile(xmlexec, python)\n except:\n xml_python_msg = \"Custom code for '%s' failed to run.\"\n raise Warning('\\t' + xml_python_msg % tag)\n\n remove(xmlexec)\n try:\n table_tag = python[tag]\n except:\n xml_python_msg = \"Code for '%s' did not create 'tag'\"\n raise Warning('\\t' + xml_python_msg % tag)\n\n if numpyok and usenumpy:\n table_tag = numpy.array(table_tag)\n\n table_tag = list(flatten(table_tag))\n if convert == 'float':\n table_tag = nested_convert(table_tag, str)\n\n ctables[tag] = table_tag\n else:\n ctables[tag] = table_tag = []\n for combine in cxml.findall('combine'):\n ctag = combine.get('tag')\n clist = ctables[ctag]\n if numpyok and usenumpy:\n clist = numpy.asmatrix(clist)\n\n for subset in combine.text.split(';'):\n clean_subset = subset.strip(linesep).replace(' ', '')\n if clean_subset == '':\n continue\n\n try:\n add = eval(\"clist%s\" % (clean_subset))\n if numpyok and usenumpy:\n add = numpy.array(add)\n\n table_tag += [add]\n except:\n warn_custom = \"custom 'tab:%s' failed to subset \"\n warn_custom += \"'%s' from 'tab:%s'; will continue.\"\n warn_msg = warn_custom % (tag, clean_subset, ctag)\n print_verbose(self.verbose, warn_msg)\n continue\n\n ctables[tag] = list(flatten(table_tag))\n\n def filter_missing(self, string_list):\n filters = ['.', '', 'NA', 'nan', 'NaN', 'None']\n return list(filter(lambda a: a not in filters, string_list))\n\n def get_filled_template(self):\n \"\"\"\n Fill template file using table input(s). The idea is to read the\n template line by line and if the line matches the start of a\n table, search for the table label (to match to the input tags).\n\n If no label, print a note to that effect. If there is a label,\n grab the corresponding matrix from the inputs, and replace the\n tokens with the input values until the values run out or we\n reach the end of the table. 
Repeat for all template lines.\n\n This function raises warnings for\n - Token matched but in a commented out line.\n - Too many tokens in table and not enough values.\n - Token outside of begin/end table statement.\n - Table label does not match tag in inputs.\n \"\"\"\n read_template = open(self.template, 'rU').readlines()\n table_start = -1\n table_search = False\n table_tag = ''\n table_entry = 0\n\n warn = self.warn_pre\n for n in range(len(read_template)):\n line = read_template[n]\n if not table_search and re.search(self.begin, line):\n table_search, table_tag = self.search_label(read_template, n)\n table_start = n\n search_msg = self.get_search_msg(table_search, table_tag, n)\n print_verbose(self.verbose, search_msg)\n\n if re.search(self.matcha, line) or re.search(self.matchb, line):\n if re.search(self.comments, line.strip()) and not self.fillc:\n warn_incomments = \"Line %d matches #(#|\\d+,*)#\"\n warn_incomments += \" but it appears to be commented out.\"\n warn_incomments += \" Skipping...\"\n print_verbose(self.verbose, warn + warn_incomments % n)\n elif table_search:\n table = self.tables[table_tag]\n ntable = len(table)\n update = self.replace_line(line, table, table_entry)\n read_template[n], table_entry, entry_start = update\n if ntable < table_entry:\n self.warnings['toolong'] += [str(n)]\n\n nstart = entry_start + 1\n nend = table_entry\n aux_toolong = (n, nstart, nend, table_tag, ntable)\n\n warn_toolong = \"Line %d has matches %d-%d for table\"\n warn_toolong += \" %s but the corresponding input\"\n warn_toolong += \" matrix only has %d entries.\"\n warn_toolong += \" Skipping...\"\n warn_toolong = warn_toolong % aux_toolong\n\n print_verbose(self.verbose, warn + warn_toolong)\n elif table_start == -1:\n self.warnings['notable'] += [str(n)]\n\n warn_notable = \"Line %d matches #(#|\\d+,*)# but\"\n warn_notable += \" is not in begin/end table statements.\"\n warn_notable += \" Skipping...\"\n\n print_verbose(self.verbose, warn + warn_notable % n)\n elif table_tag == '':\n self.warnings['nolabel'] += [str(n)]\n warn_nolabel = \"Line %d matches #(#|\\d+,*)#\"\n warn_nolabel += \" but couldn't find \" + self.label\n warn_nolabel += \" Skipping...\"\n print_verbose(self.verbose, warn + warn_nolabel % n)\n\n if re.search(self.end, line) and table_search:\n search_msg = \"Table '%s' in line %d ended in line %d.\"\n search_msg += \" %d replacements were made.\" % table_entry\n search_msg = search_msg % (table_tag, table_start, n)\n print_verbose(self.verbose, search_msg + linesep)\n\n table_start = -1\n table_search = False\n table_tag = ''\n table_entry = 0\n\n self.filled_template = read_template\n\n def search_label(self, intext, start):\n \"\"\"\n Search for label in list 'intext' from position 'start' until an\n \\end{table} statement. 
Returns label value ('' if none is found)\n and whether it matches a tag in the tables file\n \"\"\"\n N = start\n searchline = intext[N]\n searchmatch = re.search(self.label, searchline,\n flags = re.IGNORECASE)\n searchend = re.search(self.end, searchline)\n while not searchmatch and not searchend:\n N += 1\n searchline = intext[N]\n searchmatch = re.search(self.label, searchline,\n flags = re.IGNORECASE)\n searchend = re.search(self.end, searchline)\n\n if not searchend and searchmatch:\n label = re.findall(self.label, searchline,\n flags = re.IGNORECASE)[0]\n label = label.strip('{}\"').lower()\n return label in self.tables, label\n else:\n return False, ''\n\n def get_search_msg(self, search, tag, start):\n warn_nomatch = ''\n search_msg = \"Found table in line %d. \" % start\n if tag == '':\n search_msg += \"No label. Skipping...\"\n else:\n search_msg += \"Found label '%s'... \" % tag\n if search:\n search_msg += \"Found match!\"\n else:\n self.warnings['nomatch'] += [tag]\n warn_nomatch = linesep + self.warn_pre\n warn_nomatch += \"NO MACHES FOR '%s' IN\" + linesep + '\\t'\n warn_nomatch += (linesep + '\\t').join(self.input)\n warn_nomatch += linesep + \"Please check input file(s)\"\n warn_nomatch = warn_nomatch % tag + linesep\n\n return search_msg + warn_nomatch\n\n def replace_line(self, line, table, tablen):\n \"\"\"\n Replaces all matches of #(#|\\d+,*)#. Splits by & because that's\n how LaTeX delimits tables. Returns how many values it replaced\n because LaTeX can have any number of entries per line.\n \"\"\"\n i = 0\n force_stop = False\n starts = tablen\n match0 = re.search(self.match0, line)\n while match0 and not force_stop:\n s, e = match0.span()\n cell = line[s:e]\n matcha = re.search(self.matcha, cell)\n matchb = re.search(self.matchb, cell)\n\n if len(table) > tablen:\n # Replace all pattern A matches (simply replace the text)\n if matcha:\n entry = re.sub(self.matche, '\\\\\\\\\\\\1', table[tablen])\n if '*' in matcha.groups():\n cell = self.parse_pval_to_stars(cell, entry)\n else:\n cell = re.sub(self.matcha, entry, cell, count = 1)\n\n line = re.sub(self.matcha, cell, line, count = 1)\n tablen += 1\n\n # Replace all pattern B matches (round, comma and % format)\n if matchb:\n entry = re.sub(self.matche, '\\\\\\\\\\\\1', table[tablen])\n cell = self.round_and_format(cell, entry)\n line = re.sub(self.matchb, cell, line, count = 1)\n tablen += 1\n else:\n if matcha or matchb:\n starts = tablen if tablen - starts == i + 1 else starts\n tablen += 1\n\n force_stop = True\n\n match0 = re.search(self.match0, line)\n i += 1\n\n return line, tablen, starts\n\n def round_and_format(self, cell, entry):\n \"\"\"\n Rounds entry according to the format in cell. Note Decimal's\n quantize makes the object have the same number of significant\n digits as the input passed. 
format(str, ',d') returns str with\n comma as thousands separator.\n \"\"\"\n precision, comma = re.findall(self.matchb, cell)[0]\n precision = int(precision)\n roundas = 0 if precision == 0 else pow(10, -precision)\n roundas = Decimal(str(roundas))\n dentry = 100 * Decimal(entry) if '%' in comma else Decimal(entry)\n dentry = abs(dentry) if re.search(self.matchd, cell) else dentry\n rounded = str(dentry.quantize(roundas, rounding = ROUND_HALF_UP))\n if ',' in comma:\n integer_part, decimal_part = re.findall(self.matchc, rounded)[0]\n neg = '-' if re.match('^-0', integer_part) else ''\n rounded = neg + compat_format(int(integer_part)) + decimal_part\n return re.sub(self.matchb, rounded, cell, count = 1)\n\n def parse_pval_to_stars(self, cell, entry):\n \"\"\"\n Parse a p-value to significance symbols. The default is to\n parse 0.1, 0.05, 0.01 to *, **, ***, but the user can specify\n arbitrary thresholds and symbols.\n \"\"\"\n pos = sum([float(entry) < p for p in self.pvals]) - 1\n star = '' if pos < 0 else self.stars[pos]\n return re.sub(self.matcha, star, cell, count = 1)\n\n def get_notification_message(self):\n \"\"\"\n Inserts a message atop the LaTeX file that this was created by\n tablefill_tex. includes the following warnings, when applicable\n - #(#|\\d+,*)# is found on a line outside a table environment\n - #(#|\\d+,*)# is on a table environment with no label\n - A tabular environment's label has no match in tables.txt\n \"\"\"\n n = 0\n if self.filetype == 'tex':\n head = 3 * [72 * '%' + linesep]\n tail = head\n pre = '% '\n after = linesep\n elif self.filetype == 'lyx':\n pre = \"\\\\begin_layout Plain Layout\" + linesep\n after = \"\\\\end_layout\" + linesep\n head = [\"\\\\begin_layout Standard\" + linesep]\n head += [\"\\\\begin_inset Note Note\" + linesep]\n head += [\"status open\" + linesep + linesep]\n tail = [\"\\\\end_inset\" + linesep]\n tail += [\"\\\\end_layout\" + linesep]\n while not self.filled_template[n].startswith('\\\\begin_body'):\n n += 1\n n += 1\n elif self.filetype == 'md':\n pre = \"\"\n after = linesep\n head = [\"<!-- \"]\n tail = [\" -->\", linesep, linesep]\n\n for key in self.warnings.keys():\n self.warnings[key] = ', '.join(self.warnings[key])\n\n self.warning = True in [v != '' for v in self.warnings.values()]\n if self.warning:\n fillt = (self.template, self.input)\n fillh = self.template\n fillt = (\"'template' file\", \"'input' file(s)\")\n fillh = \"'template' file\"\n imtags = \"WARNING: These tags were in %s but not in %s: \" % fillt\n imhead = \"WARNING: Lines in %s matching '#(#|d+,*)#'\" % fillh\n imend = linesep + pre if self.filetype == 'tex' else '; '\n imend += \"Output '%s' may not compile!\" % self.output\n\n if self.warnings['nomatch'] != '':\n self.warn_msg['nomatch'] = imtags\n self.warn_msg['nomatch'] += self.warnings['nomatch'] + imend\n\n if self.warnings['notable'] != '':\n self.warn_msg['notable'] = imhead\n self.warn_msg['notable'] += \" were not in a table environment: \"\n self.warn_msg['notable'] += self.warnings['notable'] + imend\n\n if self.warnings['nolabel'] != '':\n self.warn_msg['nolabel'] = imhead\n self.warn_msg['nolabel'] += \" but the environment had no label: \"\n self.warn_msg['nolabel'] += self.warnings['nolabel'] + imend\n\n if self.warnings['toolong'] != '':\n self.warn_msg['toolong'] = imhead\n self.warn_msg['toolong'] += \" but their corresponding input matrix\"\n self.warn_msg['toolong'] += \" ran out of entries: \"\n self.warn_msg['toolong'] += self.warnings['toolong'] + imend\n\n msg = [\"This file 
was produced by 'tablefill.py'\"]\n msg += [\"\\tTemplate file: %s\" % self.template]\n msg += [\"\\tInput file(s): %s\" % self.input]\n msg += [\"To make changes, edit the input and template files.\"]\n msg += [pre + after]\n\n if self.warning:\n msg += [\"THERE WAS AN ISSUE CREATING THIS FILE!\"]\n msg += [s for s in self.warn_msg.values()]\n else:\n msg += [\"DO NOT EDIT THIS FILE DIRECTLY.\"]\n\n msg = [pre + m + after for m in msg]\n self.filled_template[n:n] = head + msg + tail\n\n def write_to_output(self, text):\n outfile = open(self.output, 'w')\n outfile.write(''.join(text))\n outfile.close()\n\n def get_exit_message(self):\n if self.warning:\n msg = [\"The following issues were found:\"]\n msg += list(filter(lambda wm: wm != '', self.warn_msg.values()))\n self.exit_msg = linesep.join(msg)\n self.exit = 'WARNING'\n else:\n msg = \"All tags in '%s' successfully filled by 'tablefill.py'\"\n msg += linesep + \"Output can be found in '%s'\" + linesep\n self.exit_msg = msg % (self.template, self.output)\n self.exit = 'SUCCESS'\n\n\n# ---------------------------------------------------------------------\n# Run the function\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.asmatrix",
"numpy.array"
]
] |
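Illustrative note (not part of the dataset row above): the tablefill code in the preceding row rounds numeric placeholders such as `#2,#` ("round to 2 decimals, add thousands separators") via `Decimal.quantize` with `ROUND_HALF_UP`. A standalone re-derivation of that rounding step, not the module's own code path; the sample value 12345.6789 is hypothetical.

from decimal import Decimal, ROUND_HALF_UP

entry = "12345.6789"   # value read from an input table
precision = 2          # the digit in the '#2,#' placeholder
quantum = Decimal(str(pow(10, -precision)))                  # Decimal('0.01')
rounded = Decimal(entry).quantize(quantum, rounding=ROUND_HALF_UP)
integer_part, _, decimal_part = str(rounded).partition(".")
print(format(int(integer_part), ",d") + "." + decimal_part)  # 12,345.68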
aikovsky/tegridy-tools
|
[
"4a87e1a27e2ad9ee6e16049bde7835836caa4fdd"
] |
[
"tegridy-tools/pretty_midi/utilities.py"
] |
[
"\"\"\"Utilty functions for converting between MIDI data and human-readable/usable\nvalues\n\n\"\"\"\n\nimport numpy as np\nimport re\n\nfrom pm_constants import DRUM_MAP, INSTRUMENT_MAP, INSTRUMENT_CLASSES\n\n\ndef key_number_to_key_name(key_number):\n \"\"\"Convert a key number to a key string.\n\n Parameters\n ----------\n key_number : int\n Uses pitch classes to represent major and minor keys.\n For minor keys, adds a 12 offset.\n For example, C major is 0 and C minor is 12.\n\n Returns\n -------\n key_name : str\n Key name in the format ``'(root) (mode)'``, e.g. ``'Gb minor'``.\n Gives preference for keys with flats, with the exception of F#, G# and\n C# minor.\n \"\"\"\n\n if not isinstance(key_number, int):\n raise ValueError('`key_number` is not int!')\n if not ((key_number >= 0) and (key_number < 24)):\n raise ValueError('`key_number` is larger than 24')\n\n # preference to keys with flats\n keys = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb',\n 'G', 'Ab', 'A', 'Bb', 'B']\n\n # circle around 12 pitch classes\n key_idx = key_number % 12\n mode = key_number // 12\n\n # check if mode is major or minor\n if mode == 0:\n return keys[key_idx] + ' Major'\n elif mode == 1:\n # preference to C#, F# and G# minor\n if key_idx in [1, 6, 8]:\n return keys[key_idx-1] + '# minor'\n else:\n return keys[key_idx] + ' minor'\n\n\ndef key_name_to_key_number(key_string):\n \"\"\"Convert a key name string to key number.\n\n Parameters\n ----------\n key_string : str\n Format is ``'(root) (mode)'``, where:\n * ``(root)`` is one of ABCDEFG or abcdefg. A lowercase root\n indicates a minor key when no mode string is specified. Optionally\n a # for sharp or b for flat can be specified.\n\n * ``(mode)`` is optionally specified either as one of 'M', 'Maj',\n 'Major', 'maj', or 'major' for major or 'm', 'Min', 'Minor', 'min',\n 'minor' for minor. If no mode is specified and the root is\n uppercase, the mode is assumed to be major; if the root is\n lowercase, the mode is assumed to be minor.\n\n Returns\n -------\n key_number : int\n Integer representing the key and its mode. 
Integers from 0 to 11\n represent major keys from C to B; 12 to 23 represent minor keys from C\n to B.\n \"\"\"\n # Create lists of possible mode names (major or minor)\n major_strs = ['M', 'Maj', 'Major', 'maj', 'major']\n minor_strs = ['m', 'Min', 'Minor', 'min', 'minor']\n # Construct regular expression for matching key\n pattern = re.compile(\n # Start with any of A-G, a-g\n '^(?P<key>[ABCDEFGabcdefg])'\n # Next, look for #, b, or nothing\n '(?P<flatsharp>[#b]?)'\n # Allow for a space between key and mode\n ' ?'\n # Next, look for any of the mode strings\n '(?P<mode>(?:(?:' +\n # Next, look for any of the major or minor mode strings\n ')|(?:'.join(major_strs + minor_strs) + '))?)$')\n # Match provided key string\n result = re.match(pattern, key_string)\n if result is None:\n raise ValueError('Supplied key {} is not valid.'.format(key_string))\n # Convert result to dictionary\n result = result.groupdict()\n\n # Map from key string to pitch class number\n key_number = {'c': 0, 'd': 2, 'e': 4, 'f': 5,\n 'g': 7, 'a': 9, 'b': 11}[result['key'].lower()]\n # Increment or decrement pitch class if a flat or sharp was specified\n if result['flatsharp']:\n if result['flatsharp'] == '#':\n key_number += 1\n elif result['flatsharp'] == 'b':\n key_number -= 1\n # Circle around 12 pitch classes\n key_number = key_number % 12\n # Offset if mode is minor, or the key name is lowercase\n if result['mode'] in minor_strs or (result['key'].islower() and\n result['mode'] not in major_strs):\n key_number += 12\n\n return key_number\n\n\ndef mode_accidentals_to_key_number(mode, num_accidentals):\n \"\"\"Convert a given number of accidentals and mode to a key number.\n\n Parameters\n ----------\n mode : int\n 0 is major, 1 is minor.\n num_accidentals : int\n Positive number is used for sharps, negative number is used for flats.\n\n Returns\n -------\n key_number : int\n Integer representing the key and its mode.\n \"\"\"\n\n if not (isinstance(num_accidentals, int) and\n num_accidentals > -8 and\n num_accidentals < 8):\n raise ValueError('Number of accidentals {} is not valid'.format(\n num_accidentals))\n if mode not in (0, 1):\n raise ValueError('Mode {} is not recognizable, must be 0 or 1'.format(\n mode))\n\n sharp_keys = 'CGDAEBF'\n flat_keys = 'FBEADGC'\n\n # check if key signature has sharps or flats\n if num_accidentals >= 0:\n num_sharps = num_accidentals // 6\n key = sharp_keys[num_accidentals % 7] + '#' * int(num_sharps)\n else:\n if num_accidentals == -1:\n key = 'F'\n else:\n key = flat_keys[(-1 * num_accidentals - 1) % 7] + 'b'\n\n # find major key number\n key += ' Major'\n\n # use routine to convert from string notation to number notation\n key_number = key_name_to_key_number(key)\n\n # if minor, offset\n if mode == 1:\n key_number = 12 + ((key_number - 3) % 12)\n\n return key_number\n\n\ndef key_number_to_mode_accidentals(key_number):\n \"\"\"Converts a key number to number of accidentals and mode.\n\n Parameters\n ----------\n key_number : int\n Key number as used in ``pretty_midi``.\n\n Returns\n -------\n mode : int\n 0 for major, 1 for minor.\n num_accidentals : int\n Number of accidentals.\n Positive is for sharps and negative is for flats.\n \"\"\"\n\n if not ((isinstance(key_number, int) and\n key_number >= 0 and\n key_number < 24)):\n raise ValueError('Key number {} is not a must be an int between 0 and '\n '24'.format(key_number))\n\n pc_to_num_accidentals_major = {0: 0, 1: -5, 2: 2, 3: -3, 4: 4, 5: -1, 6: 6,\n 7: 1, 8: -4, 9: 3, 10: -2, 11: 5}\n mode = key_number // 12\n\n if mode == 
0:\n num_accidentals = pc_to_num_accidentals_major[key_number]\n return mode, num_accidentals\n elif mode == 1:\n key_number = (key_number + 3) % 12\n num_accidentals = pc_to_num_accidentals_major[key_number]\n return mode, num_accidentals\n else:\n return None\n\n\ndef qpm_to_bpm(quarter_note_tempo, numerator, denominator):\n \"\"\"Converts from quarter notes per minute to beats per minute.\n\n Parameters\n ----------\n quarter_note_tempo : float\n Quarter note tempo.\n numerator : int\n Numerator of time signature.\n denominator : int\n Denominator of time signature.\n\n Returns\n -------\n bpm : float\n Tempo in beats per minute.\n \"\"\"\n\n if not (isinstance(quarter_note_tempo, (int, float)) and\n quarter_note_tempo > 0):\n raise ValueError(\n 'Quarter notes per minute must be an int or float '\n 'greater than 0, but {} was supplied'.format(quarter_note_tempo))\n if not (isinstance(numerator, int) and numerator > 0):\n raise ValueError(\n 'Time signature numerator must be an int greater than 0, but {} '\n 'was supplied.'.format(numerator))\n if not (isinstance(denominator, int) and denominator > 0):\n raise ValueError(\n 'Time signature denominator must be an int greater than 0, but {} '\n 'was supplied.'.format(denominator))\n\n # denominator is whole, half, quarter, eighth, sixteenth or 32nd note\n if denominator in [1, 2, 4, 8, 16, 32]:\n # simple triple\n if numerator == 3:\n return quarter_note_tempo * denominator / 4.0\n # compound meter 6/8*n, 9/8*n, 12/8*n...\n elif numerator % 3 == 0:\n return quarter_note_tempo / 3.0 * denominator / 4.0\n # strongly assume two eighths equal a beat\n else:\n return quarter_note_tempo * denominator / 4.0\n else:\n return quarter_note_tempo\n\n\ndef note_number_to_hz(note_number):\n \"\"\"Convert a (fractional) MIDI note number to its frequency in Hz.\n\n Parameters\n ----------\n note_number : float\n MIDI note number, can be fractional.\n\n Returns\n -------\n note_frequency : float\n Frequency of the note in Hz.\n\n \"\"\"\n # MIDI note numbers are defined as the number of semitones relative to C0\n # in a 440 Hz tuning\n return 440.0*(2.0**((note_number - 69)/12.0))\n\n\ndef hz_to_note_number(frequency):\n \"\"\"Convert a frequency in Hz to a (fractional) note number.\n\n Parameters\n ----------\n frequency : float\n Frequency of the note in Hz.\n\n Returns\n -------\n note_number : float\n MIDI note number, can be fractional.\n\n \"\"\"\n # MIDI note numbers are defined as the number of semitones relative to C0\n # in a 440 Hz tuning\n return 12*(np.log2(frequency) - np.log2(440.0)) + 69\n\n\ndef note_name_to_number(note_name):\n \"\"\"Converts a note name in the format\n ``'(note)(accidental)(octave number)'`` (e.g. 
``'C#4'``) to MIDI note\n number.\n\n ``'(note)'`` is required, and is case-insensitive.\n\n ``'(accidental)'`` should be ``''`` for natural, ``'#'`` for sharp and\n ``'!'`` or ``'b'`` for flat.\n\n If ``'(octave)'`` is ``''``, octave 0 is assumed.\n\n Parameters\n ----------\n note_name : str\n A note name, as described above.\n\n Returns\n -------\n note_number : int\n MIDI note number corresponding to the provided note name.\n\n Notes\n -----\n Thanks to Brian McFee.\n\n \"\"\"\n\n # Map note name to the semitone\n pitch_map = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}\n # Relative change in semitone denoted by each accidental\n acc_map = {'#': 1, '': 0, 'b': -1, '!': -1}\n\n # Reg exp will raise an error when the note name is not valid\n try:\n # Extract pitch, octave, and accidental from the supplied note name\n match = re.match(r'^(?P<n>[A-Ga-g])(?P<off>[#b!]?)(?P<oct>[+-]?\\d+)$',\n note_name)\n\n pitch = match.group('n').upper()\n offset = acc_map[match.group('off')]\n octave = int(match.group('oct'))\n except:\n raise ValueError('Improper note format: {}'.format(note_name))\n\n # Convert from the extrated ints to a full note number\n return 12*(octave + 1) + pitch_map[pitch] + offset\n\n\ndef note_number_to_name(note_number):\n \"\"\"Convert a MIDI note number to its name, in the format\n ``'(note)(accidental)(octave number)'`` (e.g. ``'C#4'``).\n\n Parameters\n ----------\n note_number : int\n MIDI note number. If not an int, it will be rounded.\n\n Returns\n -------\n note_name : str\n Name of the supplied MIDI note number.\n\n Notes\n -----\n Thanks to Brian McFee.\n\n \"\"\"\n\n # Note names within one octave\n semis = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']\n\n # Ensure the note is an int\n note_number = int(np.round(note_number))\n\n # Get the semitone and the octave, and concatenate to create the name\n return semis[note_number % 12] + str(note_number//12 - 1)\n\n\ndef note_number_to_drum_name(note_number):\n \"\"\"Converts a MIDI note number in a percussion instrument to the\n corresponding drum name, according to the General MIDI standard.\n\n Any MIDI note number outside of the valid range (note 35-81, zero-indexed)\n will result in an empty string.\n\n Parameters\n ----------\n note_number : int\n MIDI note number. If not an int, it will be rounded.\n\n Returns\n -------\n drum_name : str\n Name of the drum for this note for a percussion instrument.\n\n Notes\n -----\n See http://www.midi.org/techspecs/gm1sound.php\n\n \"\"\"\n\n # Ensure note is an int\n note_number = int(np.round(note_number))\n # General MIDI only defines drum names for notes 35-81\n if note_number < 35 or note_number > 81:\n return ''\n else:\n # Our DRUM_MAP starts from index 0; drum names start from 35\n return DRUM_MAP[note_number - 35]\n\n\ndef __normalize_str(name):\n \"\"\"Removes all non-alphanumeric characters from a string and converts\n it to lowercase.\n\n \"\"\"\n return ''.join(ch for ch in name if ch.isalnum()).lower()\n\n\ndef drum_name_to_note_number(drum_name):\n \"\"\"Converts a drum name to the corresponding MIDI note number for a\n percussion instrument. 
Conversion is case, whitespace, and\n non-alphanumeric character insensitive.\n\n Parameters\n ----------\n drum_name : str\n Name of a drum which exists in the general MIDI standard.\n If the drum is not found, a ValueError is raised.\n\n Returns\n -------\n note_number : int\n The MIDI note number corresponding to this drum.\n\n Notes\n -----\n See http://www.midi.org/techspecs/gm1sound.php\n\n \"\"\"\n\n normalized_drum_name = __normalize_str(drum_name)\n # Create a list of the entries DRUM_MAP, normalized, to search over\n normalized_drum_names = [__normalize_str(name) for name in DRUM_MAP]\n\n # If the normalized drum name is not found, complain\n try:\n note_index = normalized_drum_names.index(normalized_drum_name)\n except:\n raise ValueError('{} is not a valid General MIDI drum '\n 'name.'.format(drum_name))\n\n # If an index was found, it will be 0-based; add 35 to get the note number\n return note_index + 35\n\n\ndef program_to_instrument_name(program_number):\n \"\"\"Converts a MIDI program number to the corresponding General MIDI\n instrument name.\n\n Parameters\n ----------\n program_number : int\n MIDI program number, between 0 and 127.\n\n Returns\n -------\n instrument_name : str\n Name of the instrument corresponding to this program number.\n\n Notes\n -----\n See http://www.midi.org/techspecs/gm1sound.php\n\n \"\"\"\n\n # Check that the supplied program is in the valid range\n if program_number < 0 or program_number > 127:\n raise ValueError('Invalid program number {}, should be between 0 and'\n ' 127'.format(program_number))\n # Just grab the name from the instrument mapping list\n return INSTRUMENT_MAP[program_number]\n\n\ndef instrument_name_to_program(instrument_name):\n \"\"\"Converts an instrument name to the corresponding General MIDI program\n number. 
Conversion is case, whitespace, and non-alphanumeric character\n insensitive.\n\n Parameters\n ----------\n instrument_name : str\n Name of an instrument which exists in the general MIDI standard.\n If the instrument is not found, a ValueError is raised.\n\n Returns\n -------\n program_number : int\n The MIDI program number corresponding to this instrument.\n\n Notes\n -----\n See http://www.midi.org/techspecs/gm1sound.php\n\n \"\"\"\n\n normalized_inst_name = __normalize_str(instrument_name)\n # Create a list of the entries INSTRUMENT_MAP, normalized, to search over\n normalized_inst_names = [__normalize_str(name) for name in\n INSTRUMENT_MAP]\n\n # If the normalized drum name is not found, complain\n try:\n program_number = normalized_inst_names.index(normalized_inst_name)\n except:\n raise ValueError('{} is not a valid General MIDI instrument '\n 'name.'.format(instrument_name))\n\n # Return the index (program number) if a match was found\n return program_number\n\n\ndef program_to_instrument_class(program_number):\n \"\"\"Converts a MIDI program number to the corresponding General MIDI\n instrument class.\n\n Parameters\n ----------\n program_number : int\n MIDI program number, between 0 and 127.\n\n Returns\n -------\n instrument_class : str\n Name of the instrument class corresponding to this program number.\n\n Notes\n -----\n See http://www.midi.org/techspecs/gm1sound.php\n\n \"\"\"\n\n # Check that the supplied program is in the valid range\n if program_number < 0 or program_number > 127:\n raise ValueError('Invalid program number {}, should be between 0 and'\n ' 127'.format(program_number))\n # Just grab the name from the instrument mapping list\n return INSTRUMENT_CLASSES[int(program_number)//8]\n\n\ndef pitch_bend_to_semitones(pitch_bend, semitone_range=2.):\n \"\"\"Convert a MIDI pitch bend value (in the range ``[-8192, 8191]``) to the\n bend amount in semitones.\n\n Parameters\n ----------\n pitch_bend : int\n MIDI pitch bend amount, in ``[-8192, 8191]``.\n semitone_range : float\n Convert to +/- this semitone range. Default is 2., which is the\n General MIDI standard +/-2 semitone range.\n\n Returns\n -------\n semitones : float\n Number of semitones corresponding to this pitch bend amount.\n\n \"\"\"\n\n return semitone_range*pitch_bend/8192.0\n\n\ndef semitones_to_pitch_bend(semitones, semitone_range=2.):\n \"\"\"Convert a semitone value to the corresponding MIDI pitch bend integer.\n\n Parameters\n ----------\n semitones : float\n Number of semitones for the pitch bend.\n semitone_range : float\n Convert to +/- this semitone range. Default is 2., which is the\n General MIDI standard +/-2 semitone range.\n\n Returns\n -------\n pitch_bend : int\n MIDI pitch bend amount, in ``[-8192, 8191]``.\n\n \"\"\"\n return int(8192*(semitones/semitone_range))\n"
] |
[
[
"numpy.round",
"numpy.log2"
]
] |
lravindr/cvat
|
[
"b025acea43fbb55c7ea7eac7b12007f0eb6d3f45"
] |
[
"datumaro/datumaro/plugins/tf_detection_api_format/extractor.py"
] |
[
"\n# Copyright (C) 2019 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom collections import OrderedDict\nimport numpy as np\nimport os.path as osp\nimport re\n\nfrom datumaro.components.extractor import (SourceExtractor, DatasetItem,\n AnnotationType, Bbox, Mask, LabelCategories\n)\nfrom datumaro.util.image import Image, decode_image, lazy_image\nfrom datumaro.util.tf_util import import_tf as _import_tf\n\nfrom .format import DetectionApiPath\ntf = _import_tf()\n\n\ndef clamp(value, _min, _max):\n return max(min(_max, value), _min)\n\nclass TfDetectionApiExtractor(SourceExtractor):\n def __init__(self, path):\n assert osp.isfile(path), path\n images_dir = ''\n root_dir = osp.dirname(osp.abspath(path))\n if osp.basename(root_dir) == DetectionApiPath.ANNOTATIONS_DIR:\n root_dir = osp.dirname(root_dir)\n images_dir = osp.join(root_dir, DetectionApiPath.IMAGES_DIR)\n if not osp.isdir(images_dir):\n images_dir = ''\n\n super().__init__(subset=osp.splitext(osp.basename(path))[0])\n\n items, labels = self._parse_tfrecord_file(path, self._subset, images_dir)\n self._items = items\n self._categories = self._load_categories(labels)\n\n def categories(self):\n return self._categories\n\n def __iter__(self):\n for item in self._items:\n yield item\n\n def __len__(self):\n return len(self._items)\n\n @staticmethod\n def _load_categories(labels):\n label_categories = LabelCategories()\n labels = sorted(labels.items(), key=lambda item: item[1])\n for label, _ in labels:\n label_categories.add(label)\n return {\n AnnotationType.label: label_categories\n }\n\n @classmethod\n def _parse_labelmap(cls, text):\n id_pattern = r'(?:id\\s*:\\s*(?P<id>\\d+))'\n name_pattern = r'(?:name\\s*:\\s*[\\'\\\"](?P<name>.*?)[\\'\\\"])'\n entry_pattern = r'(\\{(?:[\\s\\n]*(?:%(id)s|%(name)s)[\\s\\n]*){2}\\})+' % \\\n {'id': id_pattern, 'name': name_pattern}\n matches = re.finditer(entry_pattern, text)\n\n labelmap = {}\n for match in matches:\n label_id = match.group('id')\n label_name = match.group('name')\n if label_id is not None and label_name is not None:\n labelmap[label_name] = int(label_id)\n\n return labelmap\n\n @classmethod\n def _parse_tfrecord_file(cls, filepath, subset, images_dir):\n dataset = tf.data.TFRecordDataset(filepath)\n features = {\n 'image/filename': tf.io.FixedLenFeature([], tf.string),\n 'image/source_id': tf.io.FixedLenFeature([], tf.string),\n 'image/height': tf.io.FixedLenFeature([], tf.int64),\n 'image/width': tf.io.FixedLenFeature([], tf.int64),\n 'image/encoded': tf.io.FixedLenFeature([], tf.string),\n 'image/format': tf.io.FixedLenFeature([], tf.string),\n # Object boxes and classes.\n 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),\n 'image/object/class/label': tf.io.VarLenFeature(tf.int64),\n 'image/object/class/text': tf.io.VarLenFeature(tf.string),\n 'image/object/mask': tf.io.VarLenFeature(tf.string),\n }\n\n dataset_labels = OrderedDict()\n labelmap_path = osp.join(osp.dirname(filepath),\n DetectionApiPath.LABELMAP_FILE)\n if osp.exists(labelmap_path):\n with open(labelmap_path, 'r', encoding='utf-8') as f:\n labelmap_text = f.read()\n dataset_labels.update({ label: id - 1\n for label, id in cls._parse_labelmap(labelmap_text).items()\n })\n\n dataset_items = []\n\n for record in dataset:\n parsed_record = tf.io.parse_single_example(record, features)\n frame_id = 
parsed_record['image/source_id'].numpy().decode('utf-8')\n frame_filename = \\\n parsed_record['image/filename'].numpy().decode('utf-8')\n frame_height = tf.cast(\n parsed_record['image/height'], tf.int64).numpy().item()\n frame_width = tf.cast(\n parsed_record['image/width'], tf.int64).numpy().item()\n frame_image = parsed_record['image/encoded'].numpy()\n xmins = tf.sparse.to_dense(\n parsed_record['image/object/bbox/xmin']).numpy()\n ymins = tf.sparse.to_dense(\n parsed_record['image/object/bbox/ymin']).numpy()\n xmaxs = tf.sparse.to_dense(\n parsed_record['image/object/bbox/xmax']).numpy()\n ymaxs = tf.sparse.to_dense(\n parsed_record['image/object/bbox/ymax']).numpy()\n label_ids = tf.sparse.to_dense(\n parsed_record['image/object/class/label']).numpy()\n labels = tf.sparse.to_dense(\n parsed_record['image/object/class/text'],\n default_value=b'').numpy()\n masks = tf.sparse.to_dense(\n parsed_record['image/object/mask'],\n default_value=b'').numpy()\n\n for label, label_id in zip(labels, label_ids):\n label = label.decode('utf-8')\n if not label:\n continue\n if label_id <= 0:\n continue\n if label in dataset_labels:\n continue\n dataset_labels[label] = label_id - 1\n\n item_id = osp.splitext(frame_filename)[0]\n\n annotations = []\n for shape_id, shape in enumerate(\n np.dstack((labels, xmins, ymins, xmaxs, ymaxs))[0]):\n label = shape[0].decode('utf-8')\n\n mask = None\n if len(masks) != 0:\n mask = masks[shape_id]\n\n if mask is not None:\n if isinstance(mask, bytes):\n mask = lazy_image(mask, decode_image)\n annotations.append(Mask(image=mask,\n label=dataset_labels.get(label)\n ))\n else:\n x = clamp(shape[1] * frame_width, 0, frame_width)\n y = clamp(shape[2] * frame_height, 0, frame_height)\n w = clamp(shape[3] * frame_width, 0, frame_width) - x\n h = clamp(shape[4] * frame_height, 0, frame_height) - y\n annotations.append(Bbox(x, y, w, h,\n label=dataset_labels.get(label)\n ))\n\n image_size = None\n if frame_height and frame_width:\n image_size = (frame_height, frame_width)\n\n image_params = {}\n if frame_image:\n image_params['data'] = lazy_image(frame_image, decode_image)\n if frame_filename:\n image_params['path'] = osp.join(images_dir, frame_filename)\n\n image = None\n if image_params:\n image = Image(**image_params, size=image_size)\n\n dataset_items.append(DatasetItem(id=item_id, subset=subset,\n image=image, annotations=annotations,\n attributes={'source_id': frame_id}))\n\n return dataset_items, dataset_labels\n"
] |
[
[
"numpy.dstack"
]
] |
xy-always/2020Iberlef
|
[
"b3c5a7046955d6f841a6fcb55fdd8bb8981015db"
] |
[
"src/optimization.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow as tf\nfrom accoptimization import *\n\ndef create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n # optimizer = MultistepAdamWeightDecayOptimizer(\n # learning_rate=learning_rate,\n # weight_decay_rate=0.01,\n # beta_1=0.9,\n # beta_2=0.999,\n # epsilon=1e-6,\n # n=8,\n # exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n # optimizer = tf.train.AdamOptimizer(learning_rate=2e-5,epsilon=1e-6)\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n # print_op = tf.print(new_global_step,'----',learning_rate)\n # with tf.control_dependencies([print_op]):\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n \n return train_op\n\n\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n"
] |
[
[
"tensorflow.zeros_initializer",
"tensorflow.trainable_variables",
"tensorflow.multiply",
"tensorflow.group",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.gradients",
"tensorflow.constant",
"tensorflow.train.polynomial_decay",
"tensorflow.sqrt",
"tensorflow.train.get_or_create_global_step",
"tensorflow.clip_by_global_norm",
"tensorflow.square",
"tensorflow.cast"
]
] |
vlegout/pandas-profiling
|
[
"3d0d0e4bb85a32cd7b7cd63e7ad083cb9a65970c"
] |
[
"tests/unit/test_describe.py"
] |
[
"import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom pandas_profiling import config\nfrom pandas_profiling.model.base import Variable\nfrom pandas_profiling.model.describe import describe\nfrom pandas_profiling.model.summary import describe_1d\n\ncheck_is_NaN = \"pandas_profiling.check_is_NaN\"\n\n\ntestdata = [\n # Unique values\n (pd.Series([1, 2]), True, 1, 1),\n # Unique values including nan\n (pd.Series([np.nan]), None, None, None),\n # Unique values all nan\n (pd.Series([1, 2, np.nan]), True, 1, 1),\n # Non unique values\n (pd.Series([1, 2, 2]), False, 2 / 3, 1 / 3),\n # Non unique nan\n (pd.Series([1, np.nan, np.nan]), True, 1, 1),\n # Non unique values including nan\n (pd.Series([1, 2, 2, np.nan]), False, 2 / 3, 1 / 3),\n # Non unique values including non unique nan\n (pd.Series([1, 2, 2, np.nan, np.nan]), False, 2 / 3, 1 / 3),\n]\n\n\n@pytest.mark.parametrize(\"data,is_unique,p_distinct,p_unique\", testdata)\ndef test_describe_unique(data, is_unique, p_distinct, p_unique):\n \"\"\"Test the unique feature of 1D data\"\"\"\n\n desc_1d = describe_1d(data)\n if is_unique is not None:\n assert desc_1d[\"p_unique\"] == p_unique, \"Describe 1D p_unique incorrect\"\n assert desc_1d[\"p_distinct\"] == p_distinct, \"Describe 1D p_distinct incorrect\"\n assert desc_1d[\"is_unique\"] == is_unique, \"Describe 1D should return unique\"\n\n\n@pytest.fixture\ndef recoding_data():\n data = {\n \"x\": [\n \"chien\",\n \"chien\",\n \"chien\",\n \"chien\",\n \"chat\",\n \"chat\",\n \"chameaux\",\n \"chameaux\",\n ],\n \"y\": [\"dog\", \"dog\", \"dog\", \"dog\", \"cat\", \"cat\", \"camel\", \"camel\"],\n }\n df = pd.DataFrame(data)\n\n return df\n\n\n@pytest.fixture\ndef describe_data():\n data = {\n \"id\": [chr(97 + c) for c in range(1, 9)] + [\"d\"],\n \"x\": [50, 50, -10, 0, 0, 5, 15, -3, np.nan],\n \"y\": [\n 0.000001,\n 654.152,\n np.nan,\n 15.984512,\n 3122,\n -3.1415926535,\n 111,\n 15.9,\n 13.5,\n ],\n \"cat\": [\n \"a\",\n \"long text value\",\n u\"Élysée\",\n \"\",\n None,\n \"some <b> B.s </div> </div> HTML stuff\",\n \"c\",\n \"c\",\n \"c\",\n ],\n \"s1\": np.ones(9),\n \"s2\": [u\"some constant text $ % value {obj} \" for _ in range(1, 10)],\n \"somedate\": [\n datetime.date(2011, 7, 4),\n datetime.datetime(2022, 1, 1, 13, 57),\n datetime.datetime(1990, 12, 9),\n np.nan,\n datetime.datetime(1990, 12, 9),\n datetime.datetime(1950, 12, 9),\n datetime.datetime(1898, 1, 2),\n datetime.datetime(1950, 12, 9),\n datetime.datetime(1950, 12, 9),\n ],\n \"bool_tf\": [True, True, False, True, False, True, True, False, True],\n \"bool_tf_with_nan\": [\n True,\n False,\n False,\n False,\n False,\n True,\n True,\n False,\n np.nan,\n ],\n \"bool_01\": [1, 1, 0, 1, 1, 0, 0, 0, 1],\n \"bool_01_with_nan\": [1, 0, 1, 0, 0, 1, 1, 0, np.nan],\n \"list\": [\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n [1, 2],\n ],\n \"mixed\": [1, 2, \"a\", 4, 5, 6, 7, 8, 9],\n \"dict\": [\n {\"a\": \"a\"},\n {\"b\": \"b\"},\n {\"c\": \"c\"},\n {\"d\": \"d\"},\n {\"e\": \"e\"},\n {\"f\": \"f\"},\n {\"g\": \"g\"},\n {\"h\": \"h\"},\n {\"i\": \"i\"},\n ],\n \"tuple\": [\n (1, 2),\n (3, 4),\n (5, 6),\n (7, 8),\n (9, 10),\n (11, 12),\n (13, 14),\n (15, 16),\n (17, 18),\n ],\n }\n return data\n\n\n@pytest.fixture\ndef expected_results():\n return {\n \"id\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 9,\n \"cv\": check_is_NaN,\n \"n_distinct\": 8,\n \"freq\": 2,\n 
\"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"mean\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 0,\n \"p_missing\": 0.0,\n \"p_distinct\": 0.88888888,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": \"d\",\n \"type\": Variable.TYPE_CAT,\n \"variance\": check_is_NaN,\n },\n \"x\": {\n \"25%\": -0.75,\n \"5%\": -7.5499999999999989,\n \"50%\": 2.5,\n \"75%\": 23.75,\n \"95%\": 50.0,\n \"count\": 8,\n \"n_infinite\": 0,\n \"p_infinite\": 0,\n \"cv\": 1.771071190261633,\n \"n_distinct\": 6,\n \"freq\": check_is_NaN,\n \"iqr\": 24.5,\n \"is_unique\": False,\n \"kurtosis\": -0.50292858929003803,\n \"mad\": 9.0,\n \"max\": 50.0,\n \"mean\": 13.375,\n \"min\": -10.0,\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 6 / 8,\n \"n\": 9,\n \"n_zeros\": 2,\n \"p_zeros\": 0.2222222222222222,\n \"range\": 60.0,\n \"skewness\": 1.0851622393567653,\n \"std\": 23.688077169749342,\n \"sum\": 107.0,\n \"top\": check_is_NaN,\n \"type\": Variable.TYPE_NUM,\n \"variance\": 561.125,\n },\n \"y\": {\n \"25%\": 10.125000249999999,\n \"5%\": -2.0420348747749997,\n \"50%\": 15.942256,\n \"75%\": 246.78800000000001,\n \"95%\": 2258.2531999999987,\n \"count\": 8,\n \"n_infinite\": 0,\n \"p_infinite\": 0,\n \"cv\": 2.2112992878833846,\n \"n_distinct\": 8,\n \"freq\": check_is_NaN,\n \"iqr\": 236.66299975000001,\n \"is_unique\": True,\n \"kurtosis\": 6.974137018717359,\n \"mad\": 17.51305182675,\n \"max\": 3122.0,\n \"mean\": 491.17436504331249,\n \"min\": -3.1415926535000001,\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 1,\n \"n_zeros\": 0,\n \"p_zeros\": 0.0,\n \"range\": 3125.1415926535001,\n \"skewness\": 2.6156591135729266,\n \"std\": 1086.1335236468506,\n \"sum\": 3929.3949203464999,\n \"top\": check_is_NaN,\n \"type\": Variable.TYPE_NUM,\n \"variance\": 1179686.0311895239,\n },\n \"cat\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 8,\n \"cv\": check_is_NaN,\n \"n_distinct\": 6,\n \"freq\": 3,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"mean\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 6 / 8,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": \"c\",\n \"type\": Variable.TYPE_CAT,\n \"variance\": check_is_NaN,\n },\n \"s1\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 9,\n \"cv\": check_is_NaN,\n \"n_distinct\": 1,\n \"freq\": 9,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"mean\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 0,\n \"p_missing\": 0.0,\n \"p_distinct\": 0.1111111111111111,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": 
1.0,\n \"type\": Variable.TYPE_BOOL,\n \"variance\": check_is_NaN,\n },\n \"s2\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 9,\n \"cv\": check_is_NaN,\n \"n_distinct\": 1,\n \"freq\": 9,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"mean\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 0,\n \"p_missing\": 0.0,\n \"p_distinct\": 0.1111111111111111,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": \"some constant text $ % value {obj} \",\n \"type\": Variable.TYPE_CAT,\n \"variance\": check_is_NaN,\n },\n \"somedate\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 8,\n \"cv\": check_is_NaN,\n \"n_distinct\": 5,\n \"freq\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": datetime.datetime(2022, 1, 1, 13, 57),\n \"mean\": check_is_NaN,\n \"min\": datetime.datetime(1898, 1, 2),\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 5 / 8,\n \"p_zeros\": check_is_NaN,\n \"range\": datetime.timedelta(45289, hours=13, minutes=57),\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": check_is_NaN,\n \"type\": Variable.TYPE_DATE,\n },\n \"bool_tf\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 9,\n \"cv\": check_is_NaN,\n \"n_distinct\": 2,\n \"freq\": 6,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 0,\n \"p_missing\": 0,\n \"p_distinct\": 2 / 9,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": True,\n \"type\": Variable.TYPE_BOOL,\n \"variance\": check_is_NaN,\n },\n \"bool_tf_with_nan\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 8,\n \"cv\": check_is_NaN,\n \"n_distinct\": 2,\n \"freq\": 5,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 2 / 8,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": False,\n \"type\": Variable.TYPE_BOOL,\n \"variance\": check_is_NaN,\n },\n \"bool_01\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 9,\n \"cv\": check_is_NaN,\n \"n_distinct\": 2,\n \"freq\": 5,\n \"histogram\": check_is_NaN,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"min\": check_is_NaN,\n \"mini_histogram\": check_is_NaN,\n 
\"n_missing\": 0,\n \"p_missing\": 0,\n \"p_distinct\": 2 / 9,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": 1,\n \"type\": Variable.TYPE_BOOL,\n \"variance\": check_is_NaN,\n },\n \"bool_01_with_nan\": {\n \"25%\": check_is_NaN,\n \"5%\": check_is_NaN,\n \"50%\": check_is_NaN,\n \"75%\": check_is_NaN,\n \"95%\": check_is_NaN,\n \"count\": 8,\n \"cv\": check_is_NaN,\n \"n_distinct\": 2,\n \"freq\": 4,\n \"iqr\": check_is_NaN,\n \"is_unique\": False,\n \"kurtosis\": check_is_NaN,\n \"mad\": check_is_NaN,\n \"max\": check_is_NaN,\n \"min\": check_is_NaN,\n \"n_missing\": 1,\n \"p_missing\": 0.11111111111111116,\n \"p_distinct\": 2 / 8,\n \"p_zeros\": check_is_NaN,\n \"range\": check_is_NaN,\n \"skewness\": check_is_NaN,\n \"std\": check_is_NaN,\n \"sum\": check_is_NaN,\n \"top\": 0,\n \"type\": Variable.TYPE_BOOL,\n \"variance\": check_is_NaN,\n },\n \"list\": {\n \"count\": 9,\n \"n_missing\": 0,\n \"p_missing\": 0,\n \"type\": Variable.S_TYPE_UNSUPPORTED,\n },\n \"mixed\": {\n \"count\": 9,\n \"n_missing\": 0,\n \"p_missing\": 0,\n \"type\": Variable.S_TYPE_UNSUPPORTED,\n },\n \"dict\": {\n \"count\": 9,\n \"n_missing\": 0,\n \"p_missing\": 0,\n \"type\": Variable.S_TYPE_UNSUPPORTED,\n },\n \"tuple\": {\n \"count\": 9,\n \"n_missing\": 0,\n \"p_missing\": 0,\n \"type\": Variable.S_TYPE_UNSUPPORTED,\n },\n }\n\n\ndef test_describe_df(describe_data, expected_results):\n config[\"vars\"][\"num\"][\"low_categorical_threshold\"].set(0)\n describe_data_frame = pd.DataFrame(describe_data)\n describe_data_frame[\"somedate\"] = pd.to_datetime(describe_data_frame[\"somedate\"])\n\n results = describe(\"title\", describe_data_frame)\n\n assert {\n \"analysis\",\n \"table\",\n \"variables\",\n \"scatter\",\n \"correlations\",\n \"missing\",\n \"messages\",\n \"package\",\n \"sample\",\n \"duplicates\",\n } == set(results.keys()), \"Not in results\"\n\n assert {\"BOOL\": 5, \"CAT\": 3, \"UNSUPPORTED\": 4, \"NUM\": 2, \"DATE\": 1} == results[\n \"table\"\n ][\"types\"], \"Variable analysis failed\"\n\n # Loop over variables\n for col in describe_data.keys():\n for k, v in expected_results[col].items():\n if v == check_is_NaN:\n assert (\n k not in results[\"variables\"][col]\n ) == True, \"Value `{}` for key `{}` in column `{}` is not NaN\".format(\n results[\"variables\"][col][k], k, col\n )\n elif isinstance(v, float):\n assert (\n pytest.approx(v) == results[\"variables\"][col][k]\n ), \"Value `{}` for key `{}` in column `{}` is not NaN\".format(\n results[\"variables\"][col][k], k, col\n )\n else:\n assert (\n v == results[\"variables\"][col][k]\n ), \"Value `{}` for key `{}` in column `{}` is not NaN\".format(\n results[\"variables\"][col][k], k, col\n )\n\n if results[\"variables\"][col][\"type\"].value in [\"NUM\", \"DATE\"]:\n assert (\n \"histogram\" in results[\"variables\"][col]\n ), \"Histogram missing for column {} \".format(col)\n\n\ndef test_describe_empty():\n empty_frame = pd.DataFrame()\n with pytest.raises(ValueError):\n describe(\"\", empty_frame)\n\n\ndef test_describe_list():\n with pytest.raises(AttributeError):\n with pytest.warns(UserWarning):\n describe(\"\", [1, 2, 3])\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame",
"numpy.ones",
"pandas.Series"
]
] |
ezdac/raiden
|
[
"d7504996e6738b55d5a9dcf9a36ef66797f6f326"
] |
[
"tools/debugging/plot/scatter.py"
] |
[
"#!/usr/bin/env python\nimport argparse\nimport csv\nimport datetime\nimport sys\n\nfrom matplotlib import dates, pyplot\nfrom matplotlib.axes import Axes\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--width\", default=1000, help=\"Configures width of the output in pixels.\")\nparser.add_argument(\"--height\", default=800, help=\"Configures height of the output in pixels.\")\nparser.add_argument(\n \"--header\", help=\"If the csv does not have a header, use this to give a name to each column\"\n)\nparser.add_argument(\n \"output\", help=\"file name for the result image, filetype is inferred from this.\"\n)\nparser.add_argument(\"x\")\nparser.add_argument(\"y\")\n\nargs = parser.parse_args()\n\n\ndef parse_datetime(data: str) -> datetime.datetime:\n return datetime.datetime.fromisoformat(data)\n\n\ndef configure_axes(axes: Axes) -> None:\n hour_fmt = dates.DateFormatter(\"%H:%M\")\n minutes_fmt = dates.DateFormatter(\"%M\")\n\n axes.xaxis.set_major_locator(dates.HourLocator(interval=1))\n axes.xaxis.set_major_formatter(hour_fmt)\n axes.xaxis.set_minor_locator(dates.MinuteLocator(interval=5))\n axes.xaxis.set_minor_formatter(minutes_fmt)\n axes.xaxis.set_tick_params(which=\"major\", rotation=90)\n axes.xaxis.set_tick_params(which=\"minor\", rotation=90)\n\n\nx_axis = list()\ny_axis = list()\n\nif args.header:\n headers = args.header.split(\",\")\n reader = csv.DictReader(sys.stdin, fieldnames=headers)\nelse:\n reader = csv.DictReader(sys.stdin)\n\nfor line in reader:\n x_axis.append(parse_datetime(line[args.x]))\n y_axis.append(float(line[args.y]))\n\ndpi = 60\npyplot.figure(figsize=(args.width / dpi, args.height / dpi), dpi=dpi)\n\naxes = pyplot.gca()\n\nconfigure_axes(axes)\naxes.set_xlabel(args.x)\naxes.set_ylabel(args.y)\naxes.set_xlim(min(x_axis), max(x_axis))\n\npyplot.scatter(x_axis, y_axis, alpha=0.2)\npyplot.savefig(args.output)\n"
] |
[
[
"matplotlib.dates.MinuteLocator",
"matplotlib.pyplot.savefig",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.figure",
"matplotlib.dates.HourLocator",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.gca"
]
] |
gfgb/nglod
|
[
"801b662e9417562419dfac577c60c0152e6fdff4"
] |
[
"sdf-net/lib/tracer/SphereTracer_VC.py"
] |
[
"# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2021, NVIDIA CORPORATION.\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\r\n# this software and associated documentation files (the \"Software\"), to deal in\r\n# the Software without restriction, including without limitation the rights to\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r\n# the Software, and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\r\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\r\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nimport numpy as np\r\n\r\nfrom ..utils import PerfTimer\r\nfrom ..diffutils import gradient\r\nfrom ..geoutils import sample_unif_sphere\r\nfrom .RenderBuffer import RenderBuffer\r\n\r\nfrom ..PsDebugger import PsDebugger\r\n\r\nfrom .BaseTracer import BaseTracer\r\n\r\nfrom sol_nglod import aabb\r\n\r\nclass SphereTracer_VC(BaseTracer):\r\n\r\n def forward(self, net, ray_o, ray_d):\r\n \"\"\"Native implementation of sphere tracing.\"\"\"\r\n timer = PerfTimer(activate=False)\r\n nettimer = PerfTimer(activate=False)\r\n\r\n # Distanace from ray origin\r\n t = torch.zeros(ray_o.shape[0], 1, device=ray_o.device)\r\n\r\n # Position in model space\r\n x = torch.addcmul(ray_o, ray_d, t)\r\n\r\n cond = torch.ones_like(t).bool()[:,0]\r\n x, t, cond = aabb(ray_o, ray_d)\r\n\r\n normal = torch.zeros_like(x)\r\n # This function is in fact differentiable, but we treat it as if it's not, because\r\n # it evaluates a very long chain of recursive neural networks (essentially a NN with depth of\r\n # ~1600 layers or so). This is not sustainable in terms of memory use, so we return the final hit\r\n # locations, where additional quantities (normal, depth, segmentation) can be determined. The\r\n # gradients will propagate only to these locations. \r\n with torch.no_grad():\r\n\r\n d, _ = net(x)\r\n \r\n dprev = d.clone()\r\n\r\n # If cond is TRUE, then the corresponding ray has not hit yet.\r\n # OR, the corresponding ray has exit the clipping plane.\r\n #cond = torch.ones_like(d).bool()[:,0]\r\n\r\n # If miss is TRUE, then the corresponding ray has missed entirely.\r\n hit = torch.zeros_like(d).byte()\r\n \r\n for i in range(self.num_steps):\r\n timer.check(\"start\")\r\n # 1. Check if ray hits.\r\n #hit = (torch.abs(d) < self._MIN_DIS)[:,0] \r\n # 2. Check that the sphere tracing is not oscillating\r\n #hit = hit | (torch.abs((d + dprev) / 2.0) < self._MIN_DIS * 3)[:,0]\r\n \r\n # 3. Check that the ray has not exit the far clipping plane.\r\n #cond = (torch.abs(t) < self.clamp[1])[:,0]\r\n \r\n hit = (torch.abs(t) < self.camera_clamp[1])[:,0]\r\n \r\n # 1. not hit surface\r\n cond = cond & (torch.abs(d) > self.min_dis)[:,0] \r\n\r\n # 2. not oscillating\r\n cond = cond & (torch.abs((d + dprev) / 2.0) > self.min_dis * 3)[:,0]\r\n \r\n # 3. 
not a hit\r\n cond = cond & hit\r\n \r\n #cond = cond & ~hit\r\n \r\n # If the sum is 0, that means that all rays have hit, or missed.\r\n if not cond.any():\r\n break\r\n\r\n # Advance the x, by updating with a new t\r\n x = torch.where(cond.view(cond.shape[0], 1), torch.addcmul(ray_o, ray_d, t), x)\r\n \r\n # Store the previous distance\r\n dprev = torch.where(cond.unsqueeze(1), d, dprev)\r\n\r\n nettimer.check(\"nstart\")\r\n # Update the distance to surface at x\r\n d[cond] = net(x[cond])[0] * self.step_size\r\n\r\n nettimer.check(\"nend\")\r\n \r\n # Update the distance from origin \r\n t = torch.where(cond.view(cond.shape[0], 1), t+d, t)\r\n timer.check(\"end\")\r\n \r\n # AABB cull \r\n\r\n hit = hit & ~(torch.abs(x) > 1.0).any(dim=-1)\r\n \r\n # The function will return \r\n # x: the final model-space coordinate of the render\r\n # t: the final distance from origin\r\n # d: the final distance value from\r\n # miss: a vector containing bools of whether each ray was a hit or miss\r\n #_normal = F.normalize(gradient(x[hit], net, method='finitediff'), p=2, dim=-1, eps=1e-5)\r\n \r\n grad = gradient(x[hit], net, method=self.grad_method)\r\n _normal = F.normalize(grad, p=2, dim=-1, eps=1e-5)\r\n\r\n normal[hit] = _normal\r\n\r\n return RenderBuffer(x=x, depth=t, hit=hit, normal=normal)\r\n \r\n def get_min(self, net, ray_o, ray_d):\r\n\r\n timer = PerfTimer(activate=False)\r\n nettimer = PerfTimer(activate=False)\r\n\r\n # Distance from ray origin\r\n t = torch.zeros(ray_o.shape[0], 1, device=ray_o.device)\r\n\r\n # Position in model space\r\n x = torch.addcmul(ray_o, ray_d, t)\r\n\r\n x, t, hit = aabb(ray_o, ray_d)\r\n\r\n normal = torch.zeros_like(x)\r\n\r\n with torch.no_grad():\r\n d, _ = net(x)\r\n dprev = d.clone()\r\n mind = d.clone()\r\n minx = x.clone()\r\n\r\n # If cond is TRUE, then the corresponding ray has not hit yet.\r\n # OR, the corresponding ray has exit the clipping plane.\r\n cond = torch.ones_like(d).bool()[:,0]\r\n\r\n # If miss is TRUE, then the corresponding ray has missed entirely.\r\n hit = torch.zeros_like(d).byte()\r\n \r\n for i in range(self.num_steps):\r\n timer.check(\"start\")\r\n\r\n hit = (torch.abs(t) < self.camera_clamp[1])[:,0]\r\n \r\n # 1. not hit surface\r\n cond = (torch.abs(d) > self.min_dis)[:,0] \r\n\r\n # 2. not oscillating\r\n cond = cond & (torch.abs((d + dprev) / 2.0) > self.min_dis * 3)[:,0]\r\n \r\n # 3. 
not a hit\r\n cond = cond & hit\r\n \r\n \r\n #cond = cond & ~hit\r\n \r\n # If the sum is 0, that means that all rays have hit, or missed.\r\n if not cond.any():\r\n break\r\n\r\n # Advance the x, by updating with a new t\r\n x = torch.where(cond.view(cond.shape[0], 1), torch.addcmul(ray_o, ray_d, t), x)\r\n \r\n new_mins = (d<mind)[...,0]\r\n mind[new_mins] = d[new_mins]\r\n minx[new_mins] = x[new_mins]\r\n \r\n # Store the previous distance\r\n dprev = torch.where(cond.unsqueeze(1), d, dprev)\r\n\r\n nettimer.check(\"nstart\")\r\n # Update the distance to surface at x\r\n d[cond] = net(x[cond]) * self.step_size\r\n\r\n nettimer.check(\"nend\")\r\n \r\n # Update the distance from origin \r\n t = torch.where(cond.view(cond.shape[0], 1), t+d, t)\r\n timer.check(\"end\")\r\n\r\n # AABB cull \r\n\r\n hit = hit & ~(torch.abs(x) > 1.0).any(dim=-1)\r\n #hit = torch.ones_like(d).byte()[...,0]\r\n \r\n # The function will return \r\n # x: the final model-space coordinate of the render\r\n # t: the final distance from origin\r\n # d: the final distance value from\r\n # miss: a vector containing bools of whether each ray was a hit or miss\r\n #_normal = F.normalize(gradient(x[hit], net, method='finitediff'), p=2, dim=-1, eps=1e-5)\r\n _normal = gradient(x[hit], net, method=self.grad_method)\r\n normal[hit] = _normal\r\n \r\n\r\n return RenderBuffer(x=x, depth=t, hit=hit, normal=normal, minx=minx)\r\n\r\n def sample_surface(self, n, net):\r\n \r\n # Sample surface using random tracing (resample until num_samples is reached)\r\n \r\n timer = PerfTimer(activate=True)\r\n \r\n with torch.no_grad():\r\n i = 0\r\n while i < 1000:\r\n ray_o = torch.rand((n, 3), device=self.device) * 2.0 - 1.0\r\n # this really should just return a torch array in the first place\r\n ray_d = torch.from_numpy(sample_unif_sphere(n)).float().to(self.device)\r\n rb = self.forward(net, ray_o, ray_d)\r\n\r\n #d = torch.abs(net(rb.x)[..., 0])\r\n #idx = torch.where(d < 0.0003)\r\n #pts_pr = rb.x[idx] if i == 0 else torch.cat([pts_pr, rb.x[idx]], dim=0)\r\n \r\n pts_pr = rb.x[rb.hit] if i == 0 else torch.cat([pts_pr, rb.x[rb.hit]], dim=0)\r\n if pts_pr.shape[0] >= n:\r\n break\r\n i += 1\r\n if i == 50:\r\n print('Taking an unusually long time to sample desired # of points.')\r\n \r\n return pts_pr\r\n\r\n"
] |
[
[
"torch.zeros",
"torch.nn.functional.normalize",
"torch.cat",
"torch.rand",
"torch.no_grad",
"torch.abs",
"torch.ones_like",
"torch.zeros_like",
"torch.addcmul"
]
] |
mokeeqian/ZhengFangJiaoWu
|
[
"7ac697ed9c1914f7f4aba47d51ba40a60eec1ebd"
] |
[
"main.py"
] |
[
"#!/usr/bin/env python3\n# encoding=utf-8\n# Copyright: Qian Jipeng(C) 2019\n\"\"\"\nTODO:\n\t数据清洗与进一步解析!\n\"\"\"\n\n\n\nimport os\nimport re\nimport chardet\t\t# encoding\nimport urllib.parse\n\nimport requests\nimport numpy as np\nimport pandas as pd\n\nimport config_loader as cfl \t# load config\n\nif os.name == \"nt\":\n\tos.sys.path.append(\".\\\\pytesser_v0.0.1\")\t# for Windows\nelif os.name == \"posix\":\n\tos.sys.path.append(\"./pytesser_v0.0.1\")\n\nimport pytesser as ocr\t\t\t# google ocr\n\nfrom html.parser import *\nfrom PIL import Image\nfrom libtiff import TIFF\n\n\n# 解析html标签\nclass TagParser(HTMLParser):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.view_state = list() # 用来存放viewstate\n\t\tself.event_validation = list()\n\t\t\n\tdef __del__(self):\n\t\tdel self.view_state # 释放资源\n\t\tdel self.event_validation\n\n\tdef handle_starttag(self, tag, attrs):\n\t\tif tag == 'input':\n\t\t\tattrs = dict(attrs)\n\t\t\tif attrs.__contains__('name'):\n\t\t\t\tif attrs['name'] == '__VIEWSTATE':\n\t\t\t\t\tself.view_state.append(attrs['value'])\n\t\t\t\telif attrs['name'] == '__EVENTVALIDATION':\n\t\t\t\t\tself.event_valifation.append(attrs['value'])\t\t\t\t\t\n\n\tdef doParse(self, webData):\n\t\tself.feed(data=webData)\n\n\n\nclass Login:\n\n\t# 有参构造\n\tdef __init__(self, uid=cfl.getUserId(), upwd=cfl.getUserPassword()):\n\t\t#self.user_id = cfl.getUserId()\n\t\t#self.user_pwd = cfl.getUserPassword()\t\n\t\tself.user_id = uid\n\t\tself.user_pwd = upwd\n\t\tself.user_name = \"\"\n\t\tself.login_url = cfl.getLoginUrl()\n\t\tself.checkcode_url = cfl.getCheckcodeUrl()\n\t\tself.cookies = requests.get(self.login_url).cookies\n\t\tself.headers = {\n\t\t\t\t'User-Agent': r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',\n\t\t}\n\n\t\t# self.query_headers = {\n\t\t# \t'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n\t\t# \t'Accept-Encoding': 'gzip, deflate',\n\t\t# \t'Accept-Language': 'en-US,en;q=0.9',\n\t\t# \t'Connection': 'keep-alive',\n\t\t# \t'Content-Type': 'text/html; charset=gb2312',\n\t\t# \t'Referer': '', # cfl.getIndexUrl() + 'xskbcx.aspx?xh=' + self.user_id + \"&xm=\" + self.user_name + \"&gnmkdm=\" + kdn_code,\n\t\t# \t# 'Referer': website + 'xs_main.aspx?xh=' + userxh,\n\t\t# \t'Upgrade-Insecure-Requests': '1',\n\t\t# \t'User-Agent': r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',\n\t\t# }\n\n\t\tself.config = {\n\t\t\t'__VIEWSTATE': '', # viewstate\n\t\t\t'TextBox1': self.user_id, # userid\n\t\t\t'TextBox2': self.user_pwd, # password\n\t\t\t'TextBox3': '', # checkcode\n\t\t\t'RadioButtonList1': '%D1%A7%C9%FA', # session\n\t\t\t'Button1': \"\",\n\t\t\t'lbLanguage': '',\n\t\t}\n\t\tself.tag_parser = TagParser()\n\t\tself.tag_parser.doParse(requests.get(self.login_url).text) # 解析\n\t\t\n\n\tdef setUserId(self, uid):\n\t\tself.user_id = uid\n\t\n\tdef setUserPwd(self, upwd):\n\t\tself.user_pwd = upwd\n\n\t# 获取并保存验证码图片\n\t# return: PIL.Image对象\n\tdef getAndSaveCheckCode(self, filename):\n\n\t\tpic = requests.post(url=self.checkcode_url, cookies=self.cookies, headers=self.headers)\n\t\tif os.path.exists(filename):\n\t\t\tos.remove(filename)\n\t\t# write as byte\n\t\twith open(filename, 'wb') as filewriter:\n\t\t\tfilewriter.write(pic.content)\n\t\timage = Image.open(filename) # PIL\n\t\t#image.show()\n\n\t\tout_tiff = TIFF.open(filename, mode = 'w')\t\t# 
TTIF\n\t\t#img = cv2.imdecode(np.fromstring(pic.content, np.uint8) )\n\t\tout_tiff.write_image(np.array(image), compression=None, write_rgb=False)\n\t\tout_tiff.close()\n\t\t\n\t\treturn image\n\t\n\n\t# 调用google ocr(Linux) \n\t# reutrn: string of the check code\n\tdef getCheckCodeStringLinux(self, filename):\n\t\ttext = str(ocr.image_file_to_string(filename))\n\t\tprint(\"验证码: \" + text)\n\t\ttext = text.strip() # 去除两边空格!!!\n\n\t\tif os.path.exists(filename):\n\t\t\tos.remove(filename) # 删除验证码\n\t\treturn text\n\n\n\t# 获取验证码字符串(Windows)\n\t# return: string\n\tdef getCheckCodeStringWindows(self, imageObj:Image):\n\t\ttext = str(ocr.image_to_string(imageObj))\n\t\tprint(\"验证码: \" + text)\n\t\ttext = text.strip()\n\t\t\n\t\treturn text\n\n\n\t# 应该在获取验证码后调用\n\tdef updateConfig(self, viewstate, checkcode):\n\t\tself.config['__VIEWSTATE'] = viewstate\n\t\tself.config['TextBox3'] = checkcode\n\n\t# 是否登陆成功\n\tdef checkIfSuccess(self, webContent):\n\t\tpattern = r'<title>(.*?)</title>'\n\t\titems = re.findall(pattern, webContent.text)\n\t\tif items[0] == \"欢迎使用正方教务管理系统!请登录\": # 特征匹配\n\t\t\treturn False\n\t\telse:\n\t\t\t# 抓取名字\n\t\t\tcatch = '<span id=\"xhxm\">(.*?)</span></em>'\n\t\t\tname = re.findall(catch, webContent.text)\n\t\t\tname = name[0][:-2]\n\t\t\t# name = name[:-2]\n\t\t\tprint(name)\n\t\t\tself.user_name = urllib.parse.quote(name.encode(\"gb2312\")) # 更新用户姓名\n\t\t\treturn True\n\n\n# 对外接口,用于从csv文件中获取用户配置信息\ndef getAllUsersFromExcel(filename:str):\n\n\twith open(filename, \"rb\") as f:\n\t\t# data_frame = pd.read_csv(f)\n\t\tencoding = chardet.detect(f.read())['encoding']\n\tprint(encoding)\n\n\twith open(filename, \"r\", encoding=encoding, errors='replace') as fo:\n\t\tdata_info = pd.read_csv(fo)\n\n\t# print(data_info['学号'])\n\n\treturn data_info[['学号', '身份证件号']]\n\n\n\n\n# 全局函数,对外接口\n# return: true if success\ndef doLogin(loginobject:Login, filename:str, resultdir:str):\n\n\tsep = os.sep\t\t# 文件名分隔符\n\t\n\tcheckcodeimage = loginobject.getAndSaveCheckCode(filename)\n\t#checkcode = input(\"输入验证码: \")\n\n\t# 这里非常奇怪,谷歌pytesser引擎在不同系统具有不同的处理逻辑\n\t# 所以我也做了跨平台的处理,不同系统平台调用不同api\n\tif os.name == \"nt\":\n\t\tcheckcode = loginobject.getCheckCodeStringWindows(checkcodeimage)\n\telif os.name == \"posix\":\n\t\tcheckcode = loginobject.getCheckCodeStringLinux(filename)\n\n\t#print(checkcode)\n\n\tloginobject.updateConfig(loginobject.tag_parser.view_state[0], checkcode)\n\t# print(loginobject.config)\n\tcontent = requests.post(url=loginobject.login_url, data=loginobject.config,\n\t headers=loginobject.headers, cookies=loginobject.cookies)\n\n\tif loginobject.checkIfSuccess(content):\n\t\tprint(\"登陆成功!!!\")\n\n\t\t# 检查结果路径\n\t\tif not os.path.exists(resultdir):\n\t\t\tos.system(\"mkdir \" + resultdir)\n\t\telse:\n\t\t\tfor f in os.listdir(resultdir):\n\t\t\t\tos.remove(resultdir + sep + f)\n\n\telse:\n\t\tprint(\"登录失败~~~\")\n\t\treturn False\n\n\t# query = Query()\n\t# query.queryCourse()\n\n\tprint(\"-------------开始查询----------\")\n\t# 配置区(一般无需修改)\n\tcourse_url = cfl.getIndexUrl() + 'xskbcx.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + \"N121603\"\n\texam_url = cfl.getIndexUrl() + 'xskscx.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + \"N121604\"\n\tclassexam_url = cfl.getIndexUrl() + 'xsdjkscx.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + \"N121606\"\n\tplan_url = cfl.getIndexUrl() + 'pyjh.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + 
\"N121607\"\n\tselect_course_url = cfl.getIndexUrl() + 'pyjh.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + \"N121615\"\n\tadd_exam_url = cfl.getIndexUrl() + 'xsbkkscx.aspx?xh=' + loginobject.user_id + \"&xm=\" + loginobject.user_name + \"&gnmkdm=\" + \"N121613\"\n\n\n\tquery_config = {\n\t\t'__EVENTTARGET': '',\n\t\t'__EVENTARGUMENT': '',\n\t\t'__VIEWSTATE': '',\n\t}\n\tquery_headers = {\n\t\t'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n\t\t'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.9', 'Connection': 'keep-alive',\n\t\t'Content-Type': 'text/html; charset=gb2312', 'Referer': '', 'Upgrade-Insecure-Requests': '1',\n\t\t'User-Agent': r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}\n\t# end 配置区\n\n\t# ------------------------- 查询课表 ----------------------\n\tprint(\"开始查询课表...\")\n\tquery_headers['Referer'] = course_url\n\t# 先get一下,获取view_state\n\tcourse_html = requests.get(course_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, course_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\tcourse = requests.session().get(url=course_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\tcourse_table = re.findall(catch, course.text)\n\tdel course\n\n\tf = open(resultdir + sep + \"course_table.txt\", \"w\")\n\tfor each_line in course_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel course_table\n\t# ------------------------- 课表结束 ------------------------\n\n\t# ------------------------- 查询考试安排 -----------------------\n\tprint(\"开始查询考试安排...\")\n\tquery_headers['Referer'] = exam_url\n\texam_html = requests.get(exam_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, exam_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\texam = requests.session().get(url=exam_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\texam_table = re.findall(catch, exam.text)\n\tdel exam\n\n\tf = open(resultdir + sep + \"exam_arrangement.txt\", \"w\")\n\tfor each_line in exam_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel exam_table\n\t# ----------------------------------- 结束 -----------------------------------------\n\n\t# ----------------------------------等级考试成绩查询 --------------------------------\n\tprint(\"开始查询等级考试成绩...\")\n\tquery_headers['Referer'] = classexam_url\n\tclassexam_html = requests.get(classexam_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, classexam_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\tclassexam = requests.session().get(url=classexam_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\tclassexam_table = 
re.findall(catch, classexam.text)\n\tdel classexam\n\n\tf = open(resultdir + sep + \"class_exam.txt\", \"w\")\n\tfor each_line in classexam_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel classexam_table\n\t# --------------------------- 结束 --------------------------\n\n\t# -------------------- 培养计划查询 ------------------------\n\tprint(\"开始查询培养计划...\")\n\tquery_headers['Referer'] = plan_url\n\tplan_html = requests.get(plan_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, plan_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\tplan = requests.session().get(url=plan_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\tplan_table = re.findall(catch, plan.text)\n\tdel plan\n\n\tf = open( resultdir + sep + \"development_plan.txt\", \"w\")\n\tfor each_line in plan_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel plan_table\n\t# --------------------- 结束 ----------------------------\n\n\t# --------------------- 学生选课情况查询 ------------------------------\n\tprint(\"开始查询选课情况...\")\n\tquery_headers['Referer'] = select_course_url\n\tselect_course_html = requests.get(select_course_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, select_course_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\tselect_course = requests.session().get(url=select_course_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\tselect_course_table = re.findall(catch, select_course.text)\n\tdel select_course\n\n\tf = open(resultdir + sep + \"select_course.txt\", \"w\")\n\tfor each_line in select_course_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel select_course_table\n\t# --------------------- 结束 ----------------------------\n\n\t# ------------------- 补考开始查询 ----------------------\n\tprint(\"开始查询补考安排...\")\n\tquery_headers['Referer'] = add_exam_url\n\tadd_exam_html = requests.get(add_exam_url, cookies=loginobject.cookies,\n\t headers=query_headers)\n\tcatch = '<input type=\"hidden\" name=\"__VIEWSTATE\" value=\"(.*?)\" />'\n\tquery_state = re.findall(catch, add_exam_html.text)[0]\n\tquery_config['__VIEWSTATE'] = query_state\n\tdel query_state\n\tadd_exam = requests.session().get(url=add_exam_url, data=query_config,\n\t headers=query_headers, cookies=loginobject.cookies)\n\t# print(course.text) # 测试ok\n\t# 写入文件\n\tcatch = '<td>(.*?)</td>'\n\tadd_exam_table = re.findall(catch, add_exam.text)\n\tdel add_exam\n\n\tf = open(resultdir + sep + \"add_exam.txt\", \"w\")\n\tfor each_line in add_exam_table:\n\t\tif \" \" in each_line:\n\t\t\t# TODO: 数据清洗\n\t\t\tpass\n\t\tf.write(each_line + \"\\n\")\n\tf.close()\n\tdel add_exam_table\n\t# ------------------- 结束 ------------------------\n\n\tprint(\"------------查询成功-----------\")\n\treturn True\n\n\n# 学生成绩查询,单独的url, 单独的模块,需要内网支持!!!\ndef queryScore():\n\tindex_url = \"http://211.70.149.134:8080/stud_score/brow_stud_score.aspx\"\n\n\trequest_headers = {\n\t 'Accept': 'text/html, 
application/xhtml+xml, application/xml; q=0.9, */*; q=0.8',\n\t 'Accept-Encoding': 'gzip, deflate',\n\t 'Accept-Language': 'zh-CN',\n\t 'Cache-Control': 'max-age=0',\n\t 'Connection': 'Keep-Alive',\n\t 'Content-Length': '1979',\n\t 'Content-Type': 'application/x-www-form-urlencoded',\n\t 'Host': '211.70.149.134:8080',\n\t 'Referer': 'http://211.70.149.134:8080/stud_score/brow_stud_score.aspx',\n\t 'Upgrade-Insecure-Requests': '1',\n\t 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',\n\t}\n\n\t# pre_content = requests.get(url=index_url, headers=request_headers)\n\t# tag = TagParser()\n\t# tag.doParse(pre_content)\n\n\trequest_body = {\n\t\t'__VIEWSTATE': '/wEPDwUKLTc5MTY3NzY2OA9kFgICAw9kFg4CBQ8QZBAVEg09Peivt+mAieaLqT09CTIwMTgtMjAxOQkyMDE3LTIwMTgJMjAxNi0yMDE3CTIwMTUtMjAxNgkyMDE0LTIwMTUJMjAxNC0yMDE1CTIwMTMtMjAxNAkyMDEzLTIwMTQJMjAxMi0yMDEzCTIwMTEtMjAxMwkyMDExLTIwMTIJMjAxMC0yMDExCTIwMDktMjAxMAkyMDA4LTIwMDkJMjAwNy0yMDA4CTIwMDYtMjAwNwkyMDA1LTIwMDYVEgAJMjAxOC0yMDE5CTIwMTctMjAxOAkyMDE2LTIwMTcJMjAxNS0yMDE2CTIwMTQtMjAxNQkyMDE0LTIwMTUJMjAxMy0yMDE0CTIwMTMtMjAxNAkyMDEyLTIwMTMJMjAxMS0yMDEzCTIwMTEtMjAxMgkyMDEwLTIwMTEJMjAwOS0yMDEwCTIwMDgtMjAwOQkyMDA3LTIwMDgJMjAwNi0yMDA3CTIwMDUtMjAwNhQrAxJnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dkZAIHDxBkEBUDDT096K+36YCJ5oupPT0BMgExFQMAATIBMRQrAwNnZ2dkZAIdD2QWAgIFDzwrABEBDBQrAABkAh8PZBYCAgEPPCsAEQEMFCsAAGQCIw9kFgICCQ88KwARAQwUKwAAZAIlD2QWAgIDDxBkZBYBZmQCJw9kFgICAQ88KwARAgEQFgAWABYADBQrAABkGAQFCUdyaWRWaWV3Mw9nZAUJR3JpZFZpZXcxD2dkBQxHcmlkVmlld19jajAPZ2QFC0dyaWRWaWV3X2NqD2dkfgx9G639stk68qu1tbM6nXH+3YdlwZe2JIGSN2ZE088=',\t\t# 提前抓取\n\t\t'__EVENTVALIDATION': '/wEdACUp2xwQA5+MxGAylYqe6RvwESCFkFW/RuhzY1oLb/NUVB2nXP6dhZn6mKtmTGNHd3MwXQrG5ab0It+QKWTLaqmfIGx0hKQHqP/3fgB45pITpVPxlwAt6+N0jBvmsiExk1IL7R6YXHs4CYW6xoeIyFd16zXVvnblom7uU1wVnGg7wjTQGKLCZEVQGnXF5+HuveXO10VSDEID+Eh3nI1jlRVvAThS3H1vAk8dedvccz5HgzmT0s6BT0Wkysz2I1SVLK+BBoNsEnjusSCSqveEZIqKAqG9xWW5285pNEE/6xwtSGnvX+yWIf+Wd+BdgehLsTAOZMoWbJOD2xQz+jIVoq5usPGGtH1tfrRv2ZNXrhgDFgrzjXp2SzUmL/y0eqihj4CJd11haMaOlgGzsMzfNEGBCbGJdvVBiiKVFujA6Ty1+MteZur+FCukzKhg+dlfCCZ5ZXtzeYjop7ggcaKI2ArSMioS6xc0u4fT37iCAdJxSZ6Mq6ynQbr4SRbEt4fHquJ8HmzRIlNrYaLaoxNFkxJO8yv94tgoHy9fqXClMftcBv4KKg+fSMB29qVef+gLDI7R6mxQNC8xefIWSo4ykR6hMTQRw0wuRK4Dl44ooHPTmt+ZBkpb3m69wIYc3c25ONNlxf5DDVPFMmdvMOOjsZ8t2Mw1Ns2QByN3423c3ELIzh9H7TEwiNBjwLWQMcU1zfvVpr/9fHHTrBelNp+6kgKqR2Lfb8DngnqdQ/vCfpj94mQiGuqbQE6PCJUV5xtw06aeG3AaN7W8QZ1ogxwj25m/E4I+VOAdW6Oiquo3Vm9x9KL3uQI+SWcZLuVZNvx80A0=',\t# 提前抓取\n\t\t'Button_cjcx': '查询',\n\t\t'drop_type': '全部成绩',\t# TODO: 不要写死了\n\t\t'drop_xn': '2018-2019',\t# 不要写死了\n\t\t'drop_xq': '1',\t\t# 1,2\n\t\t'hid_dqszj': '',\n\t\t'TextBox1': cfl.getUserId(),\t# id\n\t\t'TextBox2': cfl.getUserPassword(),\t# pwd\n\t}\n\tcontent = requests.post(url=index_url, headers=request_headers, data=request_body)\n\t# print(content.text)\n\n\tcatch = '<td>(.*?)</td>'\n\tdata = re.findall(catch, content.text)\n\tfor item in data:\n\t\tprint(item)\n\n\n\n# 批量登录入口\ndef main_loop():\n\tuserset = getAllUsersFromExcel(\"info_table.csv\")\t# 获取所有的用户信息\n\tfor index, row in userset.iterrows():\n\t\tuserid = str(row['学号'])\n\t\tuserpwd = str(row['身份证件号'])\n\t\t#print(userid)\n\t\t#print(userpwd)\n\t\tcheckcodefile = os.curdir + os.sep + cfl.getCheckcodeFilename()\n\t\tresultfile = os.curdir + os.sep + cfl.getResultDirname()\n\t\t\n\t\tlogin = Login(userid.strip(), userpwd.strip())\n\t\t\n\t\tprint(\"### \" + login.user_id)\n\t\tprint( \"### \" + login.user_pwd )\n\t\t\n\t\t# 当前信息不对,跳过\n\t\tif doLogin( 
login, checkcodefile, resultfile ) == False:\n\t\t\tcontinue\n\n\t\tif os.path.exists(checkcodefile):\n\t\t\tos.remove(checkcodefile)\n\t\n\t\t\n# Single query\ndef main():\n\tcheckcodefile = os.curdir + os.sep + cfl.getCheckcodeFilename()\n\tresultfile = os.curdir + os.sep + cfl.getResultDirname()\n\tlogin = Login()\n\tdoLogin( login, checkcodefile, resultfile )\n\n\tif os.path.exists(checkcodefile):\n\t\tos.remove(checkcodefile)\n\n\nif __name__ == '__main__':\n\t#main_loop()\t\n\t#main()\n\tqueryScore()\n\t\n\n"
] |
[
[
"numpy.array",
"pandas.read_csv"
]
] |
OmriBromberg/great_expectations
|
[
"60eb81ebfb08fef5d37d55c316dc962928beb165",
"60eb81ebfb08fef5d37d55c316dc962928beb165"
] |
[
"tests/test_definitions/test_expectations_cfe.py",
"tests/checkpoint/test_checkpoint.py"
] |
[
"import glob\nimport json\nimport os\nimport random\nimport string\n\nimport pandas as pd\nimport pytest\n\nfrom great_expectations.execution_engine.pandas_batch_data import PandasBatchData\nfrom great_expectations.execution_engine.sparkdf_batch_data import SparkDFBatchData\nfrom great_expectations.execution_engine.sqlalchemy_batch_data import (\n SqlAlchemyBatchData,\n)\nfrom great_expectations.self_check.util import (\n BigQueryDialect,\n candidate_test_is_on_temporary_notimplemented_list_cfe,\n evaluate_json_test_cfe,\n get_test_validator_with_data,\n mssqlDialect,\n mysqlDialect,\n postgresqlDialect,\n sqliteDialect,\n)\nfrom tests.conftest import build_test_backends_list_cfe\nfrom tests.test_definitions.test_expectations import tmp_dir\n\n\ndef pytest_generate_tests(metafunc):\n # Load all the JSON files in the directory\n dir_path = os.path.dirname(os.path.realpath(__file__))\n expectation_dirs = [\n dir_\n for dir_ in os.listdir(dir_path)\n if os.path.isdir(os.path.join(dir_path, dir_))\n ]\n parametrized_tests = []\n ids = []\n backends = build_test_backends_list_cfe(metafunc)\n for expectation_category in expectation_dirs:\n\n test_configuration_files = glob.glob(\n dir_path + \"/\" + expectation_category + \"/*.json\"\n )\n for c in backends:\n for filename in test_configuration_files:\n file = open(filename)\n test_configuration = json.load(file)\n\n for d in test_configuration[\"datasets\"]:\n datasets = []\n if candidate_test_is_on_temporary_notimplemented_list_cfe(\n c, test_configuration[\"expectation_type\"]\n ):\n skip_expectation = True\n schemas = validator_with_data = None\n else:\n skip_expectation = False\n if isinstance(d[\"data\"], list):\n sqlite_db_path = os.path.abspath(\n os.path.join(\n tmp_dir,\n \"sqlite_db\"\n + \"\".join(\n [\n random.choice(\n string.ascii_letters + string.digits\n )\n for _ in range(8)\n ]\n )\n + \".db\",\n )\n )\n for dataset in d[\"data\"]:\n datasets.append(\n get_test_validator_with_data(\n c,\n dataset[\"data\"],\n dataset.get(\"schemas\"),\n table_name=dataset.get(\"dataset_name\"),\n sqlite_db_path=sqlite_db_path,\n )\n )\n validator_with_data = datasets[0]\n else:\n schemas = d[\"schemas\"] if \"schemas\" in d else None\n validator_with_data = get_test_validator_with_data(\n c, d[\"data\"], schemas=schemas\n )\n\n for test in d[\"tests\"]:\n generate_test = True\n skip_test = False\n if \"only_for\" in test:\n # if we're not on the \"only_for\" list, then never even generate the test\n generate_test = False\n if not isinstance(test[\"only_for\"], list):\n raise ValueError(\"Invalid test specification.\")\n\n if validator_with_data and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n ):\n # Call out supported dialects\n if \"sqlalchemy\" in test[\"only_for\"]:\n generate_test = True\n elif (\n \"sqlite\" in test[\"only_for\"]\n and sqliteDialect is not None\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n sqliteDialect,\n )\n ):\n generate_test = True\n elif (\n \"postgresql\" in test[\"only_for\"]\n and postgresqlDialect is not None\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n postgresqlDialect,\n )\n ):\n generate_test = True\n elif (\n \"mysql\" in test[\"only_for\"]\n and mysqlDialect is not None\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n mysqlDialect,\n )\n ):\n generate_test = True\n elif (\n \"mssql\" in test[\"only_for\"]\n 
and mssqlDialect is not None\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n mssqlDialect,\n )\n ):\n generate_test = True\n elif (\n \"bigquery\" in test[\"only_for\"]\n and BigQueryDialect is not None\n and hasattr(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n \"name\",\n )\n and validator_with_data.execution_engine.active_batch_data.sql_engine_dialect.name\n == \"bigquery\"\n ):\n generate_test = True\n\n elif validator_with_data and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n PandasBatchData,\n ):\n if \"pandas\" in test[\"only_for\"]:\n generate_test = True\n if (\n \"pandas_022\" in test[\"only_for\"]\n or \"pandas_023\" in test[\"only_for\"]\n ) and int(pd.__version__.split(\".\")[1]) in [22, 23]:\n generate_test = True\n if (\"pandas>=24\" in test[\"only_for\"]) and int(\n pd.__version__.split(\".\")[1]\n ) > 24:\n generate_test = True\n elif validator_with_data and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SparkDFBatchData,\n ):\n if \"spark\" in test[\"only_for\"]:\n generate_test = True\n\n if not generate_test:\n continue\n\n if \"suppress_test_for\" in test and (\n (\n \"sqlalchemy\" in test[\"suppress_test_for\"]\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n )\n or (\n \"sqlite\" in test[\"suppress_test_for\"]\n and sqliteDialect is not None\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n sqliteDialect,\n )\n )\n or (\n \"postgresql\" in test[\"suppress_test_for\"]\n and postgresqlDialect is not None\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n postgresqlDialect,\n )\n )\n or (\n \"mysql\" in test[\"suppress_test_for\"]\n and mysqlDialect is not None\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n mysqlDialect,\n )\n )\n or (\n \"mssql\" in test[\"suppress_test_for\"]\n and mssqlDialect is not None\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n and isinstance(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n mssqlDialect,\n )\n )\n or (\n \"bigquery\" in test[\"suppress_test_for\"]\n and BigQueryDialect is not None\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n and hasattr(\n validator_with_data.execution_engine.active_batch_data.sql_engine_dialect,\n \"name\",\n )\n and validator_with_data.execution_engine.active_batch_data.sql_engine_dialect.name\n == \"bigquery\"\n )\n or (\n \"pandas\" in test[\"suppress_test_for\"]\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n PandasBatchData,\n )\n )\n or (\n \"spark\" in test[\"suppress_test_for\"]\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SparkDFBatchData,\n )\n )\n ):\n 
skip_test = True\n # Known condition: SqlAlchemy does not support allow_cross_type_comparisons\n if (\n \"allow_cross_type_comparisons\" in test[\"in\"]\n and validator_with_data\n and isinstance(\n validator_with_data.execution_engine.active_batch_data,\n SqlAlchemyBatchData,\n )\n ):\n skip_test = True\n\n parametrized_tests.append(\n {\n \"expectation_type\": test_configuration[\n \"expectation_type\"\n ],\n \"validator_with_data\": validator_with_data,\n \"test\": test,\n \"skip\": skip_expectation or skip_test,\n }\n )\n\n ids.append(\n c\n + \"/\"\n + expectation_category\n + \"/\"\n + test_configuration[\"expectation_type\"]\n + \":\"\n + test[\"title\"]\n )\n metafunc.parametrize(\"test_case\", parametrized_tests, ids=ids)\n\n\n@pytest.mark.order(index=0)\ndef test_case_runner_cfe(test_case):\n if test_case[\"skip\"]:\n pytest.skip()\n\n # Note: this should never be done in practice, but we are wiping expectations to reuse batches during testing.\n # test_case[\"batch\"]._initialize_expectations()\n if \"parse_strings_as_datetimes\" in test_case[\"test\"][\"in\"]:\n with pytest.deprecated_call():\n evaluate_json_test_cfe(\n validator=test_case[\"validator_with_data\"],\n expectation_type=test_case[\"expectation_type\"],\n test=test_case[\"test\"],\n )\n else:\n evaluate_json_test_cfe(\n validator=test_case[\"validator_with_data\"],\n expectation_type=test_case[\"expectation_type\"],\n test=test_case[\"test\"],\n )\n",
"import logging\nimport unittest.mock as mock\nfrom typing import Union\n\nimport pandas as pd\nimport pytest\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap\n\nimport great_expectations as ge\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.checkpoint import Checkpoint, LegacyCheckpoint\nfrom great_expectations.checkpoint.types.checkpoint_result import CheckpointResult\nfrom great_expectations.core.batch import BatchRequest, RuntimeBatchRequest\nfrom great_expectations.data_context.data_context import DataContext\nfrom great_expectations.data_context.types.base import CheckpointConfig\nfrom great_expectations.data_context.types.resource_identifiers import (\n ConfigurationIdentifier,\n)\nfrom great_expectations.util import filter_properties_dict\nfrom great_expectations.validation_operators.types.validation_operator_result import (\n ValidationOperatorResult,\n)\n\ntry:\n pyspark = pytest.importorskip(\"pyspark\")\n from pyspark.sql import SparkSession\nexcept ImportError:\n pyspark = None\n SparkSession = None\n\nyaml = YAML()\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_checkpoint_raises_typeerror_on_incorrect_data_context():\n with pytest.raises(TypeError):\n Checkpoint(name=\"my_checkpoint\", data_context=\"foo\", config_version=1)\n\n\ndef test_checkpoint_with_no_config_version_has_no_action_list(empty_data_context):\n checkpoint = Checkpoint(\"foo\", empty_data_context, config_version=None)\n with pytest.raises(AttributeError):\n _ = checkpoint.action_list\n\n\ndef test_checkpoint_with_config_version_has_action_list(empty_data_context):\n checkpoint = Checkpoint(\n \"foo\", empty_data_context, config_version=1, action_list=[{\"foo\": \"bar\"}]\n )\n obs = checkpoint.action_list\n assert isinstance(obs, list)\n assert obs == [{\"foo\": \"bar\"}]\n\n\n@mock.patch(\n \"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit\"\n)\ndef test_basic_checkpoint_config_validation(\n mock_emit,\n empty_data_context_stats_enabled,\n caplog,\n capsys,\n):\n context: DataContext = empty_data_context_stats_enabled\n yaml_config_erroneous: str\n config_erroneous: CommentedMap\n checkpoint_config: Union[CheckpointConfig, dict]\n checkpoint: Checkpoint\n\n yaml_config_erroneous = f\"\"\"\n name: misconfigured_checkpoint\n unexpected_property: UNKNOWN_PROPERTY_VALUE\n \"\"\"\n config_erroneous = yaml.load(yaml_config_erroneous)\n with pytest.raises(TypeError):\n # noinspection PyUnusedLocal\n checkpoint_config = CheckpointConfig(**config_erroneous)\n with pytest.raises(KeyError):\n # noinspection PyUnusedLocal\n checkpoint = context.test_yaml_config(\n yaml_config=yaml_config_erroneous,\n name=\"my_erroneous_checkpoint\",\n )\n assert mock_emit.call_count == 1\n expected_call_args_list = [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\"diagnostic_info\": [\"__class_name_not_provided__\"]},\n \"success\": False,\n }\n ),\n ]\n assert mock_emit.call_args_list == expected_call_args_list\n\n yaml_config_erroneous = f\"\"\"\n config_version: 1\n \"\"\"\n config_erroneous = yaml.load(yaml_config_erroneous)\n with pytest.raises(ge_exceptions.InvalidConfigError):\n # noinspection PyUnusedLocal\n checkpoint_config = CheckpointConfig.from_commented_map(\n commented_map=config_erroneous\n )\n with pytest.raises(KeyError):\n # noinspection PyUnusedLocal\n checkpoint = context.test_yaml_config(\n yaml_config=yaml_config_erroneous,\n name=\"my_erroneous_checkpoint\",\n )\n 
assert mock_emit.call_count == 2\n expected_call_args_list.extend(\n [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"diagnostic_info\": [\"__class_name_not_provided__\"]\n },\n \"success\": False,\n }\n ),\n ]\n )\n assert mock_emit.call_args_list == expected_call_args_list\n\n with pytest.raises(ge_exceptions.InvalidConfigError):\n # noinspection PyUnusedLocal\n checkpoint = context.test_yaml_config(\n yaml_config=yaml_config_erroneous,\n name=\"my_erroneous_checkpoint\",\n class_name=\"Checkpoint\",\n )\n assert mock_emit.call_count == 3\n expected_call_args_list.extend(\n [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\"parent_class\": \"Checkpoint\"},\n \"success\": False,\n }\n ),\n ]\n )\n assert mock_emit.call_args_list == expected_call_args_list\n\n yaml_config_erroneous = f\"\"\"\n config_version: 1\n name: my_erroneous_checkpoint\n class_name: Checkpoint\n \"\"\"\n # noinspection PyUnusedLocal\n checkpoint = context.test_yaml_config(\n yaml_config=yaml_config_erroneous,\n name=\"my_erroneous_checkpoint\",\n class_name=\"Checkpoint\",\n )\n captured = capsys.readouterr()\n assert any(\n [\n 'Your current Checkpoint configuration has an empty or missing \"validations\" attribute'\n in message\n for message in [caplog.text, captured.out]\n ]\n )\n assert any(\n [\n 'Your current Checkpoint configuration has an empty or missing \"action_list\" attribute'\n in message\n for message in [caplog.text, captured.out]\n ]\n )\n assert mock_emit.call_count == 4\n # Substitute anonymized name since it changes for each run\n anonymized_name = mock_emit.call_args_list[3][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n expected_call_args_list.extend(\n [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_name,\n \"parent_class\": \"Checkpoint\",\n },\n \"success\": True,\n }\n ),\n ]\n )\n assert mock_emit.call_args_list == expected_call_args_list\n\n assert len(context.list_checkpoints()) == 0\n context.add_checkpoint(**yaml.load(yaml_config_erroneous))\n assert len(context.list_checkpoints()) == 1\n\n yaml_config: str = f\"\"\"\n name: my_checkpoint\n config_version: 1\n class_name: Checkpoint\n validations: []\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n \"\"\"\n\n expected_checkpoint_config: dict = {\n \"name\": \"my_checkpoint\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n }\n\n config: CommentedMap = yaml.load(yaml_config)\n checkpoint_config = CheckpointConfig(**config)\n checkpoint_config = checkpoint_config.to_json_dict()\n checkpoint = Checkpoint(data_context=context, **checkpoint_config)\n assert (\n filter_properties_dict(\n properties=checkpoint.self_check()[\"config\"],\n clean_falsy=True,\n )\n == expected_checkpoint_config\n )\n assert (\n filter_properties_dict(\n 
properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n )\n == expected_checkpoint_config\n )\n\n checkpoint = context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"my_checkpoint\",\n )\n assert (\n filter_properties_dict(\n properties=checkpoint.self_check()[\"config\"],\n clean_falsy=True,\n )\n == expected_checkpoint_config\n )\n assert (\n filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n )\n == expected_checkpoint_config\n )\n assert mock_emit.call_count == 5\n # Substitute anonymized name since it changes for each run\n anonymized_name = mock_emit.call_args_list[4][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n expected_call_args_list.extend(\n [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_name,\n \"parent_class\": \"Checkpoint\",\n },\n \"success\": True,\n }\n ),\n ]\n )\n assert mock_emit.call_args_list == expected_call_args_list\n\n assert len(context.list_checkpoints()) == 1\n context.add_checkpoint(**yaml.load(yaml_config))\n assert len(context.list_checkpoints()) == 2\n\n context.create_expectation_suite(expectation_suite_name=\"my_expectation_suite\")\n with pytest.raises(\n ge_exceptions.DataContextError,\n match=r'Checkpoint \"my_checkpoint\" does not contain any validations.',\n ):\n # noinspection PyUnusedLocal\n result: CheckpointResult = context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n\n context.delete_checkpoint(name=\"my_erroneous_checkpoint\")\n context.delete_checkpoint(name=\"my_checkpoint\")\n assert len(context.list_checkpoints()) == 0\n\n\n@mock.patch(\n \"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit\"\n)\ndef test_checkpoint_configuration_no_nesting_using_test_yaml_config(\n mock_emit,\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n monkeypatch,\n):\n monkeypatch.setenv(\"VAR\", \"test\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"OLD_PARAM\", \"2\")\n\n checkpoint: Checkpoint\n\n data_context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n yaml_config: str = f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n expectation_suite_name: users.delivery\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"\n\n expected_checkpoint_config: dict = {\n \"name\": \"my_fancy_checkpoint\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -1,\n },\n },\n \"expectation_suite_name\": \"users.delivery\",\n \"action_list\": [\n {\n \"name\": 
\"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n \"evaluation_parameters\": {\"param1\": \"1\", \"param2\": '1 + \"2\"'},\n \"runtime_configuration\": {\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n }\n },\n }\n ],\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"run_name_template\": \"%Y-%M-foo-bar-template-test\",\n \"expectation_suite_name\": None,\n \"batch_request\": None,\n \"action_list\": [],\n \"evaluation_parameters\": {},\n \"runtime_configuration\": {},\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"my_fancy_checkpoint\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n # Test usage stats messages\n assert mock_emit.call_count == 1\n # Substitute current anonymized name since it changes for each run\n anonymized_checkpoint_name = mock_emit.call_args_list[0][0][0][\"event_payload\"][\n \"anonymized_name\"\n ]\n assert mock_emit.call_args_list == [\n mock.call(\n {\n \"event\": \"data_context.test_yaml_config\",\n \"event_payload\": {\n \"anonymized_name\": anonymized_checkpoint_name,\n \"parent_class\": \"Checkpoint\",\n },\n \"success\": True,\n }\n )\n ]\n\n assert len(data_context.list_checkpoints()) == 0\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 1\n\n data_context.create_expectation_suite(expectation_suite_name=\"users.delivery\")\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n assert len(result.list_validation_results()) == 1\n assert len(data_context.validations_store.list_keys()) == 1\n assert result.success\n\n data_context.delete_checkpoint(name=\"my_fancy_checkpoint\")\n assert len(data_context.list_checkpoints()) == 0\n\n\ndef test_checkpoint_configuration_nesting_provides_defaults_for_most_elements_test_yaml_config(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n monkeypatch,\n):\n monkeypatch.setenv(\"VAR\", \"test\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"OLD_PARAM\", \"2\")\n\n checkpoint: Checkpoint\n\n data_context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n yaml_config: str = f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_other_data_connector\n data_asset_name: users\n data_connector_query:\n index: -2\n expectation_suite_name: users.delivery\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: 
UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"\n\n expected_checkpoint_config: dict = {\n \"name\": \"my_fancy_checkpoint\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -1,\n },\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -2,\n },\n }\n },\n ],\n \"expectation_suite_name\": \"users.delivery\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n \"evaluation_parameters\": {\"param1\": \"1\", \"param2\": '1 + \"2\"'},\n \"runtime_configuration\": {\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"run_name_template\": \"%Y-%M-foo-bar-template-test\",\n \"batch_request\": None,\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"my_fancy_checkpoint\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n assert len(data_context.list_checkpoints()) == 0\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 1\n\n data_context.create_expectation_suite(expectation_suite_name=\"users.delivery\")\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n assert len(result.list_validation_results()) == 2\n assert len(data_context.validations_store.list_keys()) == 2\n assert result.success\n\n data_context.delete_checkpoint(name=\"my_fancy_checkpoint\")\n assert len(data_context.list_checkpoints()) == 0\n\n\ndef test_checkpoint_configuration_using_RuntimeDataConnector_with_Airflow_test_yaml_config(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n checkpoint: Checkpoint\n\n data_context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n yaml_config: str = f\"\"\"\n name: airflow_checkpoint\n config_version: 1\n class_name: Checkpoint\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_runtime_data_connector\n data_asset_name: IN_MEMORY_DATA_ASSET\n expectation_suite_name: users.delivery\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n \"\"\"\n\n expected_checkpoint_config: dict = {\n \"name\": \"airflow_checkpoint\",\n \"config_version\": 
1.0,\n \"class_name\": \"Checkpoint\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"IN_MEMORY_DATA_ASSET\",\n }\n }\n ],\n \"expectation_suite_name\": \"users.delivery\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"run_name_template\": None,\n \"batch_request\": None,\n \"evaluation_parameters\": {},\n \"runtime_configuration\": {},\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"airflow_checkpoint\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n assert len(data_context.list_checkpoints()) == 0\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 1\n\n data_context.create_expectation_suite(expectation_suite_name=\"users.delivery\")\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n batch_request={\n \"runtime_parameters\": {\n \"batch_data\": test_df,\n },\n \"batch_identifiers\": {\n \"airflow_run_id\": 1234567890,\n },\n },\n run_name=\"airflow_run_1234567890\",\n )\n assert len(result.list_validation_results()) == 1\n assert len(data_context.validations_store.list_keys()) == 1\n assert result.success\n\n data_context.delete_checkpoint(name=\"airflow_checkpoint\")\n assert len(data_context.list_checkpoints()) == 0\n\n\ndef test_checkpoint_configuration_warning_error_quarantine_test_yaml_config(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n monkeypatch,\n):\n monkeypatch.setenv(\"GE_ENVIRONMENT\", \"my_ge_environment\")\n\n checkpoint: Checkpoint\n\n data_context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n yaml_config: str = f\"\"\"\n name: airflow_users_node_3\n config_version: 1\n class_name: Checkpoint\n batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n validations:\n - expectation_suite_name: users.warning # runs the top-level action list against the top-level batch_request\n - expectation_suite_name: users.error # runs the locally-specified action_list union the top level action-list against the top-level batch_request\n action_list:\n - name: quarantine_failed_data\n action:\n class_name: CreateQuarantineData\n - name: advance_passed_data\n action:\n class_name: CreatePassedData\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n environment: $GE_ENVIRONMENT\n tolerance: 0.01\n 
runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"\n\n mock_create_quarantine_data = mock.MagicMock()\n mock_create_quarantine_data.run.return_value = True\n ge.validation_operators.CreateQuarantineData = mock_create_quarantine_data\n\n mock_create_passed_data = mock.MagicMock()\n mock_create_passed_data.run.return_value = True\n ge.validation_operators.CreatePassedData = mock_create_passed_data\n\n expected_checkpoint_config: dict = {\n \"name\": \"airflow_users_node_3\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -1,\n },\n },\n \"validations\": [\n {\"expectation_suite_name\": \"users.warning\"},\n {\n \"expectation_suite_name\": \"users.error\",\n \"action_list\": [\n {\n \"name\": \"quarantine_failed_data\",\n \"action\": {\"class_name\": \"CreateQuarantineData\"},\n },\n {\n \"name\": \"advance_passed_data\",\n \"action\": {\"class_name\": \"CreatePassedData\"},\n },\n ],\n },\n ],\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n \"evaluation_parameters\": {\n \"environment\": \"my_ge_environment\",\n \"tolerance\": 0.01,\n },\n \"runtime_configuration\": {\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"run_name_template\": None,\n \"expectation_suite_name\": None,\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"airflow_users_node_3\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n assert len(data_context.list_checkpoints()) == 0\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 1\n\n data_context.create_expectation_suite(expectation_suite_name=\"users.warning\")\n data_context.create_expectation_suite(expectation_suite_name=\"users.error\")\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n assert len(result.list_validation_results()) == 2\n assert len(data_context.validations_store.list_keys()) == 2\n assert result.success\n\n data_context.delete_checkpoint(name=\"airflow_users_node_3\")\n assert len(data_context.list_checkpoints()) == 0\n\n\ndef test_checkpoint_configuration_template_parsing_and_usage_test_yaml_config(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n monkeypatch,\n):\n monkeypatch.setenv(\"VAR\", \"test\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"OLD_PARAM\", \"2\")\n\n checkpoint: Checkpoint\n yaml_config: str\n expected_checkpoint_config: dict\n result: CheckpointResult\n\n data_context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n yaml_config = f\"\"\"\n name: my_base_checkpoint\n 
config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"\n\n expected_checkpoint_config = {\n \"name\": \"my_base_checkpoint\",\n \"config_version\": 1.0,\n \"template_name\": None,\n \"module_name\": \"great_expectations.checkpoint\",\n \"class_name\": \"Checkpoint\",\n \"run_name_template\": \"%Y-%M-foo-bar-template-test\",\n \"expectation_suite_name\": None,\n \"batch_request\": None,\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\"class_name\": \"StoreValidationResultAction\"},\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\"class_name\": \"StoreEvaluationParametersAction\"},\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\"class_name\": \"UpdateDataDocsAction\"},\n },\n ],\n \"evaluation_parameters\": {\"param1\": \"1\", \"param2\": '1 + \"2\"'},\n \"runtime_configuration\": {\n \"result_format\": {\"result_format\": \"BASIC\", \"partial_unexpected_count\": 20}\n },\n \"validations\": [],\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"my_base_checkpoint\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n assert len(data_context.list_checkpoints()) == 0\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 1\n\n with pytest.raises(\n ge_exceptions.DataContextError,\n match=r'Checkpoint \"my_base_checkpoint\" does not contain any validations.',\n ):\n # noinspection PyUnusedLocal\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n\n data_context.create_expectation_suite(expectation_suite_name=\"users.delivery\")\n\n result = data_context.run_checkpoint(\n checkpoint_name=\"my_base_checkpoint\",\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -1,\n },\n },\n \"expectation_suite_name\": \"users.delivery\",\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -2,\n },\n },\n \"expectation_suite_name\": \"users.delivery\",\n },\n ],\n )\n assert len(result.list_validation_results()) == 2\n assert len(data_context.validations_store.list_keys()) == 2\n assert result.success\n\n yaml_config = f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n template_name: my_base_checkpoint\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_other_data_connector\n data_asset_name: users\n 
data_connector_query:\n index: -2\n expectation_suite_name: users.delivery\n \"\"\"\n\n expected_checkpoint_config = {\n \"name\": \"my_fancy_checkpoint\",\n \"config_version\": 1.0,\n \"class_name\": \"Checkpoint\",\n \"template_name\": \"my_base_checkpoint\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -1,\n },\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\n \"index\": -2,\n },\n }\n },\n ],\n \"expectation_suite_name\": \"users.delivery\",\n \"module_name\": \"great_expectations.checkpoint\",\n \"run_name_template\": None,\n \"batch_request\": None,\n \"action_list\": [],\n \"evaluation_parameters\": {},\n \"runtime_configuration\": {},\n \"profilers\": [],\n }\n\n checkpoint = data_context.test_yaml_config(\n yaml_config=yaml_config,\n name=\"my_fancy_checkpoint\",\n )\n assert filter_properties_dict(\n properties=checkpoint.config.to_json_dict(),\n clean_falsy=True,\n ) == filter_properties_dict(\n properties=expected_checkpoint_config,\n clean_falsy=True,\n )\n\n assert len(data_context.list_checkpoints()) == 1\n data_context.add_checkpoint(**yaml.load(yaml_config))\n assert len(data_context.list_checkpoints()) == 2\n\n result: CheckpointResult = data_context.run_checkpoint(\n checkpoint_name=checkpoint.config.name,\n )\n assert len(result.list_validation_results()) == 2\n assert len(data_context.validations_store.list_keys()) == 4\n assert result.success\n\n data_context.delete_checkpoint(name=\"my_base_checkpoint\")\n data_context.delete_checkpoint(name=\"my_fancy_checkpoint\")\n assert len(data_context.list_checkpoints()) == 0\n\n\ndef test_legacy_checkpoint_instantiates_and_produces_a_validation_result_when_run(\n filesystem_csv_data_context_with_validation_operators,\n):\n rad_datasource = list(\n filter(\n lambda element: element[\"name\"] == \"rad_datasource\",\n filesystem_csv_data_context_with_validation_operators.list_datasources(),\n )\n )[0]\n base_directory = rad_datasource[\"batch_kwargs_generators\"][\"subdir_reader\"][\n \"base_directory\"\n ]\n batch_kwargs = {\n \"path\": base_directory + \"/f1.csv\",\n \"datasource\": \"rad_datasource\",\n \"reader_method\": \"read_csv\",\n }\n\n checkpoint_config_dict = {\n \"name\": \"my_checkpoint\",\n \"validation_operator_name\": \"action_list_operator\",\n \"batches\": [\n {\"batch_kwargs\": batch_kwargs, \"expectation_suite_names\": [\"my_suite\"]}\n ],\n }\n\n checkpoint = LegacyCheckpoint(\n data_context=filesystem_csv_data_context_with_validation_operators,\n **checkpoint_config_dict,\n )\n\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert (\n len(\n filesystem_csv_data_context_with_validation_operators.validations_store.list_keys()\n )\n == 0\n )\n\n filesystem_csv_data_context_with_validation_operators.create_expectation_suite(\n \"my_suite\"\n )\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert (\n len(\n filesystem_csv_data_context_with_validation_operators.validations_store.list_keys()\n )\n == 1\n )\n\n\n# TODO: add more test cases\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run(\n 
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n # add checkpoint config\n checkpoint_config = CheckpointConfig(\n name=\"my_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n }\n ],\n )\n checkpoint_config_key = ConfigurationIdentifier(\n configuration_key=checkpoint_config.name\n )\n context.checkpoint_store.set(key=checkpoint_config_key, value=checkpoint_config)\n checkpoint = context.get_checkpoint(checkpoint_config.name)\n\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_batch_request_object(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n # add checkpoint config\n batch_request = BatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n )\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[{\"batch_request\": batch_request}],\n )\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef 
test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_object_pandasdf(\n data_context_with_datasource_pandas_engine,\n):\n context: DataContext = data_context_with_datasource_pandas_engine\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n # add checkpoint config\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[{\"batch_request\": batch_request}],\n )\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_runtime_batch_request_object_sparkdf(\n data_context_with_datasource_spark_engine,\n):\n context: DataContext = data_context_with_datasource_spark_engine\n spark = SparkSession.builder.getOrCreate()\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark.createDataFrame(pandas_df)\n # add checkpoint config\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[{\"batch_request\": batch_request}],\n )\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert 
len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_batch_request_object_multi_validation_pandasdf(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n test_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n # add checkpoint config\n batch_request = BatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n )\n runtime_batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[\n {\"batch_request\": runtime_batch_request},\n {\"batch_request\": batch_request},\n ],\n )\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 2\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_batch_request_object_multi_validation_sparkdf(\n titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n spark = SparkSession.builder.getOrCreate()\n pandas_df: pd.DataFrame = pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n test_df = spark.createDataFrame(pandas_df)\n # add checkpoint config\n batch_request = BatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_basic_data_connector\",\n \"data_asset_name\": \"Titanic_1911\",\n }\n )\n runtime_batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_runtime_data_connector\",\n \"data_asset_name\": \"test_df\",\n \"batch_identifiers\": {\n \"pipeline_stage_name\": \"core_processing\",\n \"airflow_run_id\": 1234567890,\n },\n \"runtime_parameters\": {\"batch_data\": test_df},\n }\n )\n checkpoint = Checkpoint(\n 
name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[\n {\"batch_request\": runtime_batch_request},\n {\"batch_request\": batch_request},\n ],\n )\n with pytest.raises(\n ge_exceptions.DataContextError, match=r\"expectation_suite .* not found\"\n ):\n checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 0\n\n context.create_expectation_suite(\"my_expectation_suite\")\n # noinspection PyUnusedLocal\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 2\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_single_runtime_batch_request_query_in_validations(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query\n batch_request = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[{\"batch_request\": batch_request}],\n )\n\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_instantiates_and_produces_a_validation_result_when_run_multiple_runtime_batch_request_query_in_validations(\n data_context_with_datasource_sqlalchemy_engine, sa\n):\n context: DataContext = data_context_with_datasource_sqlalchemy_engine\n\n # create expectation suite\n context.create_expectation_suite(\"my_expectation_suite\")\n\n # RuntimeBatchRequest with a query 1\n batch_request_1 = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n 
\"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 10\"\n },\n }\n )\n\n # RuntimeBatchRequest with a query 2\n batch_request_2 = RuntimeBatchRequest(\n **{\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"default_data_asset_name\",\n \"batch_identifiers\": {\"default_identifier_name\": \"test_identifier\"},\n \"runtime_parameters\": {\n \"query\": \"SELECT * from table_partitioned_by_date_column__A LIMIT 5\"\n },\n }\n )\n\n # add checkpoint config\n checkpoint = Checkpoint(\n name=\"my_checkpoint\",\n data_context=context,\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template\",\n expectation_suite_name=\"my_expectation_suite\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n validations=[\n {\"batch_request\": batch_request_1},\n {\"batch_request\": batch_request_2},\n ],\n )\n\n results = checkpoint.run()\n\n assert len(context.validations_store.list_keys()) == 1\n assert results[\"success\"] == True\n try:\n print(results)\n except Exception as exception:\n raise pytest.fail(f\"EXCEPTION: {exception}\")\n\n\ndef test_newstyle_checkpoint_config_substitution_simple(\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n monkeypatch,\n):\n monkeypatch.setenv(\"GE_ENVIRONMENT\", \"my_ge_environment\")\n monkeypatch.setenv(\"VAR\", \"test\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"OLD_PARAM\", \"2\")\n\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates\n\n simplified_checkpoint_config = CheckpointConfig(\n name=\"my_simplified_checkpoint\",\n config_version=1,\n template_name=\"my_simple_template_checkpoint\",\n expectation_suite_name=\"users.delivery\",\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n ],\n )\n simplified_checkpoint = Checkpoint(\n data_context=context, **simplified_checkpoint_config.to_json_dict()\n )\n\n # template only\n expected_substituted_checkpoint_config_template_only = CheckpointConfig(\n name=\"my_simplified_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-test\",\n expectation_suite_name=\"users.delivery\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"my_ge_environment\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"1\",\n \"aux_param_1\": \"1 
+ 1\",\n },\n runtime_configuration={\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n }\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n ],\n )\n\n substituted_config_template_only = simplified_checkpoint.get_substituted_config()\n assert (\n substituted_config_template_only.to_json_dict()\n == expected_substituted_checkpoint_config_template_only.to_json_dict()\n )\n # make sure operation is idempotent\n simplified_checkpoint.get_substituted_config()\n assert (\n substituted_config_template_only.to_json_dict()\n == expected_substituted_checkpoint_config_template_only.to_json_dict()\n )\n\n # template and runtime kwargs\n expected_substituted_checkpoint_config_template_and_runtime_kwargs = (\n CheckpointConfig(\n name=\"my_simplified_checkpoint\",\n config_version=1,\n run_name_template=\"runtime_run_template\",\n expectation_suite_name=\"runtime_suite_name\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs_deluxe\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"runtime-my_ge_environment\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"runtime-1\",\n \"aux_param_1\": \"1 + 1\",\n \"new_runtime_eval_param\": \"bloopy!\",\n },\n runtime_configuration={\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 999,\n \"new_runtime_config_key\": \"bleepy!\",\n }\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_2\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -3},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_3\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -4},\n }\n },\n ],\n )\n )\n\n substituted_config_template_and_runtime_kwargs = (\n simplified_checkpoint.get_substituted_config(\n runtime_kwargs={\n \"expectation_suite_name\": \"runtime_suite_name\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_2\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -3},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n 
\"data_connector_name\": \"my_other_data_connector_3\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -4},\n }\n },\n ],\n \"run_name_template\": \"runtime_run_template\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": None,\n },\n {\n \"name\": \"update_data_docs_deluxe\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"evaluation_parameters\": {\n \"environment\": \"runtime-$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"runtime-$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n \"new_runtime_eval_param\": \"bloopy!\",\n },\n \"runtime_configuration\": {\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 999,\n \"new_runtime_config_key\": \"bleepy!\",\n }\n },\n }\n )\n )\n assert (\n substituted_config_template_and_runtime_kwargs.to_json_dict()\n == expected_substituted_checkpoint_config_template_and_runtime_kwargs.to_json_dict()\n )\n\n\ndef test_newstyle_checkpoint_config_substitution_nested(\n titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,\n monkeypatch,\n):\n monkeypatch.setenv(\"GE_ENVIRONMENT\", \"my_ge_environment\")\n monkeypatch.setenv(\"VAR\", \"test\")\n monkeypatch.setenv(\"MY_PARAM\", \"1\")\n monkeypatch.setenv(\"OLD_PARAM\", \"2\")\n\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates\n\n nested_checkpoint_config = CheckpointConfig(\n name=\"my_nested_checkpoint\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_2\",\n expectation_suite_name=\"users.delivery\",\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n ],\n )\n nested_checkpoint = Checkpoint(\n data_context=context, **nested_checkpoint_config.to_json_dict()\n )\n\n # template only\n expected_nested_checkpoint_config_template_only = CheckpointConfig(\n name=\"my_nested_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-test-template-2\",\n expectation_suite_name=\"users.delivery\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate2\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_2\",\n \"action\": {\"class_name\": \"Template2SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"my_ge_environment\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"1\",\n \"aux_param_1\": \"1 + 1\",\n \"template_1_key\": 456,\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n 
\"partial_unexpected_count\": 20,\n \"template_1_key\": 123,\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource_template_1\",\n \"data_connector_name\": \"my_special_data_connector_template_1\",\n \"data_asset_name\": \"users_from_template_1\",\n \"data_connector_query\": {\"partition_index\": -999},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n ],\n )\n\n substituted_config_template_only = nested_checkpoint.get_substituted_config()\n assert (\n substituted_config_template_only.to_json_dict()\n == expected_nested_checkpoint_config_template_only.to_json_dict()\n )\n # make sure operation is idempotent\n nested_checkpoint.get_substituted_config()\n assert (\n substituted_config_template_only.to_json_dict()\n == expected_nested_checkpoint_config_template_only.to_json_dict()\n )\n\n # runtime kwargs with new checkpoint template name passed at runtime\n expected_nested_checkpoint_config_template_and_runtime_template_name = (\n CheckpointConfig(\n name=\"my_nested_checkpoint\",\n config_version=1,\n run_name_template=\"runtime_run_template\",\n expectation_suite_name=\"runtime_suite_name\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomRuntimeStoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_2\",\n \"action\": {\"class_name\": \"Template2SpecialAction\"},\n },\n {\n \"name\": \"new_action_from_template_3\",\n \"action\": {\"class_name\": \"Template3SpecialAction\"},\n },\n {\n \"name\": \"update_data_docs_deluxe_runtime\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"runtime-my_ge_environment\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"runtime-1\",\n \"aux_param_1\": \"1 + 1\",\n \"template_1_key\": 456,\n \"template_3_key\": 123,\n \"new_runtime_eval_param\": \"bloopy!\",\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 999,\n \"template_1_key\": 123,\n \"template_3_key\": \"bloopy!\",\n \"new_runtime_config_key\": \"bleepy!\",\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource_template_1\",\n \"data_connector_name\": \"my_special_data_connector_template_1\",\n \"data_asset_name\": \"users_from_template_1\",\n \"data_connector_query\": {\"partition_index\": -999},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_special_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -1},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -2},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_2_runtime\",\n \"data_asset_name\": 
\"users\",\n \"data_connector_query\": {\"partition_index\": -3},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_3_runtime\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -4},\n }\n },\n ],\n )\n )\n\n substituted_config_template_and_runtime_kwargs = nested_checkpoint.get_substituted_config(\n runtime_kwargs={\n \"expectation_suite_name\": \"runtime_suite_name\",\n \"template_name\": \"my_nested_checkpoint_template_3\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_2_runtime\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -3},\n }\n },\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource\",\n \"data_connector_name\": \"my_other_data_connector_3_runtime\",\n \"data_asset_name\": \"users\",\n \"data_connector_query\": {\"partition_index\": -4},\n }\n },\n ],\n \"run_name_template\": \"runtime_run_template\",\n \"action_list\": [\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomRuntimeStoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": None,\n },\n {\n \"name\": \"update_data_docs_deluxe_runtime\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n \"evaluation_parameters\": {\n \"environment\": \"runtime-$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"runtime-$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n \"new_runtime_eval_param\": \"bloopy!\",\n },\n \"runtime_configuration\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 999,\n \"new_runtime_config_key\": \"bleepy!\",\n },\n }\n )\n assert (\n substituted_config_template_and_runtime_kwargs.to_json_dict()\n == expected_nested_checkpoint_config_template_and_runtime_template_name.to_json_dict()\n )\n"
] |
[
[
"pandas.__version__.split"
],
[
"pandas.DataFrame"
]
] |
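The row above pairs a Great Expectations checkpoint test with the pandas calls it indexes. The core pattern the test exercises — a RuntimeBatchRequest carrying a SQL query, handed to a Checkpoint as one of its validations — can be reduced to a short sketch. This is a minimal illustration only: the import paths follow Great Expectations releases of the same era as the test, and the project context, the "my_datasource" datasource with its default runtime data connector, and the "my_expectation_suite" suite are all assumed to already exist on disk.

```python
# Minimal sketch, assuming an initialized Great Expectations project whose
# config defines "my_datasource" with a default runtime data connector and
# an expectation suite named "my_expectation_suite".
from great_expectations.data_context import DataContext
from great_expectations.core.batch import RuntimeBatchRequest
from great_expectations.checkpoint import Checkpoint

context = DataContext()  # assumed: great_expectations.yml discoverable from cwd

batch_request = RuntimeBatchRequest(
    datasource_name="my_datasource",
    data_connector_name="default_runtime_data_connector_name",
    data_asset_name="default_data_asset_name",
    batch_identifiers={"default_identifier_name": "test_identifier"},
    runtime_parameters={
        "query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
    },
)

checkpoint = Checkpoint(
    name="my_checkpoint",
    data_context=context,
    config_version=1,
    expectation_suite_name="my_expectation_suite",
    validations=[{"batch_request": batch_request}],
)

results = checkpoint.run()
print(results["success"])
```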
Leslie-Fang/incubator-tvm
|
[
"aa035f4650926f5e714b02cbab6d974f0a17352f"
] |
[
"tests/python/relay/test_external_runtime.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom shutil import which\nimport json\nimport pytest\nimport sys\nimport numpy as np\n\nimport tvm\nfrom tvm import te\nimport tvm.runtime._ffi_api\nfrom tvm import relay\nfrom tvm.contrib import util\n\ntmp_path = util.tempdir()\n\n\ndef generate_csource_module():\n \"\"\"Mock the codegen with an external library (e.g., CBLAS/cuDNN)\"\"\"\n\n code = r'''\n #include <tvm/runtime/c_runtime_api.h>\n #include <tvm/runtime/packed_func.h>\n #include <dlpack/dlpack.h>\n #include <cstdint>\n #include <cstring>\n #include <iostream>\n\n #define GCC_BINARY_OP_1D(p_ID_, p_OP_, p_DIM1_) \\\n extern \"C\" void p_ID_(float* a, float* b, float* out) { \\\n for (int64_t i = 0; i < p_DIM1_; ++i) { \\\n out[i] = a[i] p_OP_ b[i]; \\\n } \\\n }\n\n #define GCC_BINARY_OP_2D(p_ID_, p_OP_, p_DIM1_, p_DIM2_) \\\n extern \"C\" void p_ID_(float* a, float* b, float* out) { \\\n for (int64_t i = 0; i < p_DIM1_; ++i) { \\\n for (int64_t j = 0; j < p_DIM2_; ++j) { \\\n int64_t k = i * p_DIM2_ + j; \\\n out[k] = a[k] p_OP_ b[k]; \\\n } \\\n } \\\n }\n GCC_BINARY_OP_2D(gcc_1_0, *, 10, 10);\n GCC_BINARY_OP_2D(gcc_1_1, -, 10, 10);\n GCC_BINARY_OP_2D(gcc_1_2, +, 10, 10);\n\n extern \"C\" void gcc_1_(float* gcc_input4, float* gcc_input5,\n float* gcc_input6, float* gcc_input7, float* out) {\n float* buf_0 = (float*)malloc(4 * 100);\n float* buf_1 = (float*)malloc(4 * 100);\n gcc_1_2(gcc_input4, gcc_input5, buf_0);\n gcc_1_1(buf_0, gcc_input6, buf_1);\n gcc_1_0(buf_1, gcc_input7, out);\n free(buf_0);\n free(buf_1);\n }\n\n extern \"C\" int ccompiler_wrapper_1_(DLTensor* arg0, DLTensor* arg1,\n DLTensor* arg2, DLTensor* arg3,\n DLTensor* out) {\n gcc_1_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),\n static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),\n static_cast<float*>(out->data));\n return 0;\n }\n\n TVM_DLL_EXPORT_TYPED_FUNC(json_rt_1, ccompiler_wrapper_1_);\n\n GCC_BINARY_OP_2D(gcc_0_0, *, 10, 10);\n GCC_BINARY_OP_2D(gcc_0_1, -, 10, 10);\n GCC_BINARY_OP_2D(gcc_0_2, +, 10, 10);\n\n extern \"C\" void gcc_0_(float* gcc_input0, float* gcc_input1,\n float* gcc_input2, float* gcc_input3, float* out) {\n float* buf_0 = (float*)malloc(4 * 100);\n float* buf_1 = (float*)malloc(4 * 100);\n gcc_0_2(gcc_input0, gcc_input1, buf_0);\n gcc_0_1(buf_0, gcc_input2, buf_1);\n gcc_0_0(buf_1, gcc_input3, out);\n free(buf_0);\n free(buf_1);\n }\n\n extern \"C\" int ccompiler_wrapper_0_(DLTensor* arg0, DLTensor* arg1,\n DLTensor* arg2, DLTensor* arg3,\n DLTensor* out) {\n gcc_0_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),\n static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),\n static_cast<float*>(out->data));\n return 0;\n }\n\n TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, 
ccompiler_wrapper_0_);\n\n '''\n csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, \"cc\", \"\",\n None)\n return csource_module\n\n\ndef generate_engine_module():\n \"\"\"\n Mock the codegen of an external backend with its own runtime engine\n (e.g., MKL-DNN/TensorRT)\n \"\"\"\n\n code = r'''\n #include <tvm/runtime/c_runtime_api.h>\n #include <tvm/runtime/packed_func.h>\n #include <dlpack/dlpack.h>\n #include \"json_engine.h\"\n\n extern \"C\" void json_1_(float* json_input4, float* json_input5,\n float* json_input6, float* json_input7, float* out) {\n\n std::string graph =\n \"add_2d,10,10\\n\"\n \"sub_2d,10,10\\n\"\n \"mul_2d,10,10\\n\";\n\n Engine engine;\n engine.run(graph, {json_input4, json_input5, json_input6, json_input7}, out);\n }\n\n extern \"C\" int json_wrapper_1_(DLTensor* arg0, DLTensor* arg1,\n DLTensor* arg2, DLTensor* arg3,\n DLTensor* out) {\n json_1_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),\n static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),\n static_cast<float*>(out->data));\n return 0;\n }\n\n TVM_DLL_EXPORT_TYPED_FUNC(json_rt_1, json_wrapper_1_);\n\n extern \"C\" void json_0_(float* json_input0, float* json_input1,\n float* json_input2, float* json_input3, float* out) {\n\n std::string graph =\n \"add_2d,10,10\\n\"\n \"sub_2d,10,10\\n\"\n \"mul_2d,10,10\\n\";\n\n Engine engine;\n engine.run(graph, {json_input0, json_input1, json_input2, json_input3}, out);\n\n }\n\n extern \"C\" int json_wrapper_0_(DLTensor* arg0, DLTensor* arg1,\n DLTensor* arg2, DLTensor* arg3,\n DLTensor* out) {\n json_0_(static_cast<float*>(arg0->data), static_cast<float*>(arg1->data),\n static_cast<float*>(arg2->data), static_cast<float*>(arg3->data),\n static_cast<float*>(out->data));\n return 0;\n }\n\n TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, json_wrapper_0_);\n\n '''\n\n gen_json_engine()\n csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, \"cc\", \"\",\n None)\n return csource_module\n\n\ndef gen_json_engine():\n \"\"\"An example of external backend runtime engine. 
This is supposed to be provided\n by third-party vendors and included when building the generated external kernel code.\n \"\"\"\n\n code = r'''\n #ifndef _JSON_ENGINE_H_\n #define _JSON_ENGINE_H_\n #include <cstdint>\n #include <string>\n #include <sstream>\n #include <vector>\n\n #define GCC_BINARY_OP_2D(p_ID_, p_OP_) \\\n void p_ID_(int64_t dim1, int64_t dim2, float* a, float* b, float* out) { \\\n for (int64_t i = 0; i < dim1; ++i) { \\\n for (int64_t j = 0; j < dim2; ++j) { \\\n int64_t k = i * dim2 + j; \\\n out[k] = a[k] p_OP_ b[k]; \\\n } \\\n } \\\n }\n GCC_BINARY_OP_2D(add_2d, +);\n GCC_BINARY_OP_2D(sub_2d, -);\n GCC_BINARY_OP_2D(mul_2d, *);\n\n struct Layer {\n void (*op)(int64_t, int64_t, float*, float*, float*);\n std::vector<int64_t> shapes;\n std::vector<float*> args;\n };\n\n class Engine {\n public:\n float* alloc_buffer(int64_t size) {\n float* buf = (float*)malloc(sizeof(float) * size);\n buffers.push_back(buf);\n return buf;\n }\n void add(std::string op, int64_t dim1, int64_t dim2, float* in1, float* in2, float* out) {\n Layer layer;\n layer.shapes.push_back(dim1);\n layer.shapes.push_back(dim2);\n layer.args.push_back(in1);\n layer.args.push_back(in2);\n layer.args.push_back(out);\n\n if (op == \"add_2d\")\n layer.op = &add_2d;\n else if (op == \"sub_2d\")\n layer.op = &sub_2d;\n else if (op == \"mul_2d\")\n layer.op = &mul_2d;\n net.push_back(layer);\n return ;\n }\n\n void run(std::string graph, std::vector<float*> args, float* out) {\n std::stringstream ss(graph);\n std::string line;\n int layer_idx = 0;\n int arg_idx = 0;\n float* buf = nullptr;\n\n while (std::getline(ss, line, '\\n')) {\n std::stringstream ss2(line);\n std::string token;\n std::vector<std::string> attrs;\n while (std::getline(ss2, token, ',')) {\n attrs.push_back(token);\n }\n int64_t dim1 = stoll(attrs[1]);\n int64_t dim2 = stoll(attrs[2]);\n auto out_buf = this->alloc_buffer(dim1 * dim2);\n\n if (layer_idx == 0) {\n this->add(attrs[0], dim1, dim2, args[0], args[1], out_buf);\n buf = out_buf;\n arg_idx = 2;\n }\n else {\n this->add(attrs[0], dim1, dim2, buf, args[arg_idx], out_buf);\n buf = out_buf;\n arg_idx++;\n }\n layer_idx++;\n }\n this->net.back().args.back() = out;\n\n for (auto layer : net) {\n (*layer.op)(layer.shapes[0], layer.shapes[1], layer.args[0], layer.args[1], layer.args[2]);\n }\n }\n ~Engine() {\n for (auto buf : buffers) {\n free(buf);\n }\n }\n private:\n std::vector<Layer> net;\n std::vector<float*> buffers;\n };\n\n #endif // _JSON_ENGINE_H_\n '''\n header_file = tmp_path.relpath(\"json_engine.h\")\n with open(header_file, 'w') as f:\n f.write(code)\n\n\ndef get_synthetic_lib():\n x = relay.var('x', shape=(10, 10))\n w0 = relay.var('w0', shape=(10, 10))\n w1 = relay.var('w1', shape=(10, 10))\n w2 = relay.var('w2', shape=(10, 10))\n w3 = relay.var('w3', shape=(10, 10))\n w4 = relay.var('w4', shape=(10, 10))\n w5 = relay.var('w5', shape=(10, 10))\n w6 = relay.var('w6', shape=(10, 10))\n w7 = relay.var('w7', shape=(10, 10))\n\n # subgraph0\n gcc_input0 = relay.var('gcc_input0', shape=(10, 10))\n gcc_input1 = relay.var('gcc_input1', shape=(10, 10))\n gcc_input2 = relay.var('gcc_input2', shape=(10, 10))\n gcc_input3 = relay.var('gcc_input3', shape=(10, 10))\n subgraph0 = relay.Function([gcc_input0, gcc_input1, gcc_input2,\n gcc_input3], relay.copy(gcc_input0))\n subgraph0 = subgraph0.with_attr(\n \"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n\n # Call subgraph0\n subgraph0_ret = relay.Call(subgraph0, [x, w0, w1, w2])\n\n # subgraph1\n gcc_input4 = relay.var('gcc_input4', 
shape=(10, 10))\n gcc_input5 = relay.var('gcc_input5', shape=(10, 10))\n gcc_input6 = relay.var('gcc_input6', shape=(10, 10))\n gcc_input7 = relay.var('gcc_input7', shape=(10, 10))\n subgraph1 = relay.Function([gcc_input4, gcc_input5, gcc_input6,\n gcc_input7], relay.copy(gcc_input4))\n subgraph1 = subgraph1.with_attr(\n \"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n\n # Call subgraph1\n subgraph1_ret = relay.Call(subgraph1, [x, w3, w4, w5])\n\n # Other ops that will be executed on TVM.\n add2 = relay.add(x, w6)\n sub2 = relay.subtract(add2, w7)\n ret = relay.concatenate((subgraph0_ret, subgraph1_ret, sub2), 0)\n func = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], ret)\n mod = tvm.IRModule.from_expr(func)\n _, lib, _ = relay.build(mod, \"llvm\")\n return lib\n\ndef get_whole_graph_json():\n nodex = {\"op\": \"null\", \"name\": \"x\", \"inputs\": []}\n node0 = {\"op\": \"null\", \"name\": \"w0\", \"inputs\": []}\n node1 = {\"op\": \"null\", \"name\": \"w1\", \"inputs\": []}\n node2 = {\"op\": \"null\", \"name\": \"w2\", \"inputs\": []}\n node3 = {\"op\": \"null\", \"name\": \"w3\", \"inputs\": []}\n node4 = {\"op\": \"null\", \"name\": \"w4\", \"inputs\": []}\n node5 = {\"op\": \"null\", \"name\": \"w5\", \"inputs\": []}\n node6 = {\"op\": \"null\", \"name\": \"w6\", \"inputs\": []}\n node7 = {\"op\": \"null\", \"name\": \"w7\", \"inputs\": []}\n\n subgraph0 = {\n \"op\": \"tvm_op\",\n \"name\": \"json_rt_0\",\n \"attrs\": {\n \"num_outputs\": \"1\",\n \"num_inputs\": \"4\",\n \"func_name\": \"json_rt_0\",\n \"flatten_data\": \"0\"\n },\n \"inputs\": [\n [0, 0, 0],\n [1, 0, 0],\n [2, 0, 0],\n [3, 0, 0],\n ]\n }\n subgraph1 = {\n \"op\": \"tvm_op\",\n \"name\": \"json_rt_1\",\n \"attrs\": {\n \"num_outputs\": \"1\",\n \"num_inputs\": \"4\",\n \"func_name\": \"json_rt_1\",\n \"flatten_data\": \"0\"\n },\n \"inputs\": [\n [0, 0, 0],\n [4, 0, 0],\n [5, 0, 0],\n [6, 0, 0],\n ]\n }\n\n fused_op = {\n \"op\": \"tvm_op\",\n \"name\": \"fused_add_subtract_concatenate\",\n \"attrs\": {\n \"num_outputs\": \"1\",\n \"num_inputs\": \"5\",\n \"func_name\": \"fused_add_subtract_concatenate\",\n \"flatten_data\": \"0\"\n },\n \"inputs\": [\n [9, 0, 0],\n [10, 0, 0],\n [0, 0, 0],\n [7, 0, 0],\n [8, 0, 0]\n ]\n }\n nodes = [nodex, node0, node1, node2, node3, node4,\n node5, node6, node7, subgraph0, subgraph1, fused_op]\n arg_nodes = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n heads = [[11, 0, 0]]\n node_row_ptr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n storage_id = [\"list_int\", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]\n\n shape = [\"list_shape\", [\n [10, 10], [10, 10], [10, 10], [10, 10], [10, 10], [10, 10],\n [10, 10], [10, 10], [10, 10], [10, 10], [10, 10], [30, 10]]]\n\n dltype = [\"list_str\", [\n \"float32\", \"float32\", \"float32\", \"float32\", \"float32\", \"float32\",\n \"float32\", \"float32\", \"float32\", \"float32\", \"float32\", \"float32\"]]\n\n attrs = {\n \"shape\": shape,\n \"dltype\": dltype,\n \"storage_id\": storage_id,\n }\n\n graph = {\"nodes\": nodes,\n \"arg_nodes\": arg_nodes,\n \"node_row_ptr\": node_row_ptr,\n \"heads\": heads,\n \"attrs\": attrs}\n\n return json.dumps(graph)\n\n\ndef run_extern(label, get_extern_src, **kwargs):\n if which(\"gcc\") is None:\n print(\"Skip test because gcc is not available.\")\n return\n\n obj_name = \"{}.o\".format(label)\n lib_name = \"external_{}.so\".format(label)\n\n # Get Json and the compiled library.\n graph_json = get_whole_graph_json()\n lib = get_synthetic_lib()\n lib.save(obj_name)\n\n # library that contains external code.\n 
csource_module = get_extern_src()\n kwargs[\"options\"] = [obj_name] + kwargs[\"options\"]\n lib_path = tmp_path.relpath(lib_name)\n csource_module.export_library(lib_path, fcompile=False, **kwargs)\n # load module for execution.\n lib = tvm.runtime.load_module(lib_path)\n mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0))\n\n x_data = np.random.rand(10, 10).astype('float32')\n mod.set_input(\"x\", x_data)\n w_data = []\n for i in range(8):\n data = np.random.rand(10, 10).astype('float32')\n w_data.append(data)\n var = \"w\" + str(i)\n mod.set_input(var, data)\n mod.run()\n out = tvm.nd.empty((30, 10), ctx=tvm.cpu())\n out = mod.get_output(0, out)\n tvm.testing.assert_allclose(\n out.asnumpy(),\n np.concatenate((((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7]),\n axis=0))\n\n\ndef test_dso_extern():\n run_extern(\"lib\", generate_csource_module, options=[\"-O2\", \"-std=c++14\"])\n\n\ndef test_engine_extern():\n run_extern(\"engine\",\n generate_engine_module,\n options=[\"-O2\", \"-std=c++14\", \"-I\" + tmp_path.relpath(\"\")])\n\ndef test_json_extern():\n if not tvm.get_global_func(\"module.loadfile_examplejson\", True):\n print(\"Skip because JSON example runtime is not enabled.\")\n return\n\n # Get subgraph Json.\n subgraph_json = (\"json_rt_0\\n\" +\n \"input 0 10 10\\n\" +\n \"input 1 10 10\\n\" +\n \"input 2 10 10\\n\" +\n \"input 3 10 10\\n\" +\n \"add 4 inputs: 0 1 shape: 10 10\\n\" +\n \"sub 5 inputs: 4 2 shape: 10 10\\n\" +\n \"mul 6 inputs: 5 3 shape: 10 10\\n\" +\n \"json_rt_1\\n\" +\n \"input 0 10 10\\n\" +\n \"input 1 10 10\\n\" +\n \"input 2 10 10\\n\" +\n \"input 3 10 10\\n\" +\n \"add 4 inputs: 0 1 shape: 10 10\\n\" +\n \"sub 5 inputs: 4 2 shape: 10 10\\n\" +\n \"mul 6 inputs: 5 3 shape: 10 10\")\n\n subgraph_path = tmp_path.relpath('subgraph.examplejson')\n with open(subgraph_path, 'w') as f:\n f.write(subgraph_json)\n\n # Get Json and module.\n graph_json = get_whole_graph_json()\n\n\n lib = get_synthetic_lib()\n ext_lib = tvm.runtime.load_module(subgraph_path, \"examplejson\")\n lib.import_module(ext_lib)\n lib_name = 'external.so'\n lib_path = tmp_path.relpath(lib_name)\n lib.export_library(lib_path)\n\n # load module for execution.\n lib = tvm.runtime.load_module(lib_path)\n mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0))\n\n x_data = np.random.rand(10, 10).astype('float32')\n mod.set_input(\"x\", x_data)\n w_data = []\n for i in range(8):\n data = np.random.rand(10, 10).astype('float32')\n w_data.append(data)\n var = \"w\" + str(i)\n mod.set_input(var, data)\n\n mod.run()\n out = tvm.nd.empty((30, 10), ctx=tvm.cpu())\n out = mod.get_output(0, out)\n tvm.testing.assert_allclose(\n out.asnumpy(),\n np.concatenate((((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7]),\n axis=0))\n\n\nif __name__ == \"__main__\":\n test_dso_extern()\n test_engine_extern()\n test_json_extern()\n"
] |
[
[
"numpy.concatenate",
"numpy.random.rand"
]
] |
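The numpy calls indexed for this row (numpy.random.rand, numpy.concatenate) come from the reference computation that run_extern and test_json_extern compare the TVM output against: each external subgraph computes ((x + a) - b) * c over 10x10 blocks, the TVM-side fused op adds a plain x + w6 - w7 block, and the three results are stacked along axis 0. A standalone sketch of just that reference math, with no TVM dependency, under the same shapes and names as the embedded test:

```python
import numpy as np

# Reference computation mirrored from the test's assert_allclose call
# (x_data / w_data follow the embedded test; no TVM required here).
x_data = np.random.rand(10, 10).astype("float32")
w_data = [np.random.rand(10, 10).astype("float32") for _ in range(8)]

expected = np.concatenate(
    (
        ((x_data + w_data[0]) - w_data[1]) * w_data[2],  # external subgraph json_rt_0
        ((x_data + w_data[3]) - w_data[4]) * w_data[5],  # external subgraph json_rt_1
        x_data + w_data[6] - w_data[7],                  # fused add/subtract on TVM side
    ),
    axis=0,
)
assert expected.shape == (30, 10)
```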
J-E-J-S/aaRS-Pipeline
|
[
"43f59f28ab06e4b16328c3bc405cdddc6e69ac44"
] |
[
"resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/matplotlib/axis.py"
] |
[
"\"\"\"\nClasses for the ticks and x and y axis\n\"\"\"\nfrom __future__ import division\n\nfrom matplotlib import rcParams\nimport matplotlib.artist as artist\nimport matplotlib.cbook as cbook\nimport matplotlib.font_manager as font_manager\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib.scale as mscale\nimport matplotlib.text as mtext\nimport matplotlib.ticker as mticker\nimport matplotlib.transforms as mtransforms\nimport matplotlib.units as munits\n\n\nclass Tick(artist.Artist):\n \"\"\"\n Abstract base class for the axis ticks, grid lines and labels\n\n 1 refers to the bottom of the plot for xticks and the left for yticks\n 2 refers to the top of the plot for xticks and the right for yticks\n\n Publicly accessible attributes:\n\n :attr:`tick1line`\n a Line2D instance\n\n :attr:`tick2line`\n a Line2D instance\n\n :attr:`gridline`\n a Line2D instance\n\n :attr:`label1`\n a Text instance\n\n :attr:`label2`\n a Text instance\n\n :attr:`gridOn`\n a boolean which determines whether to draw the tickline\n\n :attr:`tick1On`\n a boolean which determines whether to draw the 1st tickline\n\n :attr:`tick2On`\n a boolean which determines whether to draw the 2nd tickline\n\n :attr:`label1On`\n a boolean which determines whether to draw tick label\n\n :attr:`label2On`\n a boolean which determines whether to draw tick label\n\n \"\"\"\n def __init__(self, axes, loc, label,\n size = None, # points\n gridOn = None, # defaults to axes.grid\n tick1On = True,\n tick2On = True,\n label1On = True,\n label2On = False,\n major = True,\n ):\n \"\"\"\n bbox is the Bound2D bounding box in display coords of the Axes\n loc is the tick location in data coords\n size is the tick size in relative, axes coords\n \"\"\"\n artist.Artist.__init__(self)\n\n if gridOn is None: gridOn = rcParams['axes.grid']\n\n self.set_figure(axes.figure)\n self.axes = axes\n\n name = self.__name__.lower()\n if size is None:\n if major:\n size = rcParams['%s.major.size'%name]\n pad = rcParams['%s.major.pad'%name]\n else:\n size = rcParams['%s.minor.size'%name]\n pad = rcParams['%s.minor.pad'%name]\n\n self._tickdir = rcParams['%s.direction'%name]\n if self._tickdir == 'in':\n self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)\n self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)\n self._pad = pad\n else:\n self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)\n self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)\n self._pad = pad + size\n\n self._loc = loc\n self._size = size\n\n self.tick1line = self._get_tick1line()\n self.tick2line = self._get_tick2line()\n self.gridline = self._get_gridline()\n\n self.label1 = self._get_text1()\n self.label = self.label1 # legacy name\n self.label2 = self._get_text2()\n\n self.gridOn = gridOn\n self.tick1On = tick1On\n self.tick2On = tick2On\n self.label1On = label1On\n self.label2On = label2On\n\n self.update_position(loc)\n\n def get_children(self):\n children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]\n return children\n\n def set_clip_path(self, clippath, transform=None):\n artist.Artist.set_clip_path(self, clippath, transform)\n #self.tick1line.set_clip_path(clippath, transform)\n #self.tick2line.set_clip_path(clippath, transform)\n self.gridline.set_clip_path(clippath, transform)\n set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__\n\n def get_pad_pixels(self):\n return self.figure.dpi * self._pad / 72.0\n\n def contains(self, mouseevent):\n \"\"\"\n Test whether the mouse event occured in the 
Tick marks.\n\n This function always returns false. It is more useful to test if the\n axis as a whole contains the mouse rather than the set of tick marks.\n \"\"\"\n if callable(self._contains): return self._contains(self,mouseevent)\n return False,{}\n\n def set_pad(self, val):\n \"\"\"\n Set the tick label pad in points\n\n ACCEPTS: float\n \"\"\"\n self._pad = val\n\n def get_pad(self):\n 'Get the value of the tick label pad in points'\n return self._pad\n\n def _get_text1(self):\n 'Get the default Text 1 instance'\n pass\n\n def _get_text2(self):\n 'Get the default Text 2 instance'\n pass\n\n def _get_tick1line(self):\n 'Get the default line2D instance for tick1'\n pass\n\n def _get_tick2line(self):\n 'Get the default line2D instance for tick2'\n pass\n\n def _get_gridline(self):\n 'Get the default grid Line2d instance for this tick'\n pass\n\n\n def get_loc(self):\n 'Return the tick location (data coords) as a scalar'\n return self._loc\n\n def draw(self, renderer):\n if not self.get_visible(): return\n renderer.open_group(self.__name__)\n midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())\n\n if midPoint:\n if self.gridOn:\n self.gridline.draw(renderer)\n if self.tick1On:\n self.tick1line.draw(renderer)\n if self.tick2On:\n self.tick2line.draw(renderer)\n\n if self.label1On:\n self.label1.draw(renderer)\n if self.label2On:\n self.label2.draw(renderer)\n\n renderer.close_group(self.__name__)\n\n def set_label1(self, s):\n \"\"\"\n Set the text of ticklabel\n\n ACCEPTS: str\n \"\"\"\n self.label1.set_text(s)\n set_label = set_label1\n\n def set_label2(self, s):\n \"\"\"\n Set the text of ticklabel2\n\n ACCEPTS: str\n \"\"\"\n self.label2.set_text(s)\n\n def _set_artist_props(self, a):\n a.set_figure(self.figure)\n #if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)\n\n def get_view_interval(self):\n 'return the view Interval instance for the axis this tick is ticking'\n raise NotImplementedError('Derived must override')\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n raise NotImplementedError('Derived must override')\n\n\nclass XTick(Tick):\n \"\"\"\n Contains all the Artists needed to make an x tick - the tick line,\n the label text and the grid line\n \"\"\"\n __name__ = 'xtick'\n def _get_text1(self):\n 'Get the default Text instance'\n # the y loc is 3 points below the min of y axis\n # get the affine as an a,b,c,d,tx,ty list\n # x in data coords, y in axes coords\n #t = mtext.Text(\n trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)\n size = rcParams['xtick.labelsize']\n t = mtext.TextWithDash(\n x=0, y=0,\n fontproperties=font_manager.FontProperties(size=size),\n color=rcParams['xtick.color'],\n verticalalignment=vert,\n horizontalalignment=horiz,\n dashdirection=0,\n xaxis=True,\n )\n\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n\n def _get_text2(self):\n\n 'Get the default Text 2 instance'\n # x in data coords, y in axes coords\n #t = mtext.Text(\n trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)\n\n t = mtext.TextWithDash(\n x=0, y=1,\n fontproperties=font_manager.FontProperties(size=rcParams['xtick.labelsize']),\n color=rcParams['xtick.color'],\n verticalalignment=vert,\n dashdirection=1,\n xaxis=True,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n def _get_tick1line(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D(xdata=(0,), ydata=(0,),\n color='k',\n 
linestyle = 'None',\n marker = self._xtickmarkers[0],\n markersize=self._size,\n )\n l.set_transform(self.axes.get_xaxis_transform())\n self._set_artist_props(l)\n return l\n\n def _get_tick2line(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D( xdata=(0,), ydata=(1,),\n color='k',\n linestyle = 'None',\n marker = self._xtickmarkers[1],\n markersize=self._size,\n )\n\n l.set_transform(self.axes.get_xaxis_transform())\n self._set_artist_props(l)\n return l\n\n def _get_gridline(self):\n 'Get the default line2D instance'\n # x in data coords, y in axes coords\n l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),\n color=rcParams['grid.color'],\n linestyle=rcParams['grid.linestyle'],\n linewidth=rcParams['grid.linewidth'],\n )\n l.set_transform(self.axes.get_xaxis_transform())\n self._set_artist_props(l)\n\n return l\n\n def update_position(self, loc):\n 'Set the location of tick in data coords with scalar *loc*'\n x = loc\n\n nonlinear = (hasattr(self.axes, 'yaxis') and\n self.axes.yaxis.get_scale() != 'linear' or\n hasattr(self.axes, 'xaxis') and\n self.axes.xaxis.get_scale() != 'linear')\n\n if self.tick1On:\n self.tick1line.set_xdata((x,))\n if self.tick2On:\n self.tick2line.set_xdata((x,))\n if self.gridOn:\n self.gridline.set_xdata((x,))\n if self.label1On:\n self.label1.set_x(x)\n if self.label2On:\n self.label2.set_x(x)\n\n if nonlinear:\n self.tick1line._invalid = True\n self.tick2line._invalid = True\n self.gridline._invalid = True\n\n self._loc = loc\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervalx\n\n def set_view_interval(self, vmin, vmax, ignore = False):\n if ignore:\n self.axes.viewLim.intervalx = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)\n\n def get_minpos(self):\n return self.axes.dataLim.minposx\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervalx\n\n\nclass YTick(Tick):\n \"\"\"\n Contains all the Artists needed to make a Y tick - the tick line,\n the label text and the grid line\n \"\"\"\n __name__ = 'ytick'\n\n # how far from the y axis line the right of the ticklabel are\n def _get_text1(self):\n 'Get the default Text instance'\n # x in axes coords, y in data coords\n #t = mtext.Text(\n trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)\n\n t = mtext.TextWithDash(\n x=0, y=0,\n fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),\n color=rcParams['ytick.color'],\n verticalalignment=vert,\n horizontalalignment=horiz,\n dashdirection=0,\n xaxis=False,\n )\n t.set_transform(trans)\n #t.set_transform( self.axes.transData )\n self._set_artist_props(t)\n return t\n\n def _get_text2(self):\n 'Get the default Text instance'\n # x in axes coords, y in data coords\n #t = mtext.Text(\n trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)\n\n t = mtext.TextWithDash(\n x=1, y=0,\n fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),\n color=rcParams['ytick.color'],\n verticalalignment=vert,\n dashdirection=1,\n xaxis=False,\n horizontalalignment=horiz,\n )\n t.set_transform(trans)\n self._set_artist_props(t)\n return t\n\n def _get_tick1line(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n\n l = mlines.Line2D( (0,), (0,), color='k',\n marker = self._ytickmarkers[0],\n linestyle = 
'None',\n markersize=self._size,\n )\n l.set_transform(self.axes.get_yaxis_transform())\n self._set_artist_props(l)\n return l\n\n def _get_tick2line(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n l = mlines.Line2D( (1,), (0,), color='k',\n marker = self._ytickmarkers[1],\n linestyle = 'None',\n markersize=self._size,\n )\n\n l.set_transform(self.axes.get_yaxis_transform())\n self._set_artist_props(l)\n return l\n\n def _get_gridline(self):\n 'Get the default line2D instance'\n # x in axes coords, y in data coords\n l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),\n color=rcParams['grid.color'],\n linestyle=rcParams['grid.linestyle'],\n linewidth=rcParams['grid.linewidth'],\n )\n\n l.set_transform(self.axes.get_yaxis_transform())\n self._set_artist_props(l)\n return l\n\n\n def update_position(self, loc):\n 'Set the location of tick in data coords with scalar loc'\n y = loc\n\n nonlinear = (hasattr(self.axes, 'yaxis') and\n self.axes.yaxis.get_scale() != 'linear' or\n hasattr(self.axes, 'xaxis') and\n self.axes.xaxis.get_scale() != 'linear')\n\n if self.tick1On:\n self.tick1line.set_ydata((y,))\n if self.tick2On:\n self.tick2line.set_ydata((y,))\n if self.gridOn:\n self.gridline.set_ydata((y, ))\n if self.label1On:\n self.label1.set_y( y )\n if self.label2On:\n self.label2.set_y( y )\n if nonlinear:\n self.tick1line._invalid = True\n self.tick2line._invalid = True\n self.gridline._invalid = True\n\n self._loc = loc\n\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervaly\n\n def set_view_interval(self, vmin, vmax):\n if ignore:\n self.axes.viewLim.intervaly = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)\n\n def get_minpos(self):\n return self.axes.dataLim.minposy\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervaly\n\n\nclass Ticker:\n locator = None\n formatter = None\n\n\n\nclass Axis(artist.Artist):\n\n \"\"\"\n Public attributes\n\n * :attr:`transData` - transform data coords to display coords\n * :attr:`transAxis` - transform axis coords to display coords\n\n \"\"\"\n LABELPAD = 5\n OFFSETTEXTPAD = 3\n\n def __str__(self):\n return self.__class__.__name__ \\\n + \"(%f,%f)\"%tuple(self.axes.transAxes.transform_point((0,0)))\n\n def __init__(self, axes, pickradius=15):\n \"\"\"\n Init the axis with the parent Axes instance\n \"\"\"\n artist.Artist.__init__(self)\n self.set_figure(axes.figure)\n\n self.axes = axes\n self.major = Ticker()\n self.minor = Ticker()\n self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))\n\n #class dummy:\n # locator = None\n # formatter = None\n #self.major = dummy()\n #self.minor = dummy()\n\n self._autolabelpos = True\n self.label = self._get_label()\n self.offsetText = self._get_offset_text()\n self.majorTicks = []\n self.minorTicks = []\n self.pickradius = pickradius\n\n self.cla()\n self.set_scale('linear')\n\n\n def set_label_coords(self, x, y, transform=None):\n \"\"\"\n Set the coordinates of the label. By default, the x\n coordinate of the y label is determined by the tick label\n bounding boxes, but this can lead to poor alignment of\n multiple ylabels if there are multiple axes. Ditto for the y\n coodinate of the x label.\n\n You can also specify the coordinate system of the label with\n the transform. 
If None, the default coordinate system will be\n the axes coordinate system (0,0) is (left,bottom), (0.5, 0.5)\n is middle, etc\n\n \"\"\"\n\n self._autolabelpos = False\n if transform is None:\n transform = self.axes.transAxes\n\n self.label.set_transform(transform)\n self.label.set_position((x, y))\n\n def get_transform(self):\n return self._scale.get_transform()\n\n def get_scale(self):\n return self._scale.name\n\n def set_scale(self, value, **kwargs):\n self._scale = mscale.scale_factory(value, self, **kwargs)\n self._scale.set_default_locators_and_formatters(self)\n\n def limit_range_for_scale(self, vmin, vmax):\n return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())\n\n def get_children(self):\n children = [self.label]\n majorticks = self.get_major_ticks()\n minorticks = self.get_minor_ticks()\n\n children.extend(majorticks)\n children.extend(minorticks)\n return children\n\n def cla(self):\n 'clear the current axis'\n self.set_major_locator(mticker.AutoLocator())\n self.set_major_formatter(mticker.ScalarFormatter())\n self.set_minor_locator(mticker.NullLocator())\n self.set_minor_formatter(mticker.NullFormatter())\n\n # Clear the callback registry for this axis, or it may \"leak\"\n self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))\n\n # whether the grids are on\n self._gridOnMajor = rcParams['axes.grid']\n self._gridOnMinor = False\n\n self.label.set_text('')\n self._set_artist_props(self.label)\n\n # build a few default ticks; grow as necessary later; only\n # define 1 so properties set on ticks will be copied as they\n # grow\n\n cbook.popall(self.majorTicks)\n cbook.popall(self.minorTicks)\n\n self.majorTicks.extend([self._get_tick(major=True)])\n self.minorTicks.extend([self._get_tick(major=False)])\n self._lastNumMajorTicks = 1\n self._lastNumMinorTicks = 1\n\n self.converter = None\n self.units = None\n self.set_units(None)\n\n def set_clip_path(self, clippath, transform=None):\n artist.Artist.set_clip_path(self, clippath, transform)\n majorticks = self.get_major_ticks()\n minorticks = self.get_minor_ticks()\n for child in self.majorTicks + self.minorTicks:\n child.set_clip_path(clippath, transform)\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n raise NotImplementedError('Derived must override')\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n raise NotImplementedError('Derived must override')\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n raise NotImplementedError('Derived must override')\n\n def set_data_interval(self):\n 'Set the axis data limits'\n raise NotImplementedError('Derived must override')\n\n def _set_artist_props(self, a):\n if a is None: return\n a.set_figure(self.figure)\n\n def iter_ticks(self):\n \"\"\"\n Iterate through all of the major and minor ticks.\n \"\"\"\n majorLocs = self.major.locator()\n majorTicks = self.get_major_ticks(len(majorLocs))\n self.major.formatter.set_locs(majorLocs)\n majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]\n\n minorLocs = self.minor.locator()\n minorTicks = self.get_minor_ticks(len(minorLocs))\n self.minor.formatter.set_locs(minorLocs)\n minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]\n\n major_minor = [\n (majorTicks, majorLocs, majorLabels),\n (minorTicks, minorLocs, minorLabels)]\n\n for group in major_minor:\n for tick in zip(*group):\n yield tick\n\n def get_ticklabel_extents(self, renderer):\n \"\"\"\n Get the 
extents of the tick labels on either side\n of the axes.\n \"\"\"\n ticklabelBoxes = []\n ticklabelBoxes2 = []\n\n interval = self.get_view_interval()\n for tick, loc, label in self.iter_ticks():\n if tick is None: continue\n if not mtransforms.interval_contains(interval, loc): continue\n tick.update_position(loc)\n tick.set_label1(label)\n tick.set_label2(label)\n if tick.label1On and tick.label1.get_visible():\n extent = tick.label1.get_window_extent(renderer)\n ticklabelBoxes.append(extent)\n if tick.label2On and tick.label2.get_visible():\n extent = tick.label2.get_window_extent(renderer)\n ticklabelBoxes2.append(extent)\n\n if len(ticklabelBoxes):\n bbox = mtransforms.Bbox.union(ticklabelBoxes)\n else:\n bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)\n if len(ticklabelBoxes2):\n bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)\n else:\n bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)\n return bbox, bbox2\n\n def draw(self, renderer, *args, **kwargs):\n 'Draw the axis lines, grid lines, tick lines and labels'\n ticklabelBoxes = []\n ticklabelBoxes2 = []\n\n if not self.get_visible(): return\n renderer.open_group(__name__)\n interval = self.get_view_interval()\n for tick, loc, label in self.iter_ticks():\n if tick is None: continue\n if not mtransforms.interval_contains(interval, loc): continue\n tick.update_position(loc)\n tick.set_label1(label)\n tick.set_label2(label)\n tick.draw(renderer)\n if tick.label1On and tick.label1.get_visible():\n extent = tick.label1.get_window_extent(renderer)\n ticklabelBoxes.append(extent)\n if tick.label2On and tick.label2.get_visible():\n extent = tick.label2.get_window_extent(renderer)\n ticklabelBoxes2.append(extent)\n\n # scale up the axis label box to also find the neighbors, not\n # just the tick labels that actually overlap note we need a\n # *copy* of the axis label box because we don't wan't to scale\n # the actual bbox\n\n self._update_label_position(ticklabelBoxes, ticklabelBoxes2)\n\n self.label.draw(renderer)\n\n self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)\n self.offsetText.set_text( self.major.formatter.get_offset() )\n self.offsetText.draw(renderer)\n\n if 0: # draw the bounding boxes around the text for debug\n for tick in majorTicks:\n label = tick.label1\n mpatches.bbox_artist(label, renderer)\n mpatches.bbox_artist(self.label, renderer)\n\n renderer.close_group(__name__)\n\n def _get_label(self):\n raise NotImplementedError('Derived must override')\n\n def _get_offset_text(self):\n raise NotImplementedError('Derived must override')\n\n def get_gridlines(self):\n 'Return the grid lines as a list of Line2D instance'\n ticks = self.get_major_ticks()\n return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])\n\n def get_label(self):\n 'Return the axis label as a Text instance'\n return self.label\n\n def get_offset_text(self):\n 'Return the axis offsetText as a Text instance'\n return self.offsetText\n\n def get_pickradius(self):\n 'Return the depth of the axis used by the picker'\n return self.pickradius\n\n def get_majorticklabels(self):\n 'Return a list of Text instances for the major ticklabels'\n ticks = self.get_major_ticks()\n labels1 = [tick.label1 for tick in ticks if tick.label1On]\n labels2 = [tick.label2 for tick in ticks if tick.label2On]\n return cbook.silent_list('Text major ticklabel', labels1+labels2)\n\n def get_minorticklabels(self):\n 'Return a list of Text instances for the minor ticklabels'\n ticks = self.get_minor_ticks()\n labels1 = [tick.label1 for tick in ticks if 
tick.label1On]\n labels2 = [tick.label2 for tick in ticks if tick.label2On]\n return cbook.silent_list('Text minor ticklabel', labels1+labels2)\n\n def get_ticklabels(self, minor=False):\n 'Return a list of Text instances for ticklabels'\n if minor:\n return self.get_minorticklabels()\n return self.get_majorticklabels()\n\n def get_majorticklines(self):\n 'Return the major tick lines as a list of Line2D instances'\n lines = []\n ticks = self.get_major_ticks()\n for tick in ticks:\n lines.append(tick.tick1line)\n lines.append(tick.tick2line)\n return cbook.silent_list('Line2D ticklines', lines)\n\n def get_minorticklines(self):\n 'Return the minor tick lines as a list of Line2D instances'\n lines = []\n ticks = self.get_minor_ticks()\n for tick in ticks:\n lines.append(tick.tick1line)\n lines.append(tick.tick2line)\n return cbook.silent_list('Line2D ticklines', lines)\n\n def get_ticklines(self, minor=False):\n 'Return the tick lines as a list of Line2D instances'\n if minor:\n return self.get_minorticklines()\n return self.get_majorticklines()\n\n def get_majorticklocs(self):\n \"Get the major tick locations in data coordinates as a numpy array\"\n return self.major.locator()\n\n def get_minorticklocs(self):\n \"Get the minor tick locations in data coordinates as a numpy array\"\n return self.minor.locator()\n\n def get_ticklocs(self, minor=False):\n \"Get the tick locations in data coordinates as a numpy array\"\n if minor:\n return self.minor.locator()\n return self.major.locator()\n\n def _get_tick(self, major):\n 'return the default tick intsance'\n raise NotImplementedError('derived must override')\n\n def _copy_tick_props(self, src, dest):\n 'Copy the props from src tick to dest tick'\n if src is None or dest is None: return\n dest.label1.update_from(src.label1)\n dest.label2.update_from(src.label2)\n\n dest.tick1line.update_from(src.tick1line)\n dest.tick2line.update_from(src.tick2line)\n dest.gridline.update_from(src.gridline)\n\n dest.tick1On = src.tick1On\n dest.tick2On = src.tick2On\n dest.label1On = src.label1On\n dest.label2On = src.label2On\n\n def get_major_locator(self):\n 'Get the locator of the major ticker'\n return self.major.locator\n\n def get_minor_locator(self):\n 'Get the locator of the minor ticker'\n return self.minor.locator\n\n def get_major_formatter(self):\n 'Get the formatter of the major ticker'\n return self.major.formatter\n\n def get_minor_formatter(self):\n 'Get the formatter of the minor ticker'\n return self.minor.formatter\n\n def get_major_ticks(self, numticks=None):\n 'get the tick instances; grow as necessary'\n if numticks is None:\n numticks = len(self.get_major_locator()())\n if len(self.majorTicks) < numticks:\n # update the new tick label properties from the old\n for i in range(numticks - len(self.majorTicks)):\n tick = self._get_tick(major=True)\n self.majorTicks.append(tick)\n\n if self._lastNumMajorTicks < numticks:\n protoTick = self.majorTicks[0]\n for i in range(self._lastNumMajorTicks, len(self.majorTicks)):\n tick = self.majorTicks[i]\n if self._gridOnMajor: tick.gridOn = True\n self._copy_tick_props(protoTick, tick)\n\n self._lastNumMajorTicks = numticks\n ticks = self.majorTicks[:numticks]\n\n return ticks\n\n\n def get_minor_ticks(self, numticks=None):\n 'get the minor tick instances; grow as necessary'\n if numticks is None:\n numticks = len(self.get_minor_locator()())\n\n if len(self.minorTicks) < numticks:\n # update the new tick label properties from the old\n for i in range(numticks - len(self.minorTicks)):\n tick = 
self._get_tick(major=False)\n self.minorTicks.append(tick)\n\n if self._lastNumMinorTicks < numticks:\n protoTick = self.minorTicks[0]\n for i in range(self._lastNumMinorTicks, len(self.minorTicks)):\n tick = self.minorTicks[i]\n if self._gridOnMinor: tick.gridOn = True\n self._copy_tick_props(protoTick, tick)\n\n self._lastNumMinorTicks = numticks\n ticks = self.minorTicks[:numticks]\n\n return ticks\n\n\n def grid(self, b=None, which='major', **kwargs):\n \"\"\"\n Set the axis grid on or off; b is a boolean use *which* =\n 'major' | 'minor' to set the grid for major or minor ticks\n\n if *b* is *None* and len(kwargs)==0, toggle the grid state. If\n *kwargs* are supplied, it is assumed you want the grid on and *b*\n will be set to True\n\n *kwargs* are used to set the line properties of the grids, eg,\n\n xax.grid(color='r', linestyle='-', linewidth=2)\n \"\"\"\n if len(kwargs): b = True\n if which.lower().find('minor')>=0:\n if b is None: self._gridOnMinor = not self._gridOnMinor\n else: self._gridOnMinor = b\n for tick in self.minorTicks: # don't use get_ticks here!\n if tick is None: continue\n tick.gridOn = self._gridOnMinor\n if len(kwargs): artist.setp(tick.gridline,**kwargs)\n else:\n if b is None: self._gridOnMajor = not self._gridOnMajor\n else: self._gridOnMajor = b\n for tick in self.majorTicks: # don't use get_ticks here!\n if tick is None: continue\n tick.gridOn = self._gridOnMajor\n if len(kwargs): artist.setp(tick.gridline,**kwargs)\n\n\n def update_units(self, data):\n \"\"\"\n introspect *data* for units converter and update the\n axis.converter instance if necessary. Return *True* is *data* is\n registered for unit conversion\n \"\"\"\n\n converter = munits.registry.get_converter(data)\n if converter is None: return False\n self.converter = converter\n default = self.converter.default_units(data)\n #print 'update units: default=\"%s\", units=%s\"'%(default, self.units)\n if default is not None and self.units is None:\n self.set_units(default)\n self._update_axisinfo()\n return True\n\n def _update_axisinfo(self):\n \"\"\"\n check the axis converter for the stored units to see if the\n axis info needs to be updated\n \"\"\"\n\n if self.converter is None:\n return\n\n info = self.converter.axisinfo(self.units)\n if info is None:\n return\n if info.majloc is not None and self.major.locator!=info.majloc:\n self.set_major_locator(info.majloc)\n if info.minloc is not None and self.minor.locator!=info.minloc:\n self.set_minor_locator(info.minloc)\n if info.majfmt is not None and self.major.formatter!=info.majfmt:\n self.set_major_formatter(info.majfmt)\n if info.minfmt is not None and self.minor.formatter!=info.minfmt:\n self.set_minor_formatter(info.minfmt)\n if info.label is not None:\n label = self.get_label()\n label.set_text(info.label)\n\n\n def have_units(self):\n return self.converter is not None or self.units is not None\n\n def convert_units(self, x):\n if self.converter is None:\n self.converter = munits.registry.get_converter(x)\n\n if self.converter is None:\n #print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)\n return x\n\n ret = self.converter.convert(x, self.units)\n #print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)\n return ret\n\n def set_units(self, u):\n \"\"\"\n set the units for axis\n\n ACCEPTS: a units tag\n \"\"\"\n pchanged = False\n if u is None:\n self.units = None\n pchanged = True\n else:\n if u!=self.units:\n self.units = u\n 
#print 'setting units', self.converter, u, munits.registry.get_converter(u)\n pchanged = True\n if pchanged:\n self._update_axisinfo()\n self.callbacks.process('units')\n self.callbacks.process('units finalize')\n\n def get_units(self):\n 'return the units for axis'\n return self.units\n\n def set_major_formatter(self, formatter):\n \"\"\"\n Set the formatter of the major ticker\n\n ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance\n \"\"\"\n self.major.formatter = formatter\n formatter.set_axis(self)\n\n\n def set_minor_formatter(self, formatter):\n \"\"\"\n Set the formatter of the minor ticker\n\n ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance\n \"\"\"\n self.minor.formatter = formatter\n formatter.set_axis(self)\n\n\n def set_major_locator(self, locator):\n \"\"\"\n Set the locator of the major ticker\n\n ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance\n \"\"\"\n self.major.locator = locator\n locator.set_axis(self)\n\n\n def set_minor_locator(self, locator):\n \"\"\"\n Set the locator of the minor ticker\n\n ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance\n \"\"\"\n self.minor.locator = locator\n locator.set_axis(self)\n\n def set_pickradius(self, pickradius):\n \"\"\"\n Set the depth of the axis used by the picker\n\n ACCEPTS: a distance in points\n \"\"\"\n self.pickradius = pickradius\n\n\n def set_ticklabels(self, ticklabels, *args, **kwargs):\n \"\"\"\n Set the text values of the tick labels. Return a list of Text\n instances. Use *kwarg* *minor=True* to select minor ticks.\n\n ACCEPTS: sequence of strings\n \"\"\"\n #ticklabels = [str(l) for l in ticklabels]\n minor = kwargs.pop('minor', False)\n if minor:\n self.set_minor_formatter(mticker.FixedFormatter(ticklabels))\n ticks = self.get_minor_ticks()\n else:\n self.set_major_formatter( mticker.FixedFormatter(ticklabels) )\n ticks = self.get_major_ticks()\n\n self.set_major_formatter( mticker.FixedFormatter(ticklabels) )\n\n ret = []\n for i, tick in enumerate(ticks):\n if i<len(ticklabels):\n tick.label1.set_text(ticklabels[i])\n ret.append(tick.label1)\n tick.label1.update(kwargs)\n return ret\n\n def set_ticks(self, ticks, minor=False):\n \"\"\"\n Set the locations of the tick marks from sequence ticks\n\n ACCEPTS: sequence of floats\n \"\"\"\n ### XXX if the user changes units, the information will be lost here\n ticks = self.convert_units(ticks)\n if len(ticks) > 1:\n xleft, xright = self.get_view_interval()\n if xright > xleft:\n self.set_view_interval(min(ticks), max(ticks))\n else:\n self.set_view_interval(max(ticks), min(ticks))\n if minor:\n self.set_minor_locator(mticker.FixedLocator(ticks))\n return self.get_minor_ticks(len(ticks))\n else:\n self.set_major_locator( mticker.FixedLocator(ticks) )\n return self.get_major_ticks(len(ticks))\n\n def _update_label_position(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n raise NotImplementedError('Derived must override')\n\n def _update_offset_text_postion(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n raise NotImplementedError('Derived must override')\n\n def pan(self, numsteps):\n 'Pan *numsteps* (can be positive or negative)'\n self.major.locator.pan(numsteps)\n\n def zoom(self, direction):\n \"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out\"\n self.major.locator.zoom(direction)\n\nclass XAxis(Axis):\n __name__ = 'xaxis'\n axis_name = 'x'\n\n def 
contains(self,mouseevent):\n \"\"\"Test whether the mouse event occured in the x axis.\n \"\"\"\n if callable(self._contains): return self._contains(self,mouseevent)\n\n x,y = mouseevent.x,mouseevent.y\n try:\n trans = self.axes.transAxes.inverted()\n xaxes,yaxes = trans.transform_point((x,y))\n except ValueError:\n return False, {}\n l,b = self.axes.transAxes.transform_point((0,0))\n r,t = self.axes.transAxes.transform_point((1,1))\n inaxis = xaxes>=0 and xaxes<=1 and (\n (y<b and y>b-self.pickradius) or\n (y>t and y<t+self.pickradius))\n return inaxis, {}\n\n def _get_tick(self, major):\n return XTick(self.axes, 0, '', major=major)\n\n def _get_label(self):\n # x in axes coords, y in display coords (to be updated at draw\n # time by _update_label_positions)\n label = mtext.Text(x=0.5, y=0,\n fontproperties = font_manager.FontProperties(size=rcParams['axes.labelsize']),\n color = rcParams['axes.labelcolor'],\n verticalalignment='top',\n horizontalalignment='center',\n )\n\n label.set_transform( mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform() ))\n\n self._set_artist_props(label)\n self.label_position='bottom'\n return label\n\n def _get_offset_text(self):\n # x in axes coords, y in display coords (to be updated at draw time)\n offsetText = mtext.Text(x=1, y=0,\n fontproperties = font_manager.FontProperties(size=rcParams['xtick.labelsize']),\n color = rcParams['xtick.color'],\n verticalalignment='top',\n horizontalalignment='right',\n )\n offsetText.set_transform( mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform() ))\n self._set_artist_props(offsetText)\n self.offset_text_position='bottom'\n return offsetText\n\n def get_label_position(self):\n \"\"\"\n Return the label position (top or bottom)\n \"\"\"\n return self.label_position\n\n def set_label_position(self, position):\n \"\"\"\n Set the label position (top or bottom)\n\n ACCEPTS: [ 'top' | 'bottom' ]\n \"\"\"\n assert position == 'top' or position == 'bottom'\n if position == 'top':\n self.label.set_verticalalignment('bottom')\n else:\n self.label.set_verticalalignment('top')\n self.label_position=position\n\n def _update_label_position(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n if not self._autolabelpos: return\n x,y = self.label.get_position()\n if self.label_position == 'bottom':\n if not len(bboxes):\n bottom = self.axes.bbox.ymin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n bottom = bbox.y0\n self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / 72.0))\n\n else:\n if not len(bboxes2):\n top = self.axes.bbox.ymax\n else:\n bbox = mtransforms.Bbox.union(bboxes2)\n top = bbox.y1\n self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / 72.0))\n\n def _update_offset_text_position(self, bboxes, bboxes2):\n \"\"\"\n Update the offset_text position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n x,y = self.offsetText.get_position()\n if not len(bboxes):\n bottom = self.axes.bbox.ymin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n bottom = bbox.y0\n self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))\n\n def get_text_heights(self, renderer):\n \"\"\"\n Returns the amount of space one should reserve for text\n above and below the axes. 
Returns a tuple (above, below)\n \"\"\"\n bbox, bbox2 = self.get_ticklabel_extents(renderer)\n # MGDTODO: Need a better way to get the pad\n padPixels = self.majorTicks[0].get_pad_pixels()\n\n above = 0.0\n if bbox2.height:\n above += bbox2.height + padPixels\n below = 0.0\n if bbox.height:\n below += bbox.height + padPixels\n\n if self.get_label_position() == 'top':\n above += self.label.get_window_extent(renderer).height + padPixels\n else:\n below += self.label.get_window_extent(renderer).height + padPixels\n return above, below\n\n def set_ticks_position(self, position):\n \"\"\"\n Set the ticks position (top, bottom, both, default or none)\n both sets the ticks to appear on both positions, but does not\n change the tick labels. default resets the tick positions to\n the default: ticks on both positions, labels at bottom. none\n can be used if you don't want any ticks.\n\n ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]\n \"\"\"\n assert position in ('top', 'bottom', 'both', 'default', 'none')\n\n\n ticks = list( self.get_major_ticks() ) # a copy\n ticks.extend( self.get_minor_ticks() )\n\n if position == 'top':\n for t in ticks:\n t.tick1On = False\n t.tick2On = True\n t.label1On = False\n t.label2On = True\n elif position == 'bottom':\n for t in ticks:\n t.tick1On = True\n t.tick2On = False\n t.label1On = True\n t.label2On = False\n elif position == 'default':\n for t in ticks:\n t.tick1On = True\n t.tick2On = True\n t.label1On = True\n t.label2On = False\n elif position == 'none':\n for t in ticks:\n t.tick1On = False\n t.tick2On = False\n else:\n for t in ticks:\n t.tick1On = True\n t.tick2On = True\n for t in ticks:\n t.update_position(t._loc)\n\n def tick_top(self):\n 'use ticks only on top'\n self.set_ticks_position('top')\n\n def tick_bottom(self):\n 'use ticks only on bottom'\n self.set_ticks_position('bottom')\n\n def get_ticks_position(self):\n \"\"\"\n Return the ticks position (top, bottom, default or unknown)\n \"\"\"\n majt=self.majorTicks[0]\n mT=self.minorTicks[0]\n\n majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On\n minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On\n if majorTop and minorTop: return 'top'\n\n MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)\n MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)\n if MajorBottom and MinorBottom: return 'bottom'\n\n majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)\n minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)\n if majorDefault and minorDefault: return 'default'\n\n return 'unknown'\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervalx\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n if ignore:\n self.axes.viewLim.intervalx = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)\n\n def get_minpos(self):\n return self.axes.dataLim.minposx\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervalx\n\n def set_data_interval(self, vmin, vmax, ignore=False):\n 'return the Interval instance for this axis data limits'\n if ignore:\n self.axes.dataLim.intervalx = vmin, vmax\n else:\n Vmin, Vmax = self.get_data_interval()\n self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, 
Vmax)\n\n\nclass YAxis(Axis):\n __name__ = 'yaxis'\n axis_name = 'y'\n\n def contains(self,mouseevent):\n \"\"\"Test whether the mouse event occurred in the y axis.\n\n Returns *True* | *False*\n \"\"\"\n if callable(self._contains): return self._contains(self,mouseevent)\n\n x,y = mouseevent.x,mouseevent.y\n try:\n trans = self.axes.transAxes.inverted()\n xaxes,yaxes = trans.transform_point((x,y))\n except ValueError:\n return False, {}\n l,b = self.axes.transAxes.transform_point((0,0))\n r,t = self.axes.transAxes.transform_point((1,1))\n inaxis = yaxes>=0 and yaxes<=1 and (\n (x<l and x>l-self.pickradius) or\n (x>r and x<r+self.pickradius))\n return inaxis, {}\n\n def _get_tick(self, major):\n return YTick(self.axes, 0, '', major=major)\n\n\n def _get_label(self):\n # x in display coords (updated by _update_label_position)\n # y in axes coords\n label = mtext.Text(x=0, y=0.5,\n # todo: get the label position\n fontproperties=font_manager.FontProperties(size=rcParams['axes.labelsize']),\n color = rcParams['axes.labelcolor'],\n verticalalignment='center',\n horizontalalignment='right',\n rotation='vertical',\n )\n label.set_transform( mtransforms.blended_transform_factory(\n mtransforms.IdentityTransform(), self.axes.transAxes) )\n\n self._set_artist_props(label)\n self.label_position='left'\n return label\n\n def _get_offset_text(self):\n # x in display coords, y in axes coords (to be updated at draw time)\n offsetText = mtext.Text(x=0, y=0.5,\n fontproperties = font_manager.FontProperties(size=rcParams['ytick.labelsize']),\n color = rcParams['ytick.color'],\n verticalalignment = 'bottom',\n horizontalalignment = 'left',\n )\n offsetText.set_transform(mtransforms.blended_transform_factory(\n self.axes.transAxes, mtransforms.IdentityTransform()) )\n self._set_artist_props(offsetText)\n self.offset_text_position='left'\n return offsetText\n\n def get_label_position(self):\n \"\"\"\n Return the label position (left or right)\n \"\"\"\n return self.label_position\n\n def set_label_position(self, position):\n \"\"\"\n Set the label position (left or right)\n\n ACCEPTS: [ 'left' | 'right' ]\n \"\"\"\n assert position == 'left' or position == 'right'\n if position == 'right':\n self.label.set_horizontalalignment('left')\n else:\n self.label.set_horizontalalignment('right')\n self.label_position=position\n\n def _update_label_position(self, bboxes, bboxes2):\n \"\"\"\n Update the label position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n if not self._autolabelpos: return\n x,y = self.label.get_position()\n if self.label_position == 'left':\n if not len(bboxes):\n left = self.axes.bbox.xmin\n else:\n bbox = mtransforms.Bbox.union(bboxes)\n left = bbox.x0\n\n self.label.set_position( (left-self.LABELPAD*self.figure.dpi/72.0, y))\n\n else:\n if not len(bboxes2):\n right = self.axes.bbox.xmax\n else:\n bbox = mtransforms.Bbox.union(bboxes2)\n right = bbox.x1\n\n self.label.set_position( (right+self.LABELPAD*self.figure.dpi/72.0, y))\n\n def _update_offset_text_position(self, bboxes, bboxes2):\n \"\"\"\n Update the offset_text position based on the sequence of bounding\n boxes of all the ticklabels\n \"\"\"\n x,y = self.offsetText.get_position()\n top = self.axes.bbox.ymax\n self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))\n\n def set_offset_position(self, position):\n assert position == 'left' or position == 'right'\n\n x,y = self.offsetText.get_position()\n if position == 'left': x = 0\n else: x = 1\n\n self.offsetText.set_ha(position)\n 
self.offsetText.set_position((x,y))\n\n def get_text_widths(self, renderer):\n bbox, bbox2 = self.get_ticklabel_extents(renderer)\n # MGDTODO: Need a better way to get the pad\n padPixels = self.majorTicks[0].get_pad_pixels()\n\n left = 0.0\n if bbox.width:\n left += bbox.width + padPixels\n right = 0.0\n if bbox2.width:\n right += bbox2.width + padPixels\n\n if self.get_label_position() == 'left':\n left += self.label.get_window_extent(renderer).width + padPixels\n else:\n right += self.label.get_window_extent(renderer).width + padPixels\n return left, right\n\n def set_ticks_position(self, position):\n \"\"\"\n Set the ticks position (left, right, both or default)\n both sets the ticks to appear on both positions, but\n does not change the tick labels.\n default resets the tick positions to the default:\n ticks on both positions, labels on the left.\n\n ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]\n \"\"\"\n assert position in ('left', 'right', 'both', 'default', 'none')\n\n ticks = list( self.get_major_ticks() ) # a copy\n ticks.extend( self.get_minor_ticks() )\n\n if position == 'right':\n self.set_offset_position('right')\n for t in ticks:\n t.tick1On = False\n t.tick2On = True\n t.label1On = False\n t.label2On = True\n elif position == 'left':\n self.set_offset_position('left')\n for t in ticks:\n t.tick1On = True\n t.tick2On = False\n t.label1On = True\n t.label2On = False\n elif position == 'default':\n self.set_offset_position('left')\n for t in ticks:\n t.tick1On = True\n t.tick2On = True\n t.label1On = True\n t.label2On = False\n elif position == 'none':\n for t in ticks:\n t.tick1On = False\n t.tick2On = False\n else:\n self.set_offset_position('left')\n for t in ticks:\n t.tick1On = True\n t.tick2On = True\n\n def tick_right(self):\n 'use ticks only on right'\n self.set_ticks_position('right')\n\n def tick_left(self):\n 'use ticks only on left'\n self.set_ticks_position('left')\n\n def get_ticks_position(self):\n \"\"\"\n Return the ticks position (left, right, both or unknown)\n \"\"\"\n majt=self.majorTicks[0]\n mT=self.minorTicks[0]\n\n majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On\n minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On\n if majorRight and minorRight: return 'right'\n\n majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)\n minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)\n if majorLeft and minorLeft: return 'left'\n\n majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)\n minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)\n if majorDefault and minorDefault: return 'default'\n\n return 'unknown'\n\n def get_view_interval(self):\n 'return the Interval instance for this axis view limits'\n return self.axes.viewLim.intervaly\n\n def set_view_interval(self, vmin, vmax, ignore=False):\n if ignore:\n self.axes.viewLim.intervaly = vmin, vmax\n else:\n Vmin, Vmax = self.get_view_interval()\n self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)\n\n def get_minpos(self):\n return self.axes.dataLim.minposy\n\n def get_data_interval(self):\n 'return the Interval instance for this axis data limits'\n return self.axes.dataLim.intervaly\n\n def set_data_interval(self, vmin, vmax, ignore=False):\n 'return the Interval instance for this axis data limits'\n if ignore:\n self.axes.dataLim.intervaly = vmin, vmax\n else:\n Vmin, Vmax = self.get_data_interval()\n 
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)\n"
] |
[
[
"matplotlib.patches.bbox_artist",
"matplotlib.transforms.Bbox.from_extents",
"matplotlib.units.registry.get_converter",
"matplotlib.artist.Artist.set_clip_path",
"matplotlib.cbook.silent_list",
"matplotlib.font_manager.FontProperties",
"matplotlib.transforms.IdentityTransform",
"matplotlib.ticker.FixedFormatter",
"matplotlib.lines.Line2D",
"matplotlib.cbook.popall",
"matplotlib.transforms.interval_contains",
"matplotlib.artist.Artist.__init__",
"matplotlib.scale.scale_factory",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.ticker.NullFormatter",
"matplotlib.artist.setp",
"matplotlib.ticker.AutoLocator",
"matplotlib.transforms.Bbox.union",
"matplotlib.ticker.NullLocator",
"matplotlib.ticker.FixedLocator",
"matplotlib.cbook.CallbackRegistry"
]
] |
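As a minimal illustration of the ticker APIs listed for this file (FixedLocator, FixedFormatter, and Axis.grid forwarding keyword arguments through artist.setp), the sketch below uses only the public matplotlib interface and assumes a standard install with the headless Agg backend; it is a sketch for orientation, not code from the row above.

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedFormatter, FixedLocator

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])

# Axis.set_ticks()/set_ticklabels() in the code above reduce to installing a
# FixedLocator and a FixedFormatter on the major ticker:
ax.xaxis.set_major_locator(FixedLocator([0, 1, 2, 3]))
ax.xaxis.set_major_formatter(FixedFormatter(["a", "b", "c", "d"]))

# Axis.grid() flips tick.gridOn per tick and passes **kwargs through artist.setp:
ax.xaxis.grid(True, color="0.8", linestyle="--")

fig.savefig("ticks_demo.png")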
Benykoz/simcom
|
[
"ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93"
] |
[
"src/Unity2RealWorld.py"
] |
[
"import numpy as np\nimport math\nfrom geometry_msgs.msg import Pose, Point, Quaternion, Vector3\n\n\ndef positionROS2RW(position):\n A = np.array([[-1,0,0], [0,-1,0], [0,0,1,]])\n B = np.array([position.x, position.y, position.z])\n RWPos = A.dot(B)\n #RWPos = RWPos[0:3]\n return RWPos\n\n\ndef rotationROS2RW(orientation):\n RWOrient = Quaternion()\n RWOrient.x = -orientation.x\n RWOrient.y = -orientation.y\n RWOrient.z = orientation.z\n RWOrient.w = orientation.w\n return RWOrient\n\n\ndef velAccROS2RW(velocity):\n RWVelocity = Vector3()\n RWVelocity.x = -velocity.x\n RWVelocity.y = -velocity.y\n RWVelocity.z = velocity.z\n return RWVelocity\n\n\ndef euler_to_quaternion(roll, pitch, yaw):\n qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(\n yaw / 2)\n qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(\n yaw / 2)\n qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(\n yaw / 2)\n qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(\n yaw / 2)\n\n return [qx, qy, qz, qw]\n\n\ndef quaternion_to_euler(x, y, z, w):\n\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll = math.atan2(t0, t1)\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch = math.asin(t2)\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw = math.atan2(t3, t4)\n return [yaw, pitch, roll]"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.cos"
]
] |
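A self-contained round-trip check of the conversion conventions in Unity2RealWorld.py above (a sketch under stated assumptions, not code from the repository): euler_to_quaternion returns [qx, qy, qz, qw] while quaternion_to_euler returns [yaw, pitch, roll]. The formulas are restated here so the check needs only numpy and math, without the ROS geometry_msgs dependency.

import math
import numpy as np

def euler_to_quaternion(roll, pitch, yaw):
    # Same formulas as in the file above; result ordered [qx, qy, qz, qw].
    cr, sr = np.cos(roll / 2), np.sin(roll / 2)
    cp, sp = np.cos(pitch / 2), np.sin(pitch / 2)
    cy, sy = np.cos(yaw / 2), np.sin(yaw / 2)
    return [sr * cp * cy - cr * sp * sy,
            cr * sp * cy + sr * cp * sy,
            cr * cp * sy - sr * sp * cy,
            cr * cp * cy + sr * sp * sy]

def quaternion_to_euler(x, y, z, w):
    # Same formulas as in the file above; result ordered [yaw, pitch, roll].
    roll = math.atan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
    pitch = math.asin(max(-1.0, min(1.0, 2.0 * (w * y - z * x))))
    yaw = math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
    return [yaw, pitch, roll]

roll, pitch, yaw = 0.1, -0.2, 0.3
yaw2, pitch2, roll2 = quaternion_to_euler(*euler_to_quaternion(roll, pitch, yaw))
assert max(abs(roll - roll2), abs(pitch - pitch2), abs(yaw - yaw2)) < 1e-9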
ngi-nix/vframe
|
[
"60469e25203136f9d6a5ecaabe2423695ee9a0f2"
] |
[
"vframe_cli/commands/train/create-yolo-pytorch.py"
] |
[
"#############################################################################\n#\n# VFRAME\n# MIT License\n# Copyright (c) 2019 Adam Harvey and VFRAME\n# https://vframe.io\n#\n#############################################################################\n\n\nimport click\n\n@click.command()\n@click.option('-i', '--input', 'opt_fp_cfg', required=True,\n help='Path YAML job config')\n@click.option('--skip-images', 'opt_skip_images', is_flag=True)\n@click.option('--skip-labels', 'opt_skip_labels', is_flag=True)\n@click.pass_context\ndef cli(ctx, opt_fp_cfg, opt_skip_images, opt_skip_labels):\n \"\"\"YOLO PyTorch project\"\"\"\n\n from os.path import join\n from pathlib import Path\n import shutil\n\n from dataclasses import asdict\n from tqdm import tqdm\n import pandas as pd\n\n from vframe.settings import app_cfg\n from vframe.utils.file_utils import ensure_dir, load_yaml, write_yaml\n from vframe.utils.file_utils import write_txt, replace_ext, chmod_exec\n from vframe.utils.dataset_utils import split_train_val_test\n from vframe.models.annotation import Annotation\n from vframe.models.training_dataset import YoloPyTorch\n\n log = app_cfg.LOG\n\n # load config\n cfg = load_yaml(opt_fp_cfg, data_class=YoloPyTorch)\n\n # provision output\n ensure_dir(cfg.fp_output)\n dir_images = join(cfg.fp_output, cfg.fn_images)\n dir_labels = join(cfg.fp_output, cfg.fn_labels)\n ensure_dir(dir_images)\n ensure_dir(dir_labels)\n\n # write to yaml\n fp_out = join(cfg.fp_output, cfg.fn_hyp)\n comment = '\\n'.join([app_cfg.LICENSE_HEADER,'# Hyperparameter'])\n write_yaml(asdict(cfg.hyperparameters), fp_out, comment=comment)\n\n # load annos\n df = pd.read_csv(cfg.fp_annotations)\n df_pos = df[df.label_index != -1]\n # df_neg = df[df.label_enum == app_cfg.LABEL_BACKGROUND or df.label_index == -1]\n df_neg = df[df.label_index == -1]\n\n # count\n log.info(f'positive annotations: {len(df_pos):,}')\n log.info(f'background annotations: {len(df_neg):,}')\n log.info(f'total annotations: {len(df):,}')\n log.info(f'positive images: {len(df_pos.groupby(\"filename\")):,}')\n log.info(f'negative images: {len(df_neg.groupby(\"filename\")):,}')\n log.info(f'total images: {len(df.groupby(\"filename\")):,}')\n\n # get class-label list sorted by class index\n df_sorted = df_pos.sort_values(by='label_index', ascending=True)\n df_sorted.drop_duplicates(['label_enum'], keep='first', inplace=True)\n class_labels = df_sorted.label_enum.values.tolist()\n # write to txt\n write_txt(class_labels, join(cfg.fp_output, app_cfg.FN_LABELS))\n\n # update config\n cfg.classes = class_labels\n\n # Generate one label per file with all bboxes and classes\n # <object-class> <x_center> <y_center> <width> <height>\n labels_data = {}\n file_list = []\n df_groups = df_pos.groupby('filename')\n for fn, df_group in df_groups:\n annos = []\n file_list.append(join(dir_images, fn))\n for row_idx, row in df_group.iterrows():\n annos.append(Annotation.from_anno_series_row(row).to_darknet_str())\n labels_data.update({fn: annos})\n\n # write txt files for train, val\n splits = split_train_val_test(file_list, splits=cfg.splits, seed=1)\n write_txt(splits['train'], join(cfg.fp_output, cfg.fn_train))\n write_txt(splits['val'], join(cfg.fp_output, cfg.fn_val))\n write_txt(splits['test'], join(cfg.fp_output, cfg.fn_test))\n\n # write metadata\n fp_out = join(cfg.fp_output, cfg.fn_metadata)\n comment = '\\n'.join([app_cfg.LICENSE_HEADER, '# Metadata'])\n write_yaml(cfg.to_metadata(), fp_out, comment=comment)\n\n # copy postive images\n if not opt_skip_labels:\n 
for fn, annos in tqdm(labels_data.items()):\n # write all annos for this image to txt file\n fp_label = join(dir_labels, replace_ext(fn, 'txt'))\n write_txt(annos, fp_label)\n\n # symlink/copy images\n if not opt_skip_images:\n df_groups = df.groupby('filename')\n for fn, df_group in tqdm(df_groups):\n fpp_im_dst = Path(join(dir_images, fn))\n fpp_im_src = Path(join(cfg.fp_images, fn))\n if not fpp_im_src.is_file():\n app_cfg.LOG.error(f'{fpp_im_dst} missing')\n continue\n if cfg.symlink:\n if fpp_im_dst.is_symlink():\n fpp_im_dst.unlink()\n fpp_im_dst.symlink_to(fpp_im_src)\n else:\n shutil.copy(fpp_im_src, fpp_im_dst)\n\n # write model yaml, but print k:v pairs instead of dump\n model_cfg = load_yaml(cfg.fp_model_cfg)\n fp_out = join(cfg.fp_output, cfg.fn_model_cfg)\n model_cfg['nc'] = len(cfg.classes)\n with open(fp_out, 'w') as f:\n for k,v in model_cfg.items():\n f.write(f'{k}: {v}\\n')\n\n # shell scripts\n args = cfg.arguments\n py_cmds = ['python','train.py','']\n cli_opts = cfg.to_cli_args()\n # join strings\n sh_header_str = '\\n'.join(['#!/bin/bash','','# training', ''])\n py_cmds_str = list(map(str, py_cmds))\n cli_opts_str = list(map(str, cli_opts))\n sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)\n # write\n fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_INIT)\n write_txt(sh_script, fp_sh)\n # make executable\n chmod_exec(fp_sh)\n\n # TODO: add tensorboard script\n # tensorboard --logdir runs/exp0 --bind_all\n if args.device and len(args.device) > 1:\n n_gpus = len(args.device)\n # multi GPU cmd\n py_cmds = ['python', '-m', 'torch.distributed.launch', '--nproc_per_node', f'{n_gpus}', 'train.py', '']\n # join strings\n sh_header_str = '\\n'.join(['#!/bin/bash','','# multi gpu training', ''])\n py_cmds_str = list(map(str, py_cmds))\n cfg.arguments.batch_size *= 2\n cli_opts = cfg.to_cli_args()\n cli_opts_str = list(map(str, cli_opts))\n sh_script = sh_header_str + ' '.join(py_cmds_str) + ' '.join(cli_opts_str)\n # write\n fp_sh = join(cfg.fp_output, app_cfg.FN_TRAIN_MULTI)\n write_txt(sh_script, fp_sh)\n # make executable\n chmod_exec(fp_sh)\n"
] |
[
[
"pandas.read_csv"
]
] |
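The command above writes one darknet-style label line per annotation, "<object-class> <x_center> <y_center> <width> <height>", with coordinates normalized to the image size. The helper below is a hypothetical stand-in for that formatting step (the real conversion is Annotation.to_darknet_str in vframe.models.annotation, whose signature is not shown in the row), included only to make the label format concrete.

def to_darknet_str(label_index, x1, y1, x2, y2, im_w, im_h):
    # Normalize a pixel bbox (x1, y1, x2, y2) to darknet's center/size format.
    cx = (x1 + x2) / 2.0 / im_w
    cy = (y1 + y2) / 2.0 / im_h
    w = (x2 - x1) / im_w
    h = (y2 - y1) / im_h
    return f"{label_index} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}"

print(to_darknet_str(0, 50, 40, 150, 120, 640, 480))
# -> "0 0.156250 0.166667 0.156250 0.166667"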
nyiritb/tensorflow
|
[
"61a985bb48e4d38d05966132a347afe6f8a9a353"
] |
[
"tensorflow/python/keras/engine/base_layer_test.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TensorFlow 2.0 layer behavior.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\nimport sys\nimport traceback\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.engine import sequential\nfrom tensorflow.python.keras.engine import training as training_lib\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.layers import core as legacy_core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import summary_ops_v2\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.summary import summary_iterator\nfrom tensorflow.python.util import nest\n\n\nclass DynamicLayer(base_layer.Layer):\n\n def __init__(self, dynamic=False, **kwargs):\n super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)\n\n def call(self, inputs):\n samples = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n for idx, sample in enumerate(inputs):\n samples = samples.write(idx, math_ops.square(sample))\n return samples.stack()\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass InvalidLayer(base_layer.Layer):\n\n def call(self, inputs):\n raise ValueError('You did something wrong!')\n\n\nclass BaseLayerTest(keras_parameterized.TestCase):\n\n @combinations.generate(combinations.times(\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations()))\n def test_dynamic_layer(self):\n model = 
testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n @combinations.generate(combinations.times(\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations()))\n def test_dynamic_layer_error(self):\n # Functional Models hit the `dyanamic=True` error during construction.\n # Subclass Models should just throw the original autograph error during\n # execution.\n raised_error = False\n try:\n model = testing_utils.get_model_from_layers([DynamicLayer()],\n input_shape=(3,))\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n except errors_impl.OperatorNotAllowedInGraphError as e:\n if 'iterating over `tf.Tensor` is not allowed' in str(e):\n raised_error = True\n except TypeError as e:\n if 'attempting to use Python control flow' in str(e):\n raised_error = True\n self.assertTrue(raised_error)\n\n @combinations.generate(combinations.times(\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations()))\n def test_dynamic_layer_error_running_in_graph_mode(self):\n with ops.get_default_graph().as_default():\n model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n # But then you cannot run the model since you're in a graph scope.\n with self.assertRaisesRegex(ValueError,\n 'You must enable eager execution'):\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n\n def test_manual_compute_output_shape(self):\n\n class BuildCounter(base_layer.Layer):\n\n def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name\n super(BuildCounter, self).__init__(*args, **kwargs)\n self.build_counter = 0\n\n def build(self, input_shape):\n self.build_counter += 1\n self.build_shape = input_shape\n\n def call(self, inputs):\n return inputs\n\n layer = BuildCounter(dtype=dtypes.float64)\n output_shape = layer.compute_output_shape((None, 10))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n self.assertEqual(output_shape.as_list(), [None, 10])\n output_signature = layer.compute_output_signature(\n tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n self.assertEqual(output_signature.dtype, dtypes.float64)\n self.assertEqual(output_signature.shape.as_list(), [None, 10])\n layer(np.ones((5, 10)))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n\n def test_dynamic_layer_with_deferred_sequential_model(self):\n model = sequential.Sequential([DynamicLayer(dynamic=True), layers.Dense(3)])\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_nested_dynamic_layers_in_eager_mode(self):\n inputs = input_layer.Input((3,))\n outputs = DynamicLayer(dynamic=True)(inputs)\n inner_model = training_lib.Model(inputs, outputs)\n self.assertEqual(inner_model.dynamic, True)\n\n inputs = input_layer.Input((3,))\n x = DynamicLayer(dynamic=True)(inputs)\n outputs = 
inner_model(x)\n\n model = training_lib.Model(inputs, outputs)\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_dynamic_subclassed_model_no_shape_inference(self):\n\n class MyModel(training_lib.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = layers.Dense(3)\n self.layer2 = layers.Dense(3)\n\n def call(self, inputs):\n if math_ops.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n self.assertEqual(model.outputs, None)\n\n def test_dynamic_subclassed_model_with_shape_inference(self):\n\n class MyModel(training_lib.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = layers.Dense(3)\n self.layer2 = layers.Dense(3)\n\n def call(self, inputs):\n if math_ops.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n def compute_output_shape(self, input_shape):\n return tuple(input_shape[:-1].as_list()) + (3,)\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n x, y = np.random.random((2, 3)), np.random.random((2, 3))\n model.train_on_batch(x, y)\n outputs = model(x)\n self.assertEqual(outputs.shape.as_list(), [2, 3])\n\n def test_deepcopy(self):\n bias_reg = lambda x: 1e-3 * math_ops.reduce_sum(x)\n layer = layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)\n # Call the Layer on data to generate regularize losses.\n layer(array_ops.ones((1, 10, 10, 3)))\n self.assertLen(layer.losses, 1)\n new_layer = copy.deepcopy(layer)\n self.assertEqual(new_layer.bias_regularizer, bias_reg)\n self.assertEqual(layer.get_config(), new_layer.get_config())\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_invalid_forward_pass(self):\n inputs = input_layer.Input((3,))\n with self.assertRaisesRegex(ValueError, 'You did something wrong!'):\n _ = InvalidLayer()(inputs)\n\n def test_no_legacy_model(self):\n inputs = input_layer.Input((1,))\n legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')\n legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')\n\n layer = legacy_dense_0(inputs)\n layer = layers.Dense(1)(layer)\n layer = legacy_dense_1(layer)\n\n expected_regex = (r'The following are legacy tf\\.layers\\.Layers:\\n '\n '{}\\n {}'.format(legacy_dense_0, legacy_dense_1))\n\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = training_lib.Model(inputs=[inputs], outputs=[layer])\n\n model = training_lib.Model(inputs=[inputs], outputs=[inputs])\n with self.assertRaisesRegex(TypeError, expected_regex):\n model._insert_layers([legacy_dense_0, legacy_dense_1])\n\n def test_no_legacy_sequential(self):\n layer = [layers.Dense(1), legacy_core.Dense(1, name='legacy_dense_0')]\n\n expected_regex = r'legacy tf\\.layers\\.Layers:\\n {}'.format(layer[1])\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = sequential.Sequential(layer)\n\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer)\n\n model = sequential.Sequential()\n with 
self.assertRaisesRegex(TypeError, expected_regex):\n for l in layer:\n model.add(l)\n\n @combinations.generate(\n combinations.times(\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations(),\n combinations.combine(mode=['graph', 'eager'])))\n def test_build_with_numpy_data(self):\n model_layers = [\n layers.Dense(3, activation='relu', kernel_initializer='ones'),\n layers.Dense(1, activation='sigmoid', kernel_initializer='ones')\n ]\n model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))\n model(np.zeros((2, 4), dtype='float32'))\n self.assertTrue(model.built)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_default_add_weight(self):\n\n class TestLayer(base_layer.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n self.default_weight = self.add_weight()\n self.weight_without_name = self.add_weight(shape=(3, 4))\n self.regularized_weight_without_name = self.add_weight(\n shape=(3, 4), regularizer='l2')\n\n layer = TestLayer()\n self.assertEqual(layer.default_weight.shape.as_list(), [])\n self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])\n self.assertEqual(layer.default_weight.dtype.name, 'float32')\n self.assertEqual(layer.weight_without_name.dtype.name, 'float32')\n self.assertEqual(len(layer.losses), 1)\n if not context.executing_eagerly():\n # Cannot access tensor.name in eager execution.\n self.assertIn('Variable_2/Regularizer', layer.losses[0].name)\n\n @combinations.generate(combinations.keras_mode_combinations(mode=['eager']))\n def test_learning_phase_freezing_for_layers(self):\n\n class LearningPhaseLayer(base_layer.Layer):\n\n def call(self, inputs):\n return backend.in_train_phase(lambda: array_ops.ones_like(inputs),\n lambda: array_ops.zeros_like(inputs))\n\n def get_learning_phase_value():\n model = sequential.Sequential([LearningPhaseLayer(input_shape=(1,))])\n model._run_eagerly = testing_utils.should_run_eagerly()\n return np.sum(model(np.ones((1, 1))))\n\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test scope.\n with backend.learning_phase_scope(1):\n self.assertEqual(get_learning_phase_value(), 1)\n\n # The effects of the scope end after exiting it.\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test setting.\n backend.set_learning_phase(1)\n self.assertEqual(get_learning_phase_value(), 1)\n backend.set_learning_phase(0)\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Cannot be enabled with `run_eagerly=True`, see b/123904578\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_layer_can_return_variable(self):\n\n class ComputeSum(base_layer.Layer):\n\n def __init__(self):\n super(ComputeSum, self).__init__()\n self.total = variables.Variable(\n initial_value=array_ops.zeros((1, 1)), trainable=False)\n if not context.executing_eagerly():\n backend.get_session().run(self.total.initializer)\n\n def call(self, inputs):\n self.total.assign_add(inputs)\n return self.total\n\n inputs = input_layer.Input(shape=(1,))\n model = training_lib.Model(inputs, ComputeSum()(inputs))\n model.predict(np.ones((1, 1)))\n\n def _get_layer_with_training_arg(self):\n\n class TrainingLayer(base_layer.Layer):\n \"\"\"A layer with a `training` argument in a defuned `call`.\"\"\"\n\n @def_function.function\n def call(self, inputs, training=None):\n if training is None:\n training = backend.learning_phase()\n return tf_utils.smart_cond(training,\n lambda: array_ops.ones_like(inputs),\n lambda: 
array_ops.zeros_like(inputs))\n\n return TrainingLayer()\n\n # b/124459427: can't test with `run_eagerly=True` for now.\n @combinations.generate(\n combinations.times(combinations.keras_mode_combinations(),\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations()))\n def test_training_arg_in_defun(self):\n layer = self._get_layer_with_training_arg()\n model = testing_utils.get_model_from_layers([layer], input_shape=(1,))\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 1.)\n loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(loss, 0.)\n\n # Test that the argument injection performed in `call` is not active\n # when the argument is passed explicitly.\n layer = self._get_layer_with_training_arg()\n inputs = input_layer.Input(shape=(1,))\n # Pass `training` by name\n outputs = layer(inputs, training=False)\n model = training_lib.Model(inputs, outputs)\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 0.)\n\n @combinations.generate(\n combinations.times(combinations.keras_mode_combinations(),\n combinations.keras_model_type_combinations(),\n combinations.keras_tensor_combinations()))\n def test_raw_variable_assignment(self):\n\n class RawVariableLayer(base_layer.Layer):\n\n def __init__(self, **kwargs):\n super(RawVariableLayer, self).__init__(**kwargs)\n # Test variables in nested structure.\n self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]\n\n def call(self, inputs):\n return inputs * self.var_list[0] * self.var_list[1]['a']\n\n model = testing_utils.get_model_from_layers([RawVariableLayer()],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n x, y = np.ones((10, 10)), np.ones((10, 10))\n # Checks that variables get initialized.\n model.fit(x, y, batch_size=2, epochs=2)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_layer_names(self):\n inputs = input_layer.Input(shape=[2])\n add1 = inputs + inputs\n add2 = layers.Add()([inputs, inputs])\n add3 = inputs + inputs\n add4 = layers.Add()([inputs, inputs])\n model = training_lib.Model(inputs=[inputs],\n outputs=[add1, add2, add3, add4])\n actual_names = [l.name for l in model.layers]\n graph_names = [\n 'input_1', 'tf_op_layer_AddV2', 'add', 'tf_op_layer_AddV2_1', 'add_1'\n ]\n eager_names = [\n 'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'\n ]\n for actual, eager, graph in zip(actual_names, graph_names, eager_names):\n self.assertIn(actual, {eager, graph})\n\n def test_add_trainable_weight_on_frozen_layer(self):\n\n class TestLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(), trainable=True)\n\n def call(self, inputs):\n return self.w * inputs\n\n layer = TestLayer()\n layer.trainable = False\n layer.build(None)\n layer.trainable = True\n self.assertListEqual(layer.trainable_weights, [layer.w])\n\n @combinations.generate(\n combinations.times(combinations.keras_mode_combinations(),\n combinations.keras_model_type_combinations()))\n def test_passing_initial_weights_values(self):\n kernel_value = np.random.random((10, 2))\n layer_with_weights = layers.Dense(2, use_bias=False, weights=[kernel_value])\n\n model = testing_utils.get_model_from_layers([layer_with_weights],\n input_shape=(10,))\n 
model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n inputs = np.random.random((3, 10))\n out = model.predict(inputs)\n self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)\n self.assertAllClose(out, np.dot(inputs, kernel_value))\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_set_weights_and_get_weights(self):\n layer = layers.Dense(2)\n layer.build((None, 10))\n kernel = np.random.random((10, 2))\n bias = np.random.random((2,))\n layer.set_weights([kernel, bias])\n weights = layer.get_weights()\n self.assertEqual(len(weights), 2)\n self.assertAllClose(weights[0], kernel)\n self.assertAllClose(weights[1], bias)\n with self.assertRaisesRegex(ValueError,\n 'but the layer was expecting 2 weights'):\n layer.set_weights([1, 2, 3])\n with self.assertRaisesRegex(ValueError,\n 'not compatible with provided weight shape'):\n layer.set_weights([kernel.T, bias])\n\n def test_get_config_error(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayer, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n # `__init__` includes kwargs but `get_config` is not overridden, so\n # an error should be thrown:\n with self.assertRaisesRegex(NotImplementedError, 'Layer MyLayer has'):\n MyLayer('custom').get_config()\n\n class MyLayerNew(base_layer.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayerNew, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n def get_config(self):\n config = super(MyLayerNew, self).get_config()\n config['my_kwarg'] = self.my_kwarg\n return config\n\n # Test to make sure that error is not raised if the method call is\n # from an overridden `get_config`:\n self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')\n\n class MyLayerNew2(base_layer.Layer):\n\n def __init__(self, name='MyLayerName', dtype=None, **kwargs): # pylint:disable=redefined-outer-name\n super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)\n\n # Check that if the kwargs in `__init__` are base layer constructor\n # arguments, no error is thrown:\n self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_count_params(self):\n dense = layers.Dense(16)\n dense.build((None, 4))\n self.assertEqual(dense.count_params(), 16 * 4 + 16)\n\n dense = layers.Dense(16)\n with self.assertRaisesRegex(ValueError, 'call `count_params`'):\n dense.count_params()\n\n model = sequential.Sequential(layers.Dense(16))\n with self.assertRaisesRegex(ValueError, 'call `count_params`'):\n model.count_params()\n\n dense = layers.Dense(16, input_dim=4)\n model = sequential.Sequential(dense)\n self.assertEqual(model.count_params(), 16 * 4 + 16)\n\n def test_super_not_called(self):\n\n class CustomLayerNotCallingSuper(base_layer.Layer):\n\n def __init__(self):\n pass\n\n layer = CustomLayerNotCallingSuper()\n with self.assertRaisesRegex(RuntimeError, 'You must call `super()'):\n layer(np.random.random((10, 2)))\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_first_arg_not_called_inputs(self):\n x, y = array_ops.ones((10, 1)), array_ops.ones((10, 1))\n\n class ArgLayer(base_layer.Layer):\n\n def call(self, x, y):\n return x + y\n\n layer = ArgLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n class KwargLayer(base_layer.Layer):\n\n def call(self, x=None, y=None):\n 
return x + y\n\n layer = KwargLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n with self.assertRaisesRegex(ValueError, 'must always be passed'):\n layer(y=y)\n\n class TFFunctionLayer(base_layer.Layer):\n\n @def_function.function\n def call(self, x, y=None):\n if y is None:\n return x\n return x + y\n\n layer = TFFunctionLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n def test_build_input_shape(self):\n\n class CustomLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.add_weight('w', shape=input_shape[1:])\n super(CustomLayer, self).build(input_shape)\n\n layer = CustomLayer()\n self.assertFalse(layer.built)\n\n layer.build([None, 1, 2, 3])\n self.assertTrue(layer.built)\n self.assertEqual([None, 1, 2, 3], layer._build_input_shape)\n\n layer = CustomLayer()\n layer(input_layer.Input((3,)))\n self.assertTrue(layer.built)\n self.assertEqual([None, 3], layer._build_input_shape.as_list())\n\n @combinations.generate(combinations.combine(mode=['eager']))\n def custom_layer_training_arg(self):\n class CustomLayerNoTrainingArg(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n self._nested_layer = nested_layer or array_ops.identity\n\n def call(self, inputs):\n return self._nested_layer(inputs)\n\n class CustomLayerDefaultTrainingMissing(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n self._nested_layer = nested_layer or array_ops.identity\n\n def call(self, inputs, training):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingNone(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n self._nested_layer = nested_layer or array_ops.identity\n\n def call(self, inputs, training=None):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingFalse(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n self._nested_layer = nested_layer or array_ops.identity\n\n def call(self, inputs, training=False):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingTrue(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n self._nested_layer = nested_layer or array_ops.identity\n\n def call(self, inputs, training=True):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n x = array_ops.ones(shape=(1, 1))\n\n # If the layer signature doesn't specify a default training arg,\n # run it in inference mode when to training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingMissing()\n self.assertAllEqual(layer(x), x * 0.5)\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the layer signature specifies `False` as the default training arg,\n # run it in inference mode when no training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingFalse()\n self.assertAllEqual(layer(x), x * 0.5)\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the layer signature specifies `True` as the default training arg,\n # explicitly run it in training mode when no training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingTrue()\n self.assertAllEqual(layer(x), x)\n self.assertAllEqual(layer(x, 
training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # Outer layers/models should set the training context implicitly for all\n # nested layers, respecting whatever mode the outer layer was run with.\n layer = CustomLayerDefaultTrainingTrue(CustomLayerDefaultTrainingFalse())\n # No outer value passed: use local defaults\n self.assertAllEqual(layer(x), x * 0.25) # Use local default False\n # Outer value passed: override local defaults\n self.assertAllEqual(layer(x, training=False), x * 0.25)\n self.assertAllEqual(layer(x, training=True), x)\n\n layer = CustomLayerDefaultTrainingFalse(CustomLayerDefaultTrainingTrue())\n # No outer value passed: use local defaults\n self.assertAllEqual(layer(x), x) # Use local default True\n # Outer value passed: override local defaults\n self.assertAllEqual(layer(x, training=False), x * 0.25)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the outer layer `call` doesn't take a training argument at all,\n # it'll set the nested scope as None when no training arg is passed in.\n # If a training arg is passed in it won't use it directly in `call`, but\n # it will set the nested training mode.\n layer = CustomLayerNoTrainingArg(CustomLayerDefaultTrainingTrue())\n self.assertAllEqual(layer(x), x) # Use local default True\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n layer = CustomLayerDefaultTrainingNone(CustomLayerDefaultTrainingTrue())\n self.assertAllEqual(layer(x), x) # Use local default True\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n def test_activity_regularizer_string(self):\n\n class MyLayer(base_layer.Layer):\n pass\n\n layer = MyLayer(activity_regularizer='l2')\n self.assertIsInstance(layer.activity_regularizer, regularizers.L2)\n\n\nclass SymbolicSupportTest(keras_parameterized.TestCase):\n\n def test_using_symbolic_tensors_with_tf_ops(self):\n # Single-input.\n x = input_layer.Input((3,))\n math_ops.square(x)\n\n # Multi-inputs.\n x1, x2 = input_layer.Input((3,)), input_layer.Input((3,))\n array_ops.concat([x1, x2], axis=1)\n\n # Mixing Keras symbolic tensors and graph tensors from the same graph works.\n with backend.get_graph().as_default():\n x1 = input_layer.Input((3,))\n x2 = input_layer.Input((3,))\n math_ops.matmul(x1, x2)\n\n # Creating same op type (matmul) multiple times in the Keras graph works.\n x1 = input_layer.Input((3,))\n x2 = input_layer.Input((3,))\n math_ops.matmul(x1, x2)\n\n def test_mixing_eager_and_graph_tensors(self):\n with ops.Graph().as_default():\n x1 = array_ops.ones((3, 3))\n x2 = array_ops.ones((3, 3))\n self.assertIsInstance(x2, ops.EagerTensor)\n with self.assertRaisesRegex(TypeError, 'Graph tensors'):\n math_ops.matmul(x1, x2)\n\n def test_mixing_numpy_arrays_and_graph_tensors(self):\n with ops.Graph().as_default():\n x1 = array_ops.ones((3, 3))\n x2 = np.ones((3, 3), dtype='float32')\n with self.assertRaisesRegex(TypeError, 'Graph tensors'):\n math_ops.matmul(x1, x2)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):\n x1 = input_layer.Input((3,))\n x2 = array_ops.ones((3, 3))\n y = math_ops.matmul(x1, x2)\n\n fn = backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @combinations.generate(combinations.combine(mode=['graph', 
'eager']))\n def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):\n x1 = input_layer.Input((3,))\n x2 = np.ones((3, 3), dtype='float32')\n y = math_ops.matmul(x1, x2)\n\n fn = backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_reraising_exception(self):\n # When layer is not dynamic, we have some pattern matching during exception\n # handling to detect when the user is trying to use python control flow.\n # When an exception is thrown but the pattern doesn't match, we want to\n # preserve the originating stack trace. An early implementation of this\n # logic lost the stack trace. We test the correct behavior here.\n\n class TypeErrorLayer(base_layer.Layer):\n\n def call(self, inputs):\n def easily_identifiable_name():\n raise TypeError('Non-matching TypeError message.')\n easily_identifiable_name()\n\n inputs = input_layer.Input((3,))\n\n try:\n _ = TypeErrorLayer()(inputs)\n except TypeError as e:\n if hasattr(e, 'ag_error_metadata'):\n self.assertIn('easily_identifiable_name', str(e))\n # See ErrorMetadataBase in autograph/pyct/errors.py\n function_name = e.ag_error_metadata.translated_stack[-1].function_name\n else:\n tb = traceback.extract_tb(sys.exc_info()[2])\n last_entry = tb[-1]\n function_name = last_entry[2]\n self.assertEqual(function_name, 'easily_identifiable_name')\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_summaries_in_tf_function(self):\n if not context.executing_eagerly():\n return\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs):\n summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))\n return inputs\n\n tmp_dir = self.get_temp_dir()\n writer = summary_ops_v2.create_file_writer_v2(tmp_dir)\n with writer.as_default(), summary_ops_v2.always_record_summaries():\n my_layer = MyLayer()\n x = array_ops.ones((10, 10))\n\n def my_fn(x):\n return my_layer(x)\n\n _ = my_fn(x)\n\n event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))\n self.assertLen(event_file, 1)\n event_file = event_file[0]\n tags = set()\n for e in summary_iterator.summary_iterator(event_file):\n for val in e.summary.value:\n tags.add(val.tag)\n self.assertEqual(set(['my_layer/mean']), tags)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_error_when_passing_non_tensor(self):\n # layers that have an `input_spec` will raise an error when called on\n # non-tensors. 
This covers all built-in layers.\n layer = layers.Dense(3)\n x = object()\n with self.assertRaisesRegex(TypeError, r'should be tensors'):\n layer(x)\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass NestedTrackingTest(test.TestCase):\n\n def test_nested_layer_variable_tracking(self):\n # Test that variables from nested sublayers are\n # being tracked by subclassed layers.\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.dense1 = layers.Dense(1)\n self.dense2 = layers.BatchNormalization()\n\n def build(self, input_shape):\n self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())\n self.v2 = variables.Variable(\n name='v2',\n initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),\n trainable=False)\n\n def call(self, inputs):\n x = self.dense1(inputs) + self.dense2(inputs)\n return x + self.v1 + self.v2\n\n layer = MyLayer()\n inputs = input_layer.Input((1,))\n _ = layer(inputs)\n\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 5)\n self.assertEqual(len(layer.non_trainable_weights), 3)\n\n layer.dense1.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 5)\n\n layer.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 0)\n self.assertEqual(len(layer.non_trainable_weights), 8)\n self.assertEqual(\n {id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},\n {id(v) for _, v in layer._checkpoint_dependencies})\n\n def test_nested_layer_updates_losses_tracking(self):\n # Test that updates and losses from nested sublayers are\n # being tracked by subclassed layers.\n\n class UpdateAndLossLayer(base_layer.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def call(self, inputs):\n self.add_loss(math_ops.reduce_sum(inputs))\n self.add_update(state_ops.assign_add(self.v1, 1))\n return inputs + 1\n\n class MyLayer(base_layer.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.ul1 = UpdateAndLossLayer()\n self.ul2 = UpdateAndLossLayer()\n\n def call(self, inputs):\n self.add_loss(math_ops.reduce_sum(inputs))\n self.add_update(state_ops.assign_add(self.v1, 1))\n x = self.ul1(inputs)\n return self.ul2(x)\n\n layer = MyLayer()\n\n if context.executing_eagerly():\n inputs = array_ops.ones((3, 1))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n self.assertLen(layer.get_losses_for(None), 3)\n else:\n inputs = input_layer.Input((1,))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n self.assertEqual(len(layer.updates), 3)\n self.assertLen(layer.get_losses_for(None), 3)\n\n def test_attribute_reassignment(self):\n l = base_layer.Layer()\n l.a = base_layer.Layer()\n l.a = []\n l.a = variables.Variable(1.)\n l.a = base_layer.Layer()\n last_assignment = base_layer.Layer()\n l.a = last_assignment\n l.b = variables.Variable(1.)\n del l.b\n l.c = base_layer.Layer()\n del l.c\n l.d = last_assignment\n del l.d\n self.assertEqual([last_assignment], l._layers)\n self.assertEqual([], l.trainable_weights)\n self.assertEqual([], l.non_trainable_weights)\n self.assertEqual([], l.weights)\n del l.a\n self.assertEqual([], l._layers)\n\n def test_assign_op_not_tracked_as_variable(self):\n\n class LayerWithAssignAttr(base_layer.Layer):\n\n def build(self, 
input_shape):\n self.v = variables.Variable(1.)\n self.v_assign = self.v.assign_add(2.)\n\n layer = LayerWithAssignAttr()\n layer.build((10, 10))\n\n self.assertEqual([layer.v], layer.variables)\n\n def test_layer_class_not_tracked_as_sublayer(self):\n # See https://github.com/tensorflow/tensorflow/issues/27431 for details.\n\n class LayerWithClassAttribute(base_layer.Layer):\n\n def __init__(self):\n super(LayerWithClassAttribute, self).__init__()\n self.layer_fn = layers.Dense\n\n layer = LayerWithClassAttribute()\n self.assertEmpty(layer.variables)\n self.assertEmpty(layer.submodules)\n\n def test_layer_call_fn_args(self):\n\n class NonDefunLayer(base_layer.Layer):\n\n def call(self, inputs, a, mask, b=None, training=None):\n return inputs\n\n class DefunLayer(base_layer.Layer):\n\n @def_function.function\n def call(self, x, mask, a, training=None, b=None):\n return x\n\n nondefun_layer = NonDefunLayer()\n self.assertEqual(nondefun_layer._call_fn_args,\n ['inputs', 'a', 'mask', 'b', 'training'])\n defun_layer = DefunLayer()\n self.assertEqual(defun_layer._call_fn_args,\n ['x', 'mask', 'a', 'training', 'b'])\n\n def test_sequential_model(self):\n model = sequential.Sequential(\n [layers.Dense(10, input_shape=(10,)),\n layers.Dense(5)])\n self.assertLen(model.layers, 2)\n self.assertLen(model.weights, 4)\n\n # Make sure a subclass model also works when it is called 'Sequential'.\n class Sequential(training_lib.Model):\n\n def __init__(self):\n super(Sequential, self).__init__()\n self.dense_layers = [layers.Dense(10), layers.Dense(5)]\n\n def call(self, inputs):\n x = inputs\n for d in self.dense_layers:\n x = d(x)\n return x\n\n s = Sequential()\n self.assertLen(s.layers, 2)\n self.assertLen(s.weights, 0)\n\n s(input_layer.Input((10,)))\n self.assertLen(s.weights, 4)\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass NameScopingTest(keras_parameterized.TestCase):\n\n def test_name_scope_layer(self):\n x = backend.placeholder(shape=(10, 10))\n layer = layers.Dense(10, name='MyName')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName/kernel:0')\n\n def test_name_scope_functional_api(self):\n inputs = input_layer.Input((3,))\n layer = layers.Dense(10, name='MyName')\n _ = layer(inputs)\n self.assertEqual(layer.bias.name, 'MyName/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName/kernel:0')\n\n def test_name_scope_functional_api_nested(self):\n\n class NestedLayer(base_layer.Layer):\n\n def __init__(self, name='OuterName'):\n super(NestedLayer, self).__init__(name=name)\n self.dense = layers.Dense(10, name='InnerName')\n\n def call(self, inputs):\n return self.dense(inputs)\n\n inputs = input_layer.Input((3,))\n layer = NestedLayer()\n _ = layer(inputs)\n self.assertEqual(layer.dense.bias.name, 'OuterName/InnerName/bias:0')\n self.assertEqual(layer.dense.kernel.name, 'OuterName/InnerName/kernel:0')\n\n def test_name_scope_sublayer(self):\n\n class NameScopeTracker(base_layer.Layer):\n\n def call(self, inputs):\n self.active_name_scope = ops.get_name_scope()\n return inputs\n\n x = backend.placeholder(shape=(10, 10))\n sublayer = NameScopeTracker(name='Sublayer')\n layer = layers.Dense(10, activation=sublayer, name='MyName2')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName2/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')\n self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')\n\n def test_name_scope_tf_tensor(self):\n x = ops.convert_to_tensor_v2(np.ones((10, 
10)))\n layer = layers.Dense(\n 10, activation=layers.ReLU(name='MyAct'), name='MyName3')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName3/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')\n\n\n@combinations.generate(combinations.keras_mode_combinations(mode=['eager']))\nclass AutographControlFlowTest(keras_parameterized.TestCase):\n\n def test_disabling_in_context_is_matched(self):\n\n test_obj = self\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):\n if constant_op.constant(False):\n return inputs * 1.\n return inputs * 0.\n\n @def_function.function(autograph=False)\n def test_fn():\n return MyLayer()(constant_op.constant([[1., 2., 3.]]))\n\n test_fn()\n\n def test_if_training_pattern_output(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n return inputs * 1.\n return inputs * 0.\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 0.)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 1.)\n\n def test_if_training_pattern_loss(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n loss = math_ops.reduce_sum(inputs)\n else:\n loss = 0.\n self.add_loss(loss)\n return inputs\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 2 * 3)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 0)\n\n def test_if_training_pattern_metric(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n metric = math_ops.reduce_sum(inputs)\n else:\n metric = 0.\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n for _ in range(3):\n _, train_metric = model.train_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n\n self.assertEqual(train_metric, 2 * 3)\n _, test_metric = model.test_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n self.assertEqual(test_metric, 0)\n\n def test_if_training_pattern_update(self):\n\n class MyLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.counter = self.add_weight(\n shape=(), trainable=False, initializer='zeros')\n\n def call(self, inputs, training=None):\n if training:\n increment = 1.\n else:\n increment = 0.\n self.counter.assign_add(increment)\n return inputs\n\n inputs = input_layer.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly())\n model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(backend.get_value(layer.counter), 1.)\n\n def test_conditional_losses_in_call(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n 
super(MyLayer,\n self).__init__(dynamic=testing_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_loss(math_ops.reduce_sum(inputs))\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n inputs = input_layer.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(loss, 2 * 3)\n\n def test_conditional_callable_losses(self):\n model = sequential.Sequential([\n layers.Dense(\n 1, kernel_regularizer=regularizers.l2(1e-4), input_shape=(1,))\n ])\n model._run_eagerly = testing_utils.should_run_eagerly()\n\n def assert_graph(t):\n if not context.executing_eagerly():\n self.assertEqual(t.graph, ops.get_default_graph())\n\n @def_function.function\n def get_losses(t):\n if t < 0:\n return math_ops.reduce_sum(model.losses) * t\n else:\n return math_ops.reduce_sum(model.losses)\n\n assert_graph(get_losses(constant_op.constant(2.)))\n assert_graph(get_losses(constant_op.constant(0.5)))\n\n def test_conditional_metrics_in_call(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=testing_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_metric(math_ops.reduce_sum(inputs),\n name='sum',\n aggregation='mean')\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n inputs = input_layer.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())\n history = model.fit(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(history.history['sum'][-1], 2 * 3)\n\n def test_conditional_activity_regularizer_in_call(self):\n\n class TestModel(training_lib.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=testing_utils.should_run_eagerly())\n self.layer = layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n if math_ops.greater(math_ops.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly())\n\n x = np.ones(shape=(10, 1))\n y = np.ones(shape=(10, 2))\n\n if testing_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with self.assertRaisesRegex(errors_impl.InaccessibleTensorError,\n 'ActivityRegularizer'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n def test_conditional_activity_regularizer_with_wrappers_in_call(self):\n\n class TestModel(training_lib.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=testing_utils.should_run_eagerly())\n self.layer = layers.TimeDistributed(\n layers.Dense(2, activity_regularizer='l2'), input_shape=(3, 4))\n\n def call(self, x, training=None):\n if math_ops.greater(math_ops.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly())\n\n x = np.ones(shape=(10, 3, 4))\n y = np.ones(shape=(10, 3, 2))\n\n if testing_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with 
self.assertRaisesRegex(errors_impl.InaccessibleTensorError,\n 'ActivityRegularizer'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n\nclass AddLayer(base_layer.Layer):\n \"\"\"A layer which adds its input to a variable.\n\n Useful for testing a layer with a variable\n \"\"\"\n\n def build(self, _):\n self.v = self.add_weight('v', (), initializer='ones')\n self.built = True\n\n def call(self, inputs):\n return inputs + self.v\n\n\nclass IdentityLayer(base_layer.Layer):\n \"\"\"A layer that returns its input.\n\n Useful for testing a layer without a variable.\n \"\"\"\n\n def call(self, inputs):\n return inputs\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass DTypeTest(keras_parameterized.TestCase):\n\n # This class only have tests relating to layer.dtype. Tests for dtype policies\n # are in mixed_precision/experimental/keras_test.py\n\n # TODO(reedwm): Maybe have a separate test file for input casting tests.\n\n def _const(self, dtype):\n return array_ops.constant(1, dtype=dtype)\n\n @testing_utils.enable_v2_dtype_behavior\n def test_dtype_defaults_to_floatx(self):\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float32')\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float32') # dtype should not change\n\n try:\n backend.set_floatx('float64')\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float64')\n finally:\n backend.set_floatx('float32')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_passing_dtype_to_constructor(self):\n layer = IdentityLayer(dtype='float64')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n layer = IdentityLayer(dtype='int32')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'int32')\n\n layer = IdentityLayer(dtype=dtypes.float64)\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def input_cast_to_dtype(self):\n layer = AddLayer()\n\n # Input should be cast to layer.dtype, so output should also be layer.dtype\n self.assertEqual(layer(self._const('float64')).dtype, 'float32')\n\n layer = AddLayer(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float64')\n\n # Test inputs are not casted if layer.dtype is not floating-point\n layer = IdentityLayer(dtype='int32')\n self.assertEqual(layer(self._const('float64')).dtype, 'float64')\n\n # Test inputs are not casted if the inputs are not floating-point\n layer = IdentityLayer(dtype='float32')\n self.assertEqual(layer(self._const('int32')).dtype, 'int32')\n\n # Test Numpy arrays are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')\n\n # Test Python floats are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(1.).dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def multiple_inputs_cast_to_dtype(self):\n\n class MultiIdentityLayer(base_layer.Layer):\n\n def call(self, inputs):\n return [array_ops.identity(x) for x in inputs]\n\n # Testing layer with default dtype of float32\n layer = MultiIdentityLayer()\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float32')\n self.assertEqual(y.dtype, 'float32')\n\n # Test passing dtype to the constructor\n layer = MultiIdentityLayer(dtype='float64')\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float64')\n\n # Test several non-floating point 
types\n layer = MultiIdentityLayer(dtype='float64')\n x, y, z, w = layer([self._const('float16'), self._const('bool'),\n self._const('float64'), self._constant('complex64')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'bool')\n self.assertEqual(z.dtype, 'float64')\n self.assertEqual(w.dtype, 'complex64')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_extra_args_and_kwargs_not_casted(self):\n\n class IdentityLayerWithArgs(base_layer.Layer):\n\n def call(self, inputs, *args, **kwargs):\n kwargs.pop('training', None)\n return nest.flatten([inputs, args, kwargs])\n\n layer = IdentityLayerWithArgs(dtype='float64')\n x, y, z = layer(self._const('float16'), self._const('float16'),\n kwarg=self._const('float16'))\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float16')\n self.assertEqual(z.dtype, 'float16')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_layer_without_autocast(self):\n\n class IdentityLayerWithoutAutocast(IdentityLayer):\n\n def __init__(self, *args, **kwargs):\n kwargs['autocast'] = False\n super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)\n\n layer = IdentityLayerWithoutAutocast(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_dtype_warnings(self):\n # Test a layer warns when it casts inputs.\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n self.assertRegex(\n str(mock_warn.call_args),\n \".*from dtype float64 to the layer's dtype of float32.*\"\n \"The layer has dtype float32 because.*\")\n\n # Test a layer does not warn a second time\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n # Test a new layer can warn even if a different layer already warned\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n self.assertRegex(\n str(mock_warn.call_args),\n \".*from dtype float64 to the layer's dtype of float32.*\"\n \"The layer has dtype float32 because.*\")\n\n # Test a layer does not warn if a dtype is passed\n layer = IdentityLayer(dtype='float32')\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n # Test a layer does not warn if a Policy is set:\n with policy.policy_scope('float32'):\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n @testing_utils.enable_v2_dtype_behavior\n def test_compute_output_signature(self):\n\n class IdentityLayerWithOutputShape(IdentityLayer):\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n layer = IdentityLayerWithOutputShape(dtype='float64')\n output_signature = layer.compute_output_signature(\n tensor_spec.TensorSpec(shape=(), dtype='float32'))\n self.assertEqual(output_signature.shape, ())\n self.assertEqual(output_signature.dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_composite_tensors_input_casting(self):\n sparse = sparse_tensor.SparseTensor(\n indices=array_ops.constant([[0, 1], [2, 3]], dtype='int64'),\n values=array_ops.constant([0., 1.], dtype='float32'),\n dense_shape=array_ops.constant([4, 4], dtype='int64'))\n ragged = ragged_tensor.RaggedTensor.from_row_splits(\n values=array_ops.constant([1., 2., 3.], 
dtype='float32'),\n row_splits=array_ops.constant([0, 2, 2, 3], dtype='int64'))\n\n layer = IdentityLayer(dtype='float16')\n\n for x in sparse, ragged:\n self.assertEqual(x.dtype, 'float32')\n y = layer(x)\n self.assertEqual(y.dtype, 'float16')\n self.assertEqual(type(x), type(y))\n\n @testing_utils.enable_v2_dtype_behavior\n def test_passing_non_tensor(self):\n layer = IdentityLayer()\n x = object()\n y = layer(x) # Layer should not cast 'x', as it's not a tensor\n self.assertIs(x, y)\n\n @testing_utils.disable_v2_dtype_behavior\n def test_v1_behavior(self):\n # Test dtype defaults to None and inferred from input\n layer = IdentityLayer()\n self.assertIsNone(layer.dtype)\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float64')\n\n # Test layer does not cast to dtype\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n"
] |
[
[
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.identity",
"numpy.dot",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.keras.engine.sequential.Sequential",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.keras.backend.set_floatx",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.keras.engine.input_layer.Input",
"tensorflow.python.keras.engine.base_layer.Layer",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.random",
"tensorflow.python.keras.combinations.keras_tensor_combinations",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.testing_utils.get_model_from_layers",
"tensorflow.python.keras.layers.Conv2D",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.keras.backend.learning_phase_scope",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop",
"tensorflow.python.framework.ops.Graph",
"numpy.array",
"numpy.matmul",
"numpy.zeros",
"tensorflow.python.keras.combinations.keras_model_type_combinations",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.keras.layers.Add",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.keras.regularizers.l2",
"tensorflow.python.summary.summary_iterator.summary_iterator",
"tensorflow.python.ops.summary_ops_v2.create_file_writer_v2",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.keras.mixed_precision.experimental.policy.policy_scope",
"tensorflow.python.platform.test.mock.patch.object",
"tensorflow.python.keras.combinations.keras_mode_combinations",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.keras.engine.training.Model",
"numpy.ones",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.keras.combinations.combine",
"tensorflow.python.keras.backend.set_learning_phase",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.backend.function",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.summary_ops_v2.always_record_summaries",
"tensorflow.python.keras.layers.ReLU"
]
] |
pranav270-create/pybullet-panda
|
[
"bf53fb685d8568beb7dd17ec62ea73ecb180f0b5"
] |
[
"Archive/train_grasp_minimal.py"
] |
[
"import os\nimport random\nimport math\nimport numpy as np\nfrom numpy import array\nimport matplotlib.pyplot as plt\nimport torch\nimport pybullet as p\nfrom PIL import Image\nimport concurrent.futures\nimport psutil\n\nfrom model.fcn import MLP\nfrom utils.depth import getParameters\nfrom utils.misc import save__init__args, ensure_directory\nfrom panda.util_geom import quatMult, euler2quat\nfrom panda.panda_env import PandaEnv\n\n\nclass GraspSim:\n def __init__(self, img_size=96, hidden_size=200, max_obj_height=0.05):\n save__init__args(locals())\n\n # Height of the EE before and after reaching down\n self.min_ee_z = 0.15 # EE height when fingers contact the table\n\n # Height of grasp from the depth at the chosen pixel\n self.delta_z = 0.03\n\n # Initialize panda env\n self.mu = 0.3\n self.sigma = 0.03\n self.panda_env = PandaEnv(mu=self.mu,\n sigma=self.sigma,\n finger_type='long')\n self.obj_id_list = []\n self.max_obj_height = max_obj_height\n\n # Pixel to xy\n pixel_xy_path = 'data/pixel2xy' + str(img_size) + '.npz'\n self.pixel2xy_mat = np.load(pixel_xy_path)['pixel2xy'] # HxWx2\n\n # Initialize model\n self.policy = MLP(hidden_size=hidden_size,\n img_size=self.img_size).to('cpu')\n self.policy.eval()\n\n def update_policy(self, model_dict):\n self.policy.load_state_dict(model_dict)\n\n def load_obj(self, obj_path_list, obj_height_list):\n self.obj_id_list = [] # reinitialize\n self.obj_initial_height_list = {}\n env_x = [0.48, 0.53] #!\n env_y = [-0.03, 0.02]\n env_yaw = [-3.14, 3.14]\n num_obj = len(obj_path_list)\n\n obj_x_initial = np.random.uniform(low=env_x[0],\n high=env_x[1],\n size=(num_obj, ))\n obj_y_initial = np.random.uniform(low=env_y[0],\n high=env_y[1],\n size=(num_obj, ))\n obj_orn_initial_all = np.random.uniform(low=env_yaw[0],\n high=env_yaw[1],\n size=(num_obj, 3))\n obj_orn_initial_all[:, :-1] = 0\n\n for obj_ind in range(num_obj):\n pos = [\n obj_x_initial[obj_ind], obj_y_initial[obj_ind],\n obj_height_list[obj_ind] / 2 + 0.001\n ]\n obj_id = p.loadURDF(obj_path_list[obj_ind],\n basePosition=pos,\n baseOrientation=p.getQuaternionFromEuler(\n obj_orn_initial_all[obj_ind]))\n self.obj_id_list += [obj_id]\n\n # Infer number of links - change dynamics for each\n num_joint = p.getNumJoints(obj_id)\n link_all = [-1] + [*range(num_joint)]\n for link_id in link_all:\n p.changeDynamics(\n obj_id,\n link_id,\n lateralFriction=self.mu,\n spinningFriction=self.sigma,\n frictionAnchor=1,\n )\n\n # Let objects settle (actually do not need since we know the height of object and can make sure it spawns very close to table level)\n for _ in range(10):\n p.stepSimulation()\n\n # Record object initial height (for comparing with final height when checking if lifted). 
Note that obj_initial_height_list is a dict\n for obj_id in self.obj_id_list:\n pos, _ = p.getBasePositionAndOrientation(obj_id)\n self.obj_initial_height_list[obj_id] = pos[2]\n\n def sim_parallel(self,\n obj_path_list,\n obj_height_list,\n eps=0,\n num_cpus=16,\n cpu_offset=0):\n num_trial = len(obj_path_list)\n\n # Determine how many trials will be epsilon-random\n random_chosen_ids = random.sample(range(len(obj_path_list)),\n k=int(len(obj_path_list) *\n eps)) # round down\n random_all = np.zeros((len(obj_path_list)))\n random_all[random_chosen_ids] = 1\n\n # Make each path as a list\n obj_path_list = [[obj_path] for obj_path in obj_path_list]\n obj_height_list = [[obj_height] for obj_height in obj_height_list]\n\n # Split for each worker\n trial_ind_batch_all = np.array_split(np.arange(num_trial), num_cpus)\n\n # Construct args - one cpu per worker\n args = (\n ([obj_path_list[id] for id in trial_ind_batch\n ], [obj_height_list[id] for id in trial_ind_batch],\n [random_all[id]\n for id in trial_ind_batch], cpu_offset + batch_ind)\n for batch_ind, trial_ind_batch in enumerate(trial_ind_batch_all))\n\n with torch.no_grad():\n success_det = []\n success = []\n depth = np.empty((0, self.img_size, self.img_size))\n pred = np.empty((0, 2), dtype='int')\n\n # executor.submit will not keep the order of calling the function! executor.map will\n # num_cpus=16, 200 envs, fork, 21.6s\n # num_cpus=16, 200 envs, forkserver, 24.5s\n # num_cpus=16, 200 envs, spawn, 24.9s\n # ray take 28.8s\n # 13.8s after batching\n # ? with 32 cpus and 200 envs on server, same when using fork or forkserver\n with concurrent.futures.ProcessPoolExecutor(num_cpus) as executor:\n res_batch_all = list(executor.map(self.sim_step_helper, args))\n for res_batch in res_batch_all:\n success_det += res_batch[0]\n success += res_batch[1]\n depth = np.concatenate((depth, res_batch[2]))\n pred = np.concatenate((pred, res_batch[3]))\n executor.shutdown()\n\n # map/starmap will return results in the order of calling the function unlike apply/apply_async; and starmap accepts multiple arguments unlike map\n # with mp.Pool(processes=num_cpus) as pool:\n # \tres_all = pool.starmap(self.sim_step, zip(obj_path_list, add_noise_all))\n # \tfor res in res_all:\n # \t\tsuccess += [res[0]]\n # \t\tdepth = np.concatenate((depth, res[1][np.newaxis]))\n # \t\tpred = np.concatenate((pred, res[2][np.newaxis]))\n # pool.close()\n # pool.join()\n\n return success_det, success, depth, pred\n\n def sim_step_helper(self, args):\n return self.sim_step(args[0], args[1], args[2], args[3])\n\n def sim_step(self,\n obj_path_list_all,\n obj_height_list_all,\n random_all,\n cpu_id=0,\n gui=False):\n\n # Assign CPU - somehow PyBullet very slow if assigning cpu in GUI mode\n if not gui:\n ps = psutil.Process()\n ps.cpu_affinity([cpu_id])\n torch.set_num_threads(1)\n\n # Re-seed for sampling initial poses\n np.random.seed()\n\n # Initialize PyBullet\n if gui:\n p.connect(p.GUI, options=\"--width=2600 --height=1800\")\n p.resetDebugVisualizerCamera(0.8, 180, -45, [0.5, 0, 0])\n else:\n p.connect(p.DIRECT)\n\n # Params\n initial_ee_pos_before_img = array([0.3, -0.5, 0.25])\n ee_orn = array([1.0, 0.0, 0.0, 0.0]) # straight down\n\n ######################### Reset #######################\n self.panda_env.reset_env()\n\n ########################\n success_det_trials = [] # deterministic\n success_trials = []\n depth_trials = np.empty((0, self.img_size, self.img_size))\n pred_trials = np.empty((0, 2), dtype='int')\n\n for obj_path_list, obj_height_list, 
use_random_object in zip(\n obj_path_list_all, obj_height_list_all, random_all):\n\n # If use random, also sample a deterministic one for success rate\n if use_random_object:\n use_random_trial = [1, 0]\n else:\n use_random_trial = [0]\n for use_random in use_random_trial:\n\n # Set arm to starting pose\n self.panda_env.reset_arm_joints_ik(initial_ee_pos_before_img,\n ee_orn)\n self.panda_env.grasp(targetVel=0.10) # open gripper\n\n # At each step, use same environment (objects)\n for obj_id in self.obj_id_list:\n p.removeBody(obj_id)\n self.load_obj(obj_path_list, obj_height_list)\n\n # If clears table\n success = 0\n\n ######################### Execute #######################\n\n # Infer\n depth = torch.from_numpy(self.get_depth()[np.newaxis]).to(\n 'cpu') # 1xNxW\n # plt.imshow(depth_orig[0], cmap='Greys', interpolation='nearest')\n # plt.show()\n # for depth in depth_rot_all:\n # \tplt.imshow(depth[0], cmap='Greys', interpolation='nearest')\n # \tplt.show()\n pred_infer = self.policy(depth).squeeze(0).detach().numpy()\n # plt.imshow(pred_infer.detach().cpu().numpy())\n # plt.show()\n\n # Apply spatial (3D) argmax to pick pixel and theta\n if not use_random:\n (px, py) = np.unravel_index(np.argmax(pred_infer),\n pred_infer.shape)\n else:\n px = random.randint(0, self.img_size - 1)\n py = random.randint(0, self.img_size - 1)\n\n # Get x/y from pixels\n x, y = self.pixel2xy_mat[py, px] # actual pos, a bug\n\n # Find the target z height\n z = float(depth[0, px, py] * self.max_obj_height)\n z_target = max(0, z - self.delta_z) # clip\n z_target_ee = z_target + self.min_ee_z\n\n # Rotate into local frame\n xy_orig = array([[x], [y]])\n\n # Execute, reset ik on top of object, reach down, grasp, lift, check success\n ee_pos_before = np.append(xy_orig, z_target_ee + 0.10)\n ee_pos_after = np.append(xy_orig, z_target_ee + 0.05)\n for _ in range(3):\n self.panda_env.reset_arm_joints_ik(ee_pos_before, ee_orn)\n p.stepSimulation()\n ee_pos = np.append(xy_orig, z_target_ee)\n self.panda_env.move_pos(ee_pos,\n absolute_global_quat=ee_orn,\n numSteps=300)\n # print(self.panda_env.get_ee())\n self.panda_env.grasp(targetVel=-0.10) # always close gripper\n self.panda_env.move_pos(\n ee_pos, absolute_global_quat=ee_orn,\n numSteps=100) # keep pose until gripper closes\n self.panda_env.move_pos(ee_pos_after,\n absolute_global_quat=ee_orn,\n numSteps=150) # lift\n\n # Check if all objects removed, terminate early if so\n self.clear_obj()\n if len(self.obj_id_list) == 0:\n success = 1\n\n ######################### Data #######################\n if not (\n use_random_object and not use_random\n ): # do not save for the deterministic one paired with the random one\n success_trials += [success]\n depth_trials = np.concatenate((depth_trials, depth))\n pred_trials = np.concatenate(\n (pred_trials, np.array([[px, py]], dtype='int')))\n if not use_random: # save success of deterministic one\n success_det_trials += [success]\n\n p.disconnect()\n return success_det_trials, success_trials, depth_trials, pred_trials\n\n def clear_obj(self):\n height = []\n obj_to_be_removed = []\n for obj_id in self.obj_id_list:\n pos, _ = p.getBasePositionAndOrientation(obj_id)\n height += [pos[2]]\n if pos[2] - self.obj_initial_height_list[obj_id] > 0.03:\n obj_to_be_removed += [obj_id]\n\n for obj_id in obj_to_be_removed:\n p.removeBody(obj_id)\n self.obj_id_list.remove(obj_id)\n\n def get_depth(self):\n camera_height = 0.30\n viewMat = [\n -1.0, 0.0, -0.0, 0.0, -0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n 0.5, 0.0, -camera_height, 
1.0\n ] # 5cm height\n projMat = [\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0\n ]\n width = 64\n height = 64\n center = width // 2\n crop_dim = self.img_size\n m22 = projMat[10]\n m32 = projMat[14]\n near = 2 * m32 / (2 * m22 - 2)\n far = ((m22 - 1.0) * near) / (m22 + 1.0)\n\n img_arr = p.getCameraImage(width=width,\n height=height,\n viewMatrix=viewMat,\n projectionMatrix=projMat,\n flags=p.ER_NO_SEGMENTATION_MASK)\n depth = np.reshape(\n img_arr[3],\n (width, height))[center - crop_dim // 2:center + crop_dim // 2,\n center - crop_dim // 2:center + crop_dim // 2]\n depth = far * near / (far - (far - near) * depth)\n\n depth = (camera_height -\n depth) / self.max_obj_height # set table zero, and normalize\n depth = depth.clip(min=0., max=1.)\n\n return depth\n\n\nclass TrainGrasp:\n def __init__(self,\n result_dir,\n num_cpus=16,\n cpu_offset=0,\n device='cuda:0',\n img_size=10,\n batch_size=64,\n buffer_size=500,\n hidden_size=200,\n lr=1e-4,\n num_update_per_step=1,\n eps=0.2,\n eps_min=0.,\n eps_decay=0.9,\n **kwargs):\n # Save class attributes and initialize folders\n save__init__args(locals())\n self.model_dir = result_dir + 'policy_model/'\n self.img_dir = result_dir + 'policy_img/'\n self.train_detail_dir = result_dir + 'retrain_detail/'\n ensure_directory(self.model_dir)\n ensure_directory(self.img_dir)\n ensure_directory(self.train_detail_dir)\n\n # Using CPU for inference in simulation right now\n self.graspSim = GraspSim(\n img_size=img_size,\n hidden_size=hidden_size,\n )\n\n # Set up model\n self.policy = MLP(hidden_size=self.hidden_size,\n img_size=self.img_size).to(device)\n num_model_parameter = sum(p.numel() for p in self.policy.parameters()\n if p.requires_grad)\n print('Num of policy parameters: %d' % num_model_parameter)\n\n # Optimizer\n self.criterion = torch.nn.BCEWithLogitsLoss(reduction='mean')\n self.optimizer = torch.optim.AdamW([{\n 'params': self.policy.parameters(),\n 'lr': lr,\n 'weight_decay': 0\n }])\n\n # Flag for indicating first iteration\n self.initial_itr_flag = True\n\n # Reset buffer\n self.reset_buffer()\n\n def reset_buffer(self):\n # Experience buffer\n self.depth_buffer = torch.empty(\n (0, self.img_size, self.img_size)).float().to('cpu')\n self.ground_truth_buffer = torch.empty(\n (0, self.img_size, self.img_size)).float().to('cpu')\n self.mask_buffer = torch.empty(\n (0, self.img_size, self.img_size)).float().to('cpu')\n self.recency_buffer = np.empty((0))\n\n def run(self,\n obj_path_all,\n obj_height_all,\n num_step,\n num_trial_per_step=10,\n debug_freq=100,\n affordance_map_freq=100,\n **kwargs):\n\n # Record results\n train_loss_list = []\n success_rate_list = []\n best_success_rate = 0.\n best_policy_path = None\n prev_policy_path = None\n\n # Start with all random to fill up the buffer\n if self.initial_itr_flag:\n eps = 1\n self.initial_itr_flag = False\n else:\n eps = self.eps\n\n # Simulate once first\n new_success_det, new_success, new_depth, new_pred = self.graspSim.sim_parallel(\n obj_path_all * num_trial_per_step,\n obj_height_all * num_trial_per_step,\n eps=eps,\n num_cpus=self.num_cpus,\n cpu_offset=self.cpu_offset)\n\n # Run\n for step in range(num_step):\n # optimizer_to(self.optimizer, 'cpu')# had to push optimizer to cpu\n # optimizer_to(self.optimizer, self.device)\t# push back to gpu\n\n # Decay epsilon if buffer filled\n eps = max(self.eps_min, eps * self.eps_decay)\n\n # Add to buffer\n fill_flag = self.add_to_buffer(step, new_success, new_depth,\n 
new_pred)\n\n # Update for multiple times once buffer filled\n step_loss = 0\n if fill_flag:\n for _ in range(self.num_update_per_step):\n step_loss += self.train_policy()\n step_loss /= self.num_update_per_step\n\n # Update policy for graspSim (on CPU)\n self.graspSim.update_policy(self.get_policy())\n\n # Simulate\n new_success_det, new_success, new_depth, new_pred = self.graspSim.sim_parallel(\n obj_path_all * num_trial_per_step,\n obj_height_all * num_trial_per_step,\n eps=eps,\n num_cpus=self.num_cpus,\n cpu_offset=self.cpu_offset)\n success_rate = np.mean(array(new_success_det))\n\n # Record\n train_loss_list += [step_loss]\n success_rate_list += [success_rate]\n\n # Generate sample affordance map - samples can be random - so not necessarily the best one\n if step % affordance_map_freq == 0:\n depth_ind = random.randint(0, new_depth.shape[0] - 1)\n depth_input = torch.from_numpy(\n new_depth[depth_ind][np.newaxis]).float().to(\n self.device) # 1xHxW\n pred_infer = self.policy(depth_input).squeeze(0) # HxW\n self.save_infer_img(new_depth[depth_ind],\n pred_infer,\n img_path_prefix=self.img_dir + str(step) +\n '_' + str(depth_ind))\n\n # Debug\n if step % debug_freq == 0:\n print(\"Step {:d}, Loss: {:.4f}\".format(step, step_loss))\n torch.save(\n {\n 'train_loss_list': train_loss_list,\n 'success_rate_list': success_rate_list,\n }, self.result_dir + 'train_details') # keeps overwriting\n # Clear GPU data regularly\n torch.cuda.empty_cache()\n\n # Save model if better success rate, remove prev one\n if best_success_rate < success_rate:\n best_success_rate = success_rate\n best_policy_path = self.model_dir + 'step_' + str(\n step) + '_acc_' + \"{:.3f}\".format(success_rate)\n self.save_model(path=best_policy_path)\n print('Saving new model, success %f' % success_rate)\n\n if prev_policy_path is not None:\n os.remove(prev_policy_path + '.pt')\n prev_policy_path = best_policy_path\n return best_policy_path\n\n def add_to_buffer(self, step, new_success, new_depth, new_pred):\n # Indices to be replaced in the buffer for current step\n num_new = new_depth.shape[0]\n\n # Convert depth to tensor and append new dimension\n new_depth = torch.from_numpy(new_depth).float().to('cpu').detach()\n\n # Construnct ground truth and mask (all zeros except for selected pixel)\n new_ground_truth = torch.zeros(num_new, self.img_size,\n self.img_size).to('cpu')\n new_mask = torch.zeros(num_new, self.img_size, self.img_size).to('cpu')\n for trial_ind, (success,\n (px, py)) in enumerate(zip(new_success, new_pred)):\n new_ground_truth[trial_ind, px, py] = success\n new_mask[trial_ind, px, py] = 1\n\n # Determine recency for new data\n recency = math.exp(-step * 0.1) # rank-based\n\n # Check if buffer filled up\n if self.depth_buffer.shape[0] < self.buffer_size:\n self.depth_buffer = torch.cat(\n (self.depth_buffer, new_depth))[:self.buffer_size]\n self.ground_truth_buffer = torch.cat(\n (self.ground_truth_buffer,\n new_ground_truth))[:self.buffer_size]\n self.mask_buffer = torch.cat(\n (self.mask_buffer, new_mask))[:self.buffer_size]\n self.recency_buffer = np.concatenate(\n (self.recency_buffer, np.ones(\n (num_new)) * recency))[:self.buffer_size]\n else:\n # Replace older ones\n replace_ind = np.random.choice(self.buffer_size,\n size=num_new,\n replace=False,\n p=self.recency_buffer /\n np.sum(self.recency_buffer))\n self.depth_buffer[replace_ind] = new_depth\n self.ground_truth_buffer[replace_ind] = new_ground_truth\n self.mask_buffer[replace_ind] = new_mask\n self.recency_buffer[replace_ind] = recency\n\n # 
Return if filled up\n if self.depth_buffer.shape[0] >= self.buffer_size:\n return 1\n else:\n return 0\n\n def train_policy(self):\n # Switch mode\n # self.fcn.to(self.device)\n # self.optimizer.load_state_dict(self.optimizer.state_dict())\n # self.fcn.train()\n\n # Train by sampling from buffer\n sample_inds = random.sample(range(self.buffer_size), k=self.batch_size)\n depth_train_batch = self.depth_buffer[sample_inds].clone().detach().to(\n self.device, non_blocking=True) # NxHxW\n ground_truth_batch = self.ground_truth_buffer[sample_inds].clone(\n ).detach().to(self.device, non_blocking=True) # NxHxW\n mask_train_batch = self.mask_buffer[sample_inds].clone().detach().to(\n self.device, non_blocking=True) # NxHxW\n\n # Forward, get loss, zero gradients\n pred_train_batch = self.policy(depth_train_batch) # NxHxW\n train_loss = self.criterion(pred_train_batch, ground_truth_batch)\n self.optimizer.zero_grad()\n\n # mask gradient for non-selected pixels\n pred_train_batch.retain_grad()\n pred_train_batch.register_hook(lambda grad: grad * mask_train_batch)\n\n # Update params using clipped gradients\n train_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 10)\n self.optimizer.step()\n return train_loss.detach().cpu().numpy()\n\n def load_policy(self, policy_path):\n self.policy.load_state_dict(\n torch.load(policy_path + '.pt', map_location=self.device))\n self.graspSim.update_policy(self.get_policy())\n\n def get_policy(self):\n return self.policy.state_dict()\n\n def save_model(self, path):\n torch.save(self.policy.state_dict(), path + '.pt')\n\n def save_infer_img(self, depth, pred_infer, img_path_prefix):\n depth_8bit = (depth * 255).astype('uint8')\n depth_8bit = np.stack((depth_8bit, ) * 3, axis=-1)\n img_rgb = Image.fromarray(depth_8bit, mode='RGB')\n img_rgb.save(img_path_prefix + '_rgb.png')\n\n cmap = plt.get_cmap('jet')\n pred_infer_detach = (torch.sigmoid(pred_infer)).detach().cpu().numpy()\n pred_infer_detach = (pred_infer_detach - np.min(pred_infer_detach)) / (\n np.max(pred_infer_detach) - np.min(pred_infer_detach)) # normalize\n pred_cmap = cmap(pred_infer_detach)\n pred_cmap = (np.delete(pred_cmap, 3, 2) * 255).astype('uint8')\n img_heat = Image.fromarray(pred_cmap, mode='RGB')\n img_heat.save(img_path_prefix + '_heatmap.png')\n\n img_overlay = Image.blend(img_rgb, img_heat, alpha=.8)\n img_overlay.save(img_path_prefix + '_overlay.png')\n\n\nif __name__ == '__main__':\n\n # Fix seeds\n # seed = 0\n # random.seed(seed)\n # np.random.seed(seed)\n # torch.manual_seed(seed)\n\n # Configs\n name = 'grasp_test'\n num_cpus = 10 # same as num_trial_per_step\n num_steps = 1000\n num_trial_per_step = 10\n num_update_per_step = 10\n batch_size = 16\n val_freq = 20\n buffer_size = 200\n img_size = 20\n result_dir = 'result/' + name + '/'\n ensure_directory(result_dir)\n\n # Configure objects\n # obj_dir = '/home/allen/data/wasserstein/grasp/random_polygon_v1/'\n # obj_path_all = [obj_dir + str(ind) + '.urdf' for ind in range(10)]\n obj_path_all = ['data/sample_mug/4.urdf']\n obj_height_all = [0.05 for _ in range(len(obj_path_all))]\n\n # Initialize trianing env\n trainer = TrainGrasp(result_dir=result_dir,\n buffer_size=buffer_size,\n num_update_per_step=num_update_per_step,\n batch_size=batch_size,\n img_size=img_size,\n policy_path=None,\n num_cpus=num_cpus)\n best_policy_path = trainer.run(obj_path_all, obj_height_all, num_steps,\n num_trial_per_step, val_freq, val_freq)\n print('Training done; best policy path: ', best_policy_path)\n"
] |
[
[
"torch.cat",
"numpy.load",
"numpy.min",
"torch.nn.BCEWithLogitsLoss",
"torch.load",
"numpy.max",
"numpy.concatenate",
"torch.sigmoid",
"numpy.empty",
"matplotlib.pyplot.get_cmap",
"numpy.arange",
"numpy.argmax",
"numpy.append",
"torch.empty",
"torch.set_num_threads",
"torch.zeros",
"numpy.array",
"numpy.delete",
"numpy.reshape",
"torch.save",
"torch.cuda.empty_cache",
"numpy.stack",
"numpy.random.seed",
"numpy.sum",
"torch.no_grad",
"numpy.ones",
"torch.from_numpy",
"numpy.random.uniform"
]
] |
hectormz/napari
|
[
"c53051ed3e3693ae74c86a5c4611f057293bd21d"
] |
[
"napari/_qt/qt_dims_slider.py"
] |
[
"from typing import Optional, Tuple\n\nimport numpy as np\nfrom qtpy.QtCore import QObject, Qt, QTimer, Signal, Slot\nfrom qtpy.QtGui import QIntValidator\nfrom qtpy.QtWidgets import (\n QApplication,\n QCheckBox,\n QComboBox,\n QDoubleSpinBox,\n QFormLayout,\n QHBoxLayout,\n QLabel,\n QLineEdit,\n QPushButton,\n QWidget,\n QFrame,\n)\n\nfrom ..components.dims_constants import DimsMode\nfrom ..utils.event import Event\nfrom ._constants import LoopMode\nfrom .qt_modal import QtPopup\nfrom .qt_scrollbar import ModifiedScrollBar\nfrom .threading import _new_worker_qthread\n\n\nclass QtDimSliderWidget(QWidget):\n \"\"\"Compound widget to hold the label, slider and play button for an axis.\n\n These will usually be instantiated in the QtDims._create_sliders method.\n This widget *must* be instantiated with a parent QtDims.\n \"\"\"\n\n axis_label_changed = Signal(int, str) # axis, label\n fps_changed = Signal(float)\n mode_changed = Signal(str)\n range_changed = Signal(tuple)\n play_started = Signal()\n play_stopped = Signal()\n\n def __init__(self, parent: QWidget, axis: int):\n super().__init__(parent=parent)\n self.axis = axis\n self.qt_dims = parent\n self.dims = parent.dims\n self.axis_label = None\n self.slider = None\n self.play_button = None\n self.curslice_label = QLineEdit(self)\n self.curslice_label.setToolTip(f'Current slice for axis {axis}')\n # if we set the QIntValidator to actually reflect the range of the data\n # then an invalid (i.e. too large) index doesn't actually trigger the\n # editingFinished event (the user is expected to change the value)...\n # which is confusing to the user, so instead we use an IntValidator\n # that makes sure the user can only enter integers, but we do our own\n # value validation in self.change_slice\n self.curslice_label.setValidator(QIntValidator(0, 999999))\n\n self.curslice_label.editingFinished.connect(self._set_slice_from_label)\n self.totslice_label = QLabel(self)\n self.totslice_label.setToolTip(f'Total slices for axis {axis}')\n self.curslice_label.setObjectName('slice_label')\n self.totslice_label.setObjectName('slice_label')\n sep = QFrame(self)\n sep.setFixedSize(1, 14)\n sep.setObjectName('slice_label_sep')\n\n self._fps = 10\n self._minframe = None\n self._maxframe = None\n self._loop_mode = LoopMode.LOOP\n\n layout = QHBoxLayout()\n self._create_axis_label_widget()\n self._create_range_slider_widget()\n self._create_play_button_widget()\n\n layout.addWidget(self.axis_label)\n layout.addWidget(self.play_button)\n layout.addWidget(self.slider, stretch=1)\n layout.addWidget(self.curslice_label)\n layout.addWidget(sep)\n layout.addWidget(self.totslice_label)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(2)\n layout.setAlignment(Qt.AlignVCenter)\n self.setLayout(layout)\n self.dims.events.axis_labels.connect(self._pull_label)\n\n def _set_slice_from_label(self):\n \"\"\"Update the dims point based on the curslice_label.\"\"\"\n val = int(self.curslice_label.text())\n max_allowed = self.dims.max_indices[self.axis]\n if val > max_allowed:\n val = max_allowed\n self.curslice_label.setText(str(val))\n self.curslice_label.clearFocus()\n self.qt_dims.setFocus()\n self.dims.set_point(self.axis, val)\n\n def _create_axis_label_widget(self):\n \"\"\"Create the axis label widget which accompanies its slider.\"\"\"\n label = QLineEdit(self)\n label.setObjectName('axis_label') # needed for _update_label\n label.setText(self.dims.axis_labels[self.axis])\n label.home(False)\n label.setToolTip('Edit to change axis label')\n 
label.setAcceptDrops(False)\n label.setEnabled(True)\n label.setAlignment(Qt.AlignRight)\n label.setContentsMargins(0, 0, 2, 0)\n label.textChanged.connect(self._update_label)\n label.editingFinished.connect(self._clear_label_focus)\n self.axis_label = label\n\n def _value_changed(self, value):\n \"\"\"Slider changed to this new value.\n\n We split this out as a separate function for perfmon.\n \"\"\"\n self.dims.set_point(self.axis, value)\n\n def _create_range_slider_widget(self):\n \"\"\"Creates a range slider widget for a given axis.\"\"\"\n _range = self.dims.range[self.axis]\n # Set the maximum values of the range slider to be one step less than\n # the range of the layer as otherwise the slider can move beyond the\n # shape of the layer as the endpoint is included\n _range = (_range[0], _range[1] - _range[2], _range[2])\n point = self.dims.point[self.axis]\n\n slider = ModifiedScrollBar(Qt.Horizontal)\n slider.setFocusPolicy(Qt.NoFocus)\n slider.setMinimum(int(_range[0]))\n slider.setMaximum(int(_range[1]))\n slider.setSingleStep(int(_range[2]))\n slider.setPageStep(int(_range[2]))\n slider.setValue(point)\n\n # Listener to be used for sending events back to model:\n slider.valueChanged.connect(self._value_changed)\n\n def slider_focused_listener():\n self.qt_dims.last_used = self.axis\n\n # linking focus listener to the last used:\n slider.sliderPressed.connect(slider_focused_listener)\n self.slider = slider\n\n def _create_play_button_widget(self):\n \"\"\"Creates the actual play button, which has the modal popup.\"\"\"\n self.play_button = QtPlayButton(self.qt_dims, self.axis)\n self.play_button.mode_combo.activated[str].connect(\n lambda x: self.__class__.loop_mode.fset(\n self, LoopMode(x.replace(' ', '_'))\n )\n )\n\n def fps_listener(*args):\n fps = self.play_button.fpsspin.value()\n fps *= -1 if self.play_button.reverse_check.isChecked() else 1\n self.__class__.fps.fset(self, fps)\n\n self.play_button.fpsspin.editingFinished.connect(fps_listener)\n self.play_button.reverse_check.stateChanged.connect(fps_listener)\n self.play_stopped.connect(self.play_button._handle_stop)\n self.play_started.connect(self.play_button._handle_start)\n\n def _pull_label(self, event):\n \"\"\"Updates the label LineEdit from the dims model.\"\"\"\n if event.axis == self.axis:\n label = self.dims.axis_labels[self.axis]\n self.axis_label.setText(label)\n self.axis_label_changed.emit(self.axis, label)\n\n def _update_label(self):\n \"\"\"Update dimension slider label.\"\"\"\n with self.dims.events.axis_labels.blocker():\n self.dims.set_axis_label(self.axis, self.axis_label.text())\n self.axis_label_changed.emit(self.axis, self.axis_label.text())\n\n def _clear_label_focus(self):\n \"\"\"Clear focus from dimension slider label.\"\"\"\n self.axis_label.clearFocus()\n self.qt_dims.setFocus()\n\n def _update_range(self):\n \"\"\"Updates range for slider.\"\"\"\n displayed_sliders = self.qt_dims._displayed_sliders\n\n _range = self.dims.range[self.axis]\n _range = (_range[0], _range[1] - _range[2], _range[2])\n if _range not in (None, (None, None, None)):\n if _range[1] == 0:\n displayed_sliders[self.axis] = False\n self.qt_dims.last_used = None\n self.hide()\n else:\n if (\n not displayed_sliders[self.axis]\n and self.axis not in self.dims.displayed\n ):\n displayed_sliders[self.axis] = True\n self.last_used = self.axis\n self.show()\n self.slider.setMinimum(int(_range[0]))\n self.slider.setMaximum(int(_range[1]))\n self.slider.setSingleStep(int(_range[2]))\n self.slider.setPageStep(int(_range[2]))\n 
maxi = self.dims.max_indices[self.axis]\n self.totslice_label.setText(str(int(maxi)))\n self.totslice_label.setAlignment(Qt.AlignLeft)\n self._update_slice_labels()\n else:\n displayed_sliders[self.axis] = False\n self.hide()\n\n def _update_slider(self):\n \"\"\"Update dimension slider.\"\"\"\n mode = self.dims.mode[self.axis]\n if mode == DimsMode.POINT:\n self.slider.setValue(int(self.dims.point[self.axis]))\n self._update_slice_labels()\n\n def _update_slice_labels(self):\n \"\"\"Update slice labels to match current dimension slider position.\"\"\"\n step = self.dims.range[self.axis][2]\n self.curslice_label.setText(\n str(int(self.dims.point[self.axis] // step))\n )\n self.curslice_label.setAlignment(Qt.AlignRight)\n\n @property\n def fps(self):\n \"\"\"Frames per second for animation.\"\"\"\n return self._fps\n\n @fps.setter\n def fps(self, value):\n \"\"\"Frames per second for animation.\n\n Parameters\n ----------\n value : float\n Frames per second for animation.\n \"\"\"\n self._fps = value\n self.play_button.fpsspin.setValue(abs(value))\n self.play_button.reverse_check.setChecked(value < 0)\n self.fps_changed.emit(value)\n\n @property\n def loop_mode(self):\n \"\"\"Loop mode for animation.\n\n Loop mode enumeration napari._qt._constants.LoopMode\n Available options for the loop mode string enumeration are:\n - LoopMode.ONCE\n Animation will stop once movie reaches the max frame\n (if fps > 0) or the first frame (if fps < 0).\n - LoopMode.LOOP\n Movie will return to the first frame after reaching\n the last frame, looping continuously until stopped.\n - LoopMode.BACK_AND_FORTH\n Movie will loop continuously until stopped,\n reversing direction when the maximum or minimum frame\n has been reached.\n \"\"\"\n return self._loop_mode\n\n @loop_mode.setter\n def loop_mode(self, value):\n \"\"\"Loop mode for animation.\n\n Parameters\n ----------\n value : napari._qt._constants.LoopMode\n Loop mode for animation.\n Available options for the loop mode string enumeration are:\n - LoopMode.ONCE\n Animation will stop once movie reaches the max frame\n (if fps > 0) or the first frame (if fps < 0).\n - LoopMode.LOOP\n Movie will return to the first frame after reaching\n the last frame, looping continuously until stopped.\n - LoopMode.BACK_AND_FORTH\n Movie will loop continuously until stopped,\n reversing direction when the maximum or minimum frame\n has been reached.\n \"\"\"\n self._loop_mode = value\n self.play_button.mode_combo.setCurrentText(str(value))\n self.mode_changed.emit(str(value))\n\n @property\n def frame_range(self):\n \"\"\"Frame range for animation, as (minimum_frame, maximum_frame).\"\"\"\n frame_range = (self._minframe, self._maxframe)\n frame_range = frame_range if any(frame_range) else None\n return frame_range\n\n @frame_range.setter\n def frame_range(self, value):\n \"\"\"Frame range for animation, as (minimum_frame, maximum_frame).\n\n Parameters\n ----------\n value : tuple(int, int)\n Frame range as tuple/list with range (minimum_frame, maximum_frame)\n \"\"\"\n if not isinstance(value, (tuple, list, type(None))):\n raise TypeError('frame_range value must be a list or tuple')\n if value and not len(value) == 2:\n raise ValueError('frame_range must have a length of 2')\n if value is None:\n value = (None, None)\n self._minframe, self._maxframe = value\n self.range_changed.emit(tuple(value))\n\n def _update_play_settings(self, fps, loop_mode, frame_range):\n \"\"\"Update settings for animation.\n\n Parameters\n ----------\n fps : float\n Frames per second to play 
the animation.\n loop_mode : napari._qt._constants.LoopMode\n Loop mode for animation.\n Available options for the loop mode string enumeration are:\n - LoopMode.ONCE\n Animation will stop once movie reaches the max frame\n (if fps > 0) or the first frame (if fps < 0).\n - LoopMode.LOOP\n Movie will return to the first frame after reaching\n the last frame, looping continuously until stopped.\n - LoopMode.BACK_AND_FORTH\n Movie will loop continuously until stopped,\n reversing direction when the maximum or minimum frame\n has been reached.\n frame_range : tuple(int, int)\n Frame range as tuple/list with range (minimum_frame, maximum_frame)\n \"\"\"\n if fps is not None:\n self.fps = fps\n if loop_mode is not None:\n self.loop_mode = loop_mode\n if frame_range is not None:\n self.frame_range = frame_range\n\n def _play(\n self,\n fps: Optional[float] = None,\n loop_mode: Optional[str] = None,\n frame_range: Optional[Tuple[int, int]] = None,\n ):\n \"\"\"Animate (play) axis. Same API as QtDims.play()\n\n Putting the AnimationWorker logic here makes it easier to call\n QtDims.play(axis), or hit the keybinding, and have each axis remember\n it's own settings (fps, mode, etc...).\n\n Parameters\n ----------\n fps : float\n Frames per second for animation.\n loop_mode : napari._qt._constants.LoopMode\n Loop mode for animation.\n Available options for the loop mode string enumeration are:\n - LoopMode.ONCE\n Animation will stop once movie reaches the max frame\n (if fps > 0) or the first frame (if fps < 0).\n - LoopMode.LOOP\n Movie will return to the first frame after reaching\n the last frame, looping continuously until stopped.\n - LoopMode.BACK_AND_FORTH\n Movie will loop continuously until stopped,\n reversing direction when the maximum or minimum frame\n has been reached.\n frame_range : tuple(int, int)\n Frame range as tuple/list with range (minimum_frame, maximum_frame)\n \"\"\"\n\n # having this here makes sure that using the QtDims.play() API\n # keeps the play preferences synchronized with the play_button.popup\n self._update_play_settings(fps, loop_mode, frame_range)\n\n # setting fps to 0 just stops the animation\n if fps == 0:\n return\n\n worker, thread = _new_worker_qthread(\n AnimationWorker,\n self,\n _start_thread=True,\n _connect={'frame_requested': self.qt_dims._set_frame},\n )\n worker.finished.connect(self.qt_dims.stop)\n thread.finished.connect(self.play_stopped.emit)\n self.play_started.emit()\n self.thread = thread\n return worker, thread\n\n\nclass QtCustomDoubleSpinBox(QDoubleSpinBox):\n \"\"\"Custom Spinbox that emits an additional editingFinished signal whenever\n the valueChanged event is emitted AND the left mouse button is down.\n\n The original use case here was the FPS spinbox in the play button, where\n hooking to the actual valueChanged event is undesirable, because if the\n user clears the LineEdit to type, for example, \"0.5\", then play back\n will temporarily pause when \"0\" is typed (if the animation is currently\n running). However, the editingFinished event ignores mouse click events on\n the spin buttons. This subclass class triggers an event both during\n editingFinished and when the user clicks on the spin buttons.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, *kwargs)\n self.valueChanged.connect(self.custom_change_event)\n\n def custom_change_event(self, value):\n \"\"\"Emits editingFinished if valueChanged AND left mouse button is down.\n (i.e. 
when the user clicks on the spin buttons)\n Paramters\n ---------\n value : float\n The value of this custom double spin box.\n \"\"\"\n if QApplication.mouseButtons() & Qt.LeftButton:\n self.editingFinished.emit()\n\n def textFromValue(self, value):\n \"\"\"This removes the decimal places if the float is an integer.\n\n Parameters\n ----------\n value : float\n The value of this custom double spin box.\n \"\"\"\n if value.is_integer():\n value = int(value)\n return str(value)\n\n def keyPressEvent(self, event):\n \"\"\"Handle key press event for the dimension slider spinbox.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n # this is here to intercept Return/Enter keys when editing the FPS\n # SpinBox. We WANT the return key to close the popup normally,\n # but if the user is editing the FPS spinbox, we simply want to\n # register the change and lose focus on the lineEdit, in case they\n # want to make an additional change (without reopening the popup)\n if event.key() in (Qt.Key_Return, Qt.Key_Enter):\n self.editingFinished.emit()\n self.clearFocus()\n return\n super().keyPressEvent(event)\n\n\nclass QtPlayButton(QPushButton):\n \"\"\"Play button, included in the DimSliderWidget, to control playback\n\n the button also owns the QtModalPopup that controls the playback settings.\n \"\"\"\n\n play_requested = Signal(int) # axis, fps\n\n def __init__(self, dims, axis, reverse=False, fps=10, mode=LoopMode.LOOP):\n super().__init__()\n self.dims = dims\n self.axis = axis\n self.reverse = reverse\n self.fps = fps\n self.mode = mode\n self.setProperty('reverse', str(reverse)) # for styling\n self.setProperty('playing', 'False') # for styling\n\n # build popup modal form\n\n self.popup = QtPopup(self)\n form_layout = QFormLayout()\n self.popup.frame.setLayout(form_layout)\n\n fpsspin = QtCustomDoubleSpinBox(self.popup)\n fpsspin.setObjectName(\"fpsSpinBox\")\n fpsspin.setAlignment(Qt.AlignCenter)\n fpsspin.setValue(self.fps)\n if hasattr(fpsspin, 'setStepType'):\n # this was introduced in Qt 5.12. 
Totally optional, just nice.\n fpsspin.setStepType(QDoubleSpinBox.AdaptiveDecimalStepType)\n fpsspin.setMaximum(500)\n fpsspin.setMinimum(0)\n form_layout.insertRow(\n 0, QLabel('frames per second:', parent=self.popup), fpsspin\n )\n self.fpsspin = fpsspin\n\n revcheck = QCheckBox(self.popup)\n revcheck.setObjectName(\"playDirectionCheckBox\")\n form_layout.insertRow(\n 1, QLabel('play direction:', parent=self.popup), revcheck\n )\n self.reverse_check = revcheck\n\n mode_combo = QComboBox(self.popup)\n mode_combo.addItems([str(i).replace('_', ' ') for i in LoopMode])\n form_layout.insertRow(\n 2, QLabel('play mode:', parent=self.popup), mode_combo\n )\n mode_combo.setCurrentText(str(self.mode))\n self.mode_combo = mode_combo\n\n def mouseReleaseEvent(self, event):\n \"\"\"Show popup for right-click, toggle animation for right click.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the qt context.\n \"\"\"\n # using this instead of self.customContextMenuRequested.connect and\n # clicked.connect because the latter was not sending the\n # rightMouseButton release event.\n if event.button() == Qt.RightButton:\n self.popup.show_above_mouse()\n elif event.button() == Qt.LeftButton:\n self._on_click()\n\n def _on_click(self):\n \"\"\"Toggle play/stop animation control.\"\"\"\n if self.property('playing') == \"True\":\n return self.dims.stop()\n self.play_requested.emit(self.axis)\n\n def _handle_start(self):\n \"\"\"On animation start, set playing property to True & update style.\"\"\"\n self.setProperty('playing', 'True')\n self.style().unpolish(self)\n self.style().polish(self)\n\n def _handle_stop(self):\n \"\"\"On animation stop, set playing property to False & update style.\"\"\"\n self.setProperty('playing', 'False')\n self.style().unpolish(self)\n self.style().polish(self)\n\n\nclass AnimationWorker(QObject):\n \"\"\"A thread to keep the animation timer independent of the main event loop.\n\n This prevents mouseovers and other events from causing animation lag. See\n QtDims.play() for public-facing docstring.\n \"\"\"\n\n frame_requested = Signal(int, int) # axis, point\n finished = Signal()\n started = Signal()\n\n def __init__(self, slider):\n super().__init__()\n self.slider = slider\n self.dims = slider.dims\n self.axis = slider.axis\n self.loop_mode = slider.loop_mode\n slider.fps_changed.connect(self.set_fps)\n slider.mode_changed.connect(self.set_loop_mode)\n slider.range_changed.connect(self.set_frame_range)\n self.set_fps(self.slider.fps)\n self.set_frame_range(slider.frame_range)\n\n # after dims.set_point is called, it will emit a dims.events.axis()\n # we use this to update this threads current frame (in case it\n # was some other event that updated the axis)\n self.dims.events.axis.connect(self._on_axis_changed)\n self.current = max(self.dims.point[self.axis], self.min_point)\n self.current = min(self.current, self.max_point)\n self.timer = QTimer()\n\n @Slot()\n def work(self):\n \"\"\"Play the animation.\"\"\"\n # if loop_mode is once and we are already on the last frame,\n # return to the first frame... 
(so the user can keep hitting once)\n if self.loop_mode == LoopMode.ONCE:\n if self.step > 0 and self.current >= self.max_point - 1:\n self.frame_requested.emit(self.axis, self.min_point)\n elif self.step < 0 and self.current <= self.min_point + 1:\n self.frame_requested.emit(self.axis, self.max_point)\n self.timer.singleShot(self.interval, self.advance)\n else:\n # immediately advance one frame\n self.advance()\n self.started.emit()\n\n @Slot(float)\n def set_fps(self, fps):\n \"\"\"Set the frames per second value for the animation.\n\n Parameters\n ----------\n fps : float\n Frames per second for the animation.\n \"\"\"\n if fps == 0:\n return self.finish()\n self.step = 1 if fps > 0 else -1 # negative fps plays in reverse\n self.interval = 1000 / abs(fps)\n\n @Slot(tuple)\n def set_frame_range(self, frame_range):\n \"\"\"Frame range for animation, as (minimum_frame, maximum_frame).\n\n Parameters\n ----------\n frame_range : tuple(int, int)\n Frame range as tuple/list with range (minimum_frame, maximum_frame)\n \"\"\"\n self.dimsrange = self.dims.range[self.axis]\n\n if frame_range is not None:\n if frame_range[0] >= frame_range[1]:\n raise ValueError(\"frame_range[0] must be <= frame_range[1]\")\n if frame_range[0] < self.dimsrange[0]:\n raise IndexError(\"frame_range[0] out of range\")\n if frame_range[1] * self.dimsrange[2] >= self.dimsrange[1]:\n raise IndexError(\"frame_range[1] out of range\")\n self.frame_range = frame_range\n\n if self.frame_range is not None:\n self.min_point, self.max_point = self.frame_range\n else:\n self.min_point = 0\n self.max_point = int(\n np.floor(self.dimsrange[1] - self.dimsrange[2])\n )\n self.max_point += 1 # range is inclusive\n\n @Slot(str)\n def set_loop_mode(self, mode):\n \"\"\"Set the loop mode for the animation.\n\n Parameters\n ----------\n mode : str\n Loop mode for animation.\n Available options for the loop mode string enumeration are:\n - LoopMode.ONCE\n Animation will stop once movie reaches the max frame\n (if fps > 0) or the first frame (if fps < 0).\n - LoopMode.LOOP\n Movie will return to the first frame after reaching\n the last frame, looping continuously until stopped.\n - LoopMode.BACK_AND_FORTH\n Movie will loop continuously until stopped,\n reversing direction when the maximum or minimum frame\n has been reached.\n \"\"\"\n self.loop_mode = LoopMode(mode)\n\n def advance(self):\n \"\"\"Advance the current frame in the animation.\n\n Takes dims scale into account and restricts the animation to the\n requested frame_range, if entered.\n \"\"\"\n self.current += self.step * self.dimsrange[2]\n if self.current < self.min_point:\n if (\n self.loop_mode == LoopMode.BACK_AND_FORTH\n ): # 'loop_back_and_forth'\n self.step *= -1\n self.current = self.min_point + self.step * self.dimsrange[2]\n elif self.loop_mode == LoopMode.LOOP: # 'loop'\n self.current = self.max_point + self.current - self.min_point\n else: # loop_mode == 'once'\n return self.finish()\n elif self.current >= self.max_point:\n if (\n self.loop_mode == LoopMode.BACK_AND_FORTH\n ): # 'loop_back_and_forth'\n self.step *= -1\n self.current = (\n self.max_point + 2 * self.step * self.dimsrange[2]\n )\n elif self.loop_mode == LoopMode.LOOP: # 'loop'\n self.current = self.min_point + self.current - self.max_point\n else: # loop_mode == 'once'\n return self.finish()\n with self.dims.events.axis.blocker(self._on_axis_changed):\n self.frame_requested.emit(self.axis, self.current)\n # using a singleShot timer here instead of timer.start() because\n # it makes it easier to 
update the interval using signals/slots\n self.timer.singleShot(self.interval, self.advance)\n\n def finish(self):\n \"\"\"Emit the finished event signal.\"\"\"\n self.finished.emit()\n\n @Slot(Event)\n def _on_axis_changed(self, event):\n \"\"\"Update the current frame if the axis has changed.\"\"\"\n # slot for external events to update the current frame\n if event.axis == self.axis and hasattr(event, 'value'):\n self.current = event.value\n"
] |
[
[
"numpy.floor"
]
] |
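The only API recorded for the napari slider file above is `numpy.floor`, which `AnimationWorker.set_frame_range` uses to derive the default last playable frame from a `(min, max, step)` range. A minimal sketch of that computation with an arbitrary example range (the variable names roughly mirror the worker's attributes but the values are invented):

```python
import numpy as np

# A dims range as (start, stop, step); values chosen only for illustration.
dims_range = (0.0, 10.0, 1.0)

# Default animation bounds when no explicit frame_range is given:
# the last playable point sits one step before the range's stop value.
min_point = 0
max_point = int(np.floor(dims_range[1] - dims_range[2]))
max_point += 1  # make the range inclusive, as in the worker above

print(min_point, max_point)  # -> 0 10
```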
liyuan9988/IVOPEwithACME
|
[
"d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558"
] |
[
"src/ope/kiv_batch/learner.py"
] |
[
"# Lint as: python3\n# pylint: disable=bad-indentation,line-too-long\n\"\"\"DFIV Learner implementation.\"\"\"\n\nfrom typing import Dict, List\n\nimport acme\nfrom acme.tf import savers as tf2_savers\nfrom acme.tf import utils as tf2_utils\nfrom acme.utils import counting\nfrom acme.utils import loggers\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\nfrom src.utils.tf_linear_reg_utils import fit_linear, linear_reg_loss, linear_reg_pred, add_const_col\n\n\nclass KIVLearner(acme.Learner, tf2_savers.TFSaveable):\n \"\"\"KIVLearner.\n\n This is the learning component of aKIV learner. IE it takes a dataset as\n input and implements update functionality to learn from this dataset.\n Optionally it takes a replay client as well to allow for updating of\n priorities.\n \"\"\"\n\n def __init__(self,\n value_func: snt.Module,\n instrumental_feature: snt.Module,\n policy_net: snt.Module,\n discount: float,\n stage1_reg: float,\n stage2_reg: float,\n stage1_batch: int,\n stage2_batch: int,\n dataset: tf.data.Dataset,\n valid_dataset: tf.data.Dataset,\n counter: counting.Counter = None,\n logger: loggers.Logger = None,\n checkpoint: bool = True):\n \"\"\"Initializes the learner.\n\n Args:\n value_func: value function network\n instrumental_feature: dual function network.\n policy_net: policy network.\n discount: global discount.\n stage1_reg: ridge regularizer for stage 1 regression\n stage2_reg: ridge regularizer for stage 2 regression\n stage1_batch: number of mini-batches for stage 1 regression\n stage2_batch: number of mini-batches for stage 2 regression\n dataset: dataset to learn from.\n valid_dataset: validation dataset to compute score.\n counter: Counter object for (potentially distributed) counting.\n logger: Logger object for writing logs to.\n checkpoint: boolean indicating whether to checkpoint the learner.\n \"\"\"\n\n self._counter = counter or counting.Counter()\n self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)\n\n self.stage1_reg = stage1_reg\n self.stage2_reg = stage2_reg\n self.discount = discount\n\n self.stage1_weight = None\n self.stage2_weight = None\n\n # Get an iterator over the dataset.\n self._iterator = iter(dataset) # pytype: disable=wrong-arg-types\n self._valid_dataset = valid_dataset\n\n self.value_func = value_func\n self.value_feature = value_func._feature\n self.instrumental_feature = instrumental_feature\n self.policy = policy_net\n\n self.stage1_batch = stage1_batch\n self.stage2_batch = stage2_batch\n\n self._variables = [\n value_func.trainable_variables,\n instrumental_feature.trainable_variables,\n ]\n self._num_steps = tf.Variable(0, dtype=tf.int32)\n\n # Create a snapshotter object.\n if checkpoint:\n self._snapshotter = tf2_savers.Snapshotter(\n objects_to_save={'value_func': value_func,\n 'instrumental_feature': instrumental_feature,\n }, time_delta_minutes=60.)\n else:\n self._snapshotter = None\n\n # @tf.function\n def _step(self) -> Dict[str, tf.Tensor]:\n stage1_loss, stage2_loss = self.update_final_weight()\n self._num_steps.assign_add(1)\n\n fetches = {'stage1_loss': stage1_loss, 'stage2_loss': stage2_loss}\n return fetches\n\n def cal_stage1_weights(self, stage1_input):\n current_obs_1st, action_1st, _, discount_1st, next_obs_1st = stage1_input.data[:5]\n next_action_1st = self.policy(next_obs_1st)\n discount_1st = tf.expand_dims(discount_1st, axis=1)\n target_1st = discount_1st * self.value_feature(obs=next_obs_1st, action=next_action_1st)\n instrumental_feature_1st = 
self.instrumental_feature(obs=current_obs_1st, action=action_1st)\n\n nData, nDim = instrumental_feature_1st.shape\n nData = tf.cast(tf.shape(instrumental_feature_1st)[0], dtype=tf.float32)\n A = tf.matmul(instrumental_feature_1st, instrumental_feature_1st, transpose_a=True)\n A = A + self.stage1_reg * tf.eye(nDim) * nData\n b = tf.matmul(instrumental_feature_1st, target_1st, transpose_a=True)\n return A / nData, b / nData\n\n def cal_stage1_loss(self):\n loss_sum = 0.\n count = 0.\n for sample in self._valid_dataset:\n loss_sum += self.cal_stage1_loss_one_batch(sample)\n count += 1.\n return loss_sum / count\n\n def cal_stage1_loss_one_batch(self, stage1_input):\n assert self.stage1_weight is not None\n current_obs_1st, action_1st, _, discount_1st, next_obs_1st = stage1_input.data[:5]\n next_action_1st = self.policy(next_obs_1st)\n discount_1st = tf.expand_dims(discount_1st, axis=1)\n target_1st = discount_1st * self.value_feature(obs=next_obs_1st, action=next_action_1st)\n instrumental_feature_1st = self.instrumental_feature(obs=current_obs_1st, action=action_1st)\n pred = linear_reg_pred(instrumental_feature_1st, self.stage1_weight)\n return tf.reduce_mean((pred - target_1st) ** 2).numpy()\n\n def cal_stage2_weight(self, stage2_input, stage1_weight):\n current_obs_2nd, action_2nd, reward_2nd, _, _ = stage2_input.data[:5]\n reward_2nd = tf.expand_dims(reward_2nd, axis=1)\n instrumental_feature_2nd = self.instrumental_feature(obs=current_obs_2nd, action=action_2nd)\n predicted_feature_2nd = linear_reg_pred(instrumental_feature_2nd, stage1_weight)\n current_feature_2nd = self.value_feature(obs=current_obs_2nd, action=action_2nd)\n predicted_feature_2nd = current_feature_2nd - self.discount * predicted_feature_2nd\n\n nData, nDim = predicted_feature_2nd.shape\n nData = tf.cast(tf.shape(predicted_feature_2nd)[0], dtype=tf.float32)\n A = tf.matmul(predicted_feature_2nd, predicted_feature_2nd, transpose_a=True)\n A = A + self.stage2_reg * tf.eye(nDim) * nData\n b = tf.matmul(predicted_feature_2nd, reward_2nd, transpose_a=True)\n return A / nData, b / nData\n\n def cal_stage2_loss(self):\n loss_sum = 0.\n count = 0.\n for sample in self._valid_dataset:\n loss_sum += self.cal_stage2_loss_one_batch(sample)\n count += 1.\n return loss_sum / count\n\n def cal_stage2_loss_one_batch(self, sample):\n assert self.stage1_weight is not None\n assert self.stage2_weight is not None\n\n current_obs_2nd, action_2nd, reward_2nd, _, _ = sample.data[:5]\n reward_2nd = tf.expand_dims(reward_2nd, axis=1)\n instrumental_feature_2nd = self.instrumental_feature(obs=current_obs_2nd, action=action_2nd)\n predicted_feature_2nd = linear_reg_pred(instrumental_feature_2nd, self.stage1_weight)\n current_feature_2nd = self.value_feature(obs=current_obs_2nd, action=action_2nd)\n predicted_feature_2nd = current_feature_2nd - self.discount * predicted_feature_2nd\n\n pred = linear_reg_pred(predicted_feature_2nd, self.stage2_weight)\n return tf.reduce_mean((pred - reward_2nd) ** 2).numpy()\n\n def update_final_weight(self):\n # calculate stage1 weights\n instrumental_feature_dim = self.instrumental_feature.feature_dim()\n value_feature_dim = self.value_feature.feature_dim()\n A = tf.zeros((instrumental_feature_dim, instrumental_feature_dim))\n b = tf.zeros((instrumental_feature_dim, value_feature_dim))\n for _ in range(self.stage1_batch):\n data = next(self._iterator)\n A_new, b_new = self.cal_stage1_weights(data)\n A = A + A_new\n b = b + b_new\n\n self.stage1_weight = tf.linalg.solve(A, b)\n # calculate training loss for the 
last batch\n # it may be replaced to validation data\n stage1_loss = None\n if self._valid_dataset is not None:\n stage1_loss = self.cal_stage1_loss()\n\n # calculate stage2 weights\n A = tf.zeros((value_feature_dim, value_feature_dim))\n b = tf.zeros((value_feature_dim, 1))\n for _ in range(self.stage2_batch):\n data = next(self._iterator)\n A_new, b_new = self.cal_stage2_weight(data, self.stage1_weight)\n A = A + A_new\n b = b + b_new\n\n self.stage2_weight = tf.linalg.solve(A, b)\n # calculate training loss for the last batch\n # it may be replaced to validation data\n stage2_loss = None\n if self._valid_dataset is not None:\n stage2_loss = self.cal_stage2_loss()\n\n self.value_func.weight.assign(self.stage2_weight)\n return stage1_loss, stage2_loss\n\n def step(self):\n # Do a batch of SGD.\n result = self._step()\n\n # Update our counts and record it.\n counts = self._counter.increment(steps=1)\n result.update(counts)\n\n # Snapshot and attempt to write logs.\n if self._snapshotter is not None:\n self._snapshotter.save()\n self._logger.write(result)\n\n return result\n\n def get_variables(self, names: List[str]) -> List[np.ndarray]:\n return tf2_utils.to_numpy(self._variables)\n\n @property\n def state(self):\n \"\"\"Returns the stateful parts of the learner for checkpointing.\"\"\"\n return {\n 'value_feature': self.value_feature,\n 'instrumental_feature': self.instrumental_feature,\n 'num_steps': self._num_steps\n }\n"
] |
[
[
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.eye",
"tensorflow.expand_dims",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.linalg.solve",
"tensorflow.reduce_mean"
]
] |
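The TensorFlow calls listed for this learner all serve a batched ridge regression: `tf.matmul` and `tf.eye` assemble the regularised normal equations and `tf.linalg.solve` solves them, as in `cal_stage1_weights` and `update_final_weight` above. A self-contained sketch of that pattern on random data (the sizes and regulariser value are arbitrary, and plain tensors stand in for the feature networks):

```python
import tensorflow as tf

tf.random.set_seed(0)
n, d = 256, 8
reg = 1e-3

X = tf.random.normal((n, d))   # stand-in for instrumental / value features
y = tf.random.normal((n, 1))   # stand-in for regression targets

n_f = tf.cast(tf.shape(X)[0], tf.float32)
A = tf.matmul(X, X, transpose_a=True) + reg * tf.eye(d) * n_f  # X^T X + n*reg*I
b = tf.matmul(X, y, transpose_a=True)                          # X^T y

weight = tf.linalg.solve(A, b)          # ridge solution, shape (d, 1)
pred = tf.matmul(X, weight)
print(tf.reduce_mean((pred - y) ** 2).numpy())
```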
ManuelALH/ProyectoTeoriaComputacionDFA
|
[
"f989c71934f2d31f25c31f60ed1aab9e8e0a971a"
] |
[
"visual_automata/colors.py"
] |
[
"from colormath.color_conversions import convert_color\nfrom colormath.color_objects import sRGBColor\nfrom typing import Generator\nimport numpy as np\n\n\ndef create_palette(\n start_rgb: sRGBColor, end_rgb: sRGBColor, n: int, colorspace: sRGBColor\n) -> list:\n \"\"\"\n Generates color palette based on start and end color.\n\n Args:\n start_rgb (sRGBColor): Palette start color.\n end_rgb (sRGBColor): Palette end color.\n n (int): Number of colors in the palette.\n colorspace (sRGBColor): The colorspace to use.\n\n Returns:\n list: Generated color palette.\n \"\"\"\n # convert start and end to a point in the given colorspace\n start = convert_color(start_rgb, colorspace).get_value_tuple()\n end = convert_color(end_rgb, colorspace).get_value_tuple()\n\n # create a set of n points along start to end\n points = list(zip(*[np.linspace(start[i], end[i], n) for i in range(3)]))\n\n # create a color for each point and convert back to rgb\n rgb_colors = [\n convert_color(colorspace(*point), sRGBColor) for point in points\n ]\n\n # finally convert rgb colors back to hex\n return [color.get_rgb_hex() for color in rgb_colors]\n\n\ndef hex_to_rgb_color(hex: str) -> sRGBColor:\n \"\"\"\n Converts hex color to RBG color.\n\n Args:\n hex (str): Hex color code.\n\n Returns:\n sRGBColor: RBG color values.\n \"\"\"\n return sRGBColor(*[int(hex[i + 1: i + 3], 16) for i in (0, 2, 4)],\n is_upscaled=True)\n\n\ndef list_cycler(lst: list) -> Generator[list, None, None]:\n \"\"\"\n Generator that yields elements of a list. If all list values are yielded,\n it resets and start from the beginning.\n\n Args:\n lst (list): List to yield elements from.\n\n Yields:\n Generator[list, None, None]: Generator yielding list elements.\n \"\"\"\n while True:\n yield from lst\n"
] |
[
[
"numpy.linspace"
]
] |
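`create_palette` above relies on `numpy.linspace` to interpolate each colour channel between the start and end points. A stripped-down sketch of the same idea working directly on RGB triples, skipping the `colormath` colourspace round trip (the endpoint colours are chosen arbitrarily):

```python
import numpy as np

def simple_palette(start_rgb, end_rgb, n):
    """Interpolate each channel independently and return n hex colours."""
    channels = [np.linspace(s, e, n) for s, e in zip(start_rgb, end_rgb)]
    return [
        "#{:02x}{:02x}{:02x}".format(*(int(round(c)) for c in point))
        for point in zip(*channels)
    ]

print(simple_palette((255, 0, 0), (0, 0, 255), 5))
# ['#ff0000', '#bf0040', '#800080', '#4000bf', '#0000ff']
```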
Baukmeister/rtex
|
[
"0ac06e5d9a467d915cde97a0919f168a176abe21"
] |
[
"load_manual_eval_tags.py"
] |
[
"\"\"\"\nThis script loads the corresponding to repot text for certain patient IDs\n\"\"\"\nimport json\nimport os\nimport pydicom\nfrom PIL import Image\nimport pandas as pd\n\nDATA_DIR = \"./data/tags\"\nOUTPUT_DIR = \"./manual_classification\"\n\ngroups = pd.read_csv(f\"{DATA_DIR}/groups.csv\", sep=\";\")\ntags = pd.read_csv(f\"{DATA_DIR}/iu_xray_all_test.tsv\", sep=\"\\t\")\nresult = pd.DataFrame(columns=[\"pat_id\", \"sex\", \"normal\"])\n\ngroup_pat_id_list = groups.groupby(\"group\")\n\nfor _, relevant_group in group_pat_id_list:\n relevant_group_name = f\"group_{int(relevant_group['group'].iloc[0])}\"\n\n for _, row in relevant_group.iterrows():\n pat_id = row[\"pat_id\"]\n pat_tags = tags[tags[\"reports\"] == pat_id][\"mti_tags\"]\n is_normal = \"Normal\" if (pat_tags == \"none\").iloc[0] else \"Abnormal\"\n result = result.append(\n {'pat_id': pat_id, 'sex': row['sex'], 'normal': is_normal},\n ignore_index=True,\n )\n\nresult.to_csv(f\"{OUTPUT_DIR}/sex_matched_normality.csv\")"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
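The script above builds its result frame with row-by-row `DataFrame.append`, an API pandas has since removed (deprecated in 1.4, removed in 2.0). A small sketch of the usual replacement, collecting dicts and constructing the `DataFrame` once, with made-up patient records:

```python
import pandas as pd

# Hypothetical records standing in for the per-patient rows built above.
rows = [
    {"pat_id": "CXR1", "sex": "F", "normal": "Normal"},
    {"pat_id": "CXR2", "sex": "M", "normal": "Abnormal"},
]

# Collect dicts first, build the DataFrame once at the end;
# same result as appending row by row, but works on pandas >= 2.0.
result = pd.DataFrame(rows, columns=["pat_id", "sex", "normal"])
print(result)
```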
Grantzho/Labs
|
[
"935707defeef61a856a248eddabbd0dcc87c51c1"
] |
[
"app/data.py"
] |
[
"\"\"\"\nBloomTech Labs DS Data Engineer Role\n- Database Interface\n- Visualization Interface\n\"\"\"\nimport os\nimport re\nimport string\nfrom random import randint\nfrom typing import Iterable, Dict, List\n\nimport pandas as pd\nimport psycopg2\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.graph_objs import Figure\nfrom psycopg2 import sql\nfrom dotenv import load_dotenv\n\n\nclass Data:\n load_dotenv()\n db_url = os.getenv(\"DB_URL\")\n\n def _setup(self, table_name: str, columns: Iterable[str]):\n self._action(f\"\"\"CREATE TABLE IF NOT EXISTS {table_name} \n ({', '.join(columns)});\"\"\")\n\n def _action(self, sql_action):\n conn = psycopg2.connect(self.db_url)\n curs = conn.cursor()\n curs.execute(sql_action)\n conn.commit()\n curs.close()\n conn.close()\n\n def _query(self, sql_query) -> list:\n conn = psycopg2.connect(self.db_url)\n curs = conn.cursor()\n curs.execute(sql_query)\n results = curs.fetchall()\n curs.close()\n conn.close()\n return results\n\n def count(self) -> int:\n return self._query(\"SELECT COUNT(*) FROM features\")[0][0]\n\n def columns(self) -> List[str]:\n return [col[3] for col in self._query(\n \"\"\"SELECT * FROM information_schema.columns \n WHERE table_name = 'features';\"\"\"\n )]\n\n def rows(self) -> List[List]:\n return self._query(\"SELECT * FROM features;\")\n\n def df(self):\n return pd.DataFrame(data=self.rows(), columns=self.columns())\n\n def row(self, idx: int) -> Dict:\n df = self.df()\n return df[df[\"idx\"] == idx].to_dict(orient=\"records\")[0]\n\n def format_target(self, target):\n return f\"Class {str(target).rjust(2, '0')}\"\n\n def random_row(self, n_features=3):\n features = tuple(randint(1, 6) for _ in range(n_features))\n return *features, self.format_target(sum(features))\n\n def joined_rows(self, n_rows):\n return \",\".join(str(self.random_row()) for _ in range(n_rows))\n\n def seed(self, n_rows: int):\n self._action(f\"\"\"INSERT INTO\n features (feature_1, feature_2, feature_3, target)\n VALUES {self.joined_rows(n_rows)};\"\"\")\n\n @staticmethod\n def cleaner(text: str) -> str:\n return re.sub(r\"\\s+\", \" \", text.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ).strip())\n\n def insert(self, feature_1, feature_2, feature_3, target):\n self._action(sql.SQL(\"\"\"INSERT INTO features\n (feature_1, feature_2, feature_3, target)\n VALUES ({},{},{},{});\"\"\").format(\n sql.Literal(feature_1),\n sql.Literal(feature_2),\n sql.Literal(feature_3),\n sql.Literal(self.format_target(self.cleaner(target))),\n ))\n return int(self._query(sql.SQL(\"\"\"SELECT idx FROM features \n ORDER BY idx DESC LIMIT 1;\"\"\"))[0][0])\n\n def reset(self):\n self._action(\"TRUNCATE TABLE features RESTART IDENTITY;\")\n\n def crosstab_vis(self, feature_id) -> Figure:\n if feature_id not in range(1, 4):\n return Figure()\n feature_name = f\"feature_{feature_id}\"\n feature_title = feature_name.replace('_', ' ').title()\n df = self.df()\n cross_tab = pd.crosstab(\n df[\"target\"],\n df[feature_name],\n )\n data = [\n go.Bar(name=col, x=cross_tab.index, y=cross_tab[col])\n for col in cross_tab.columns\n ]\n title = f\"Target by {feature_title} Crosstab\"\n layout = go.Layout(\n title=title,\n barmode=\"stack\",\n colorway=px.colors.qualitative.Antique,\n )\n return go.Figure(data=data, layout=layout)\n\n def target_percent_vis(self):\n df = self.df()[\"target\"].value_counts().to_frame()\n data = go.Pie(\n labels=df.index.values,\n values=df[\"target\"],\n textinfo='label+percent',\n showlegend=False,\n 
hole=0.5,\n )\n layout = go.Layout(\n title=\"Target Percentage\",\n colorway=px.colors.qualitative.Antique,\n )\n return go.Figure(data=data, layout=layout)\n\n\nif __name__ == '__main__':\n db = Data()\n # db._action(\"DROP TABLE features\")\n # db._setup(\"features\", [\n # \"idx SERIAL PRIMARY KEY NOT NULL\",\n # \"feature_1 INT8 NOT NULL\",\n # \"feature_2 INT8 NOT NULL\",\n # \"feature_3 INT8 NOT NULL\",\n # \"target varchar(10) NOT NULL\"\n # ])\n # db.reset()\n # db.seed(1024)\n db.crosstab_vis(1).show()\n # db.target_percent_vis().show()\n"
] |
[
[
"pandas.crosstab"
]
] |
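`crosstab_vis` above hinges on `pandas.crosstab` to pivot target counts against one feature before each column becomes a stacked `go.Bar`. A minimal sketch of just that crosstab step on toy data (no database or Plotly needed; the class labels below are invented):

```python
import pandas as pd

df = pd.DataFrame({
    "feature_1": [1, 2, 2, 3, 1, 2],
    "target": ["Class 05", "Class 07", "Class 07", "Class 09", "Class 05", "Class 09"],
})

# Rows are target classes, columns are feature values, cells are counts;
# each crosstab column then becomes one bar series in the stacked chart.
cross_tab = pd.crosstab(df["target"], df["feature_1"])
print(cross_tab)
```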
9Strike/ap_praktikum
|
[
"2771f995372c1d075d0a5fcaaeb4c4214a0dfd8c"
] |
[
"datstat.py"
] |
[
"import numpy as np\nfrom numpy import sqrt\n\ndef mv(x):\n s = 0.0\n for i in range(len(x)):\n s += x[i]\n return s / len(x)\n\ndef dsto(x):\n s = 0.0\n for i in range(len(x)):\n s += (x[i] - mv(x))**2\n return sqrt(s / (len(x) - 1))\n\ndef dsto_mv(x):\n return dsto(x) / sqrt(len(x))\n\ndef dsys_mv(x):\n return sqrt(np.sum(x**2)) / len(x)\n\ndef dtot(dsys, dsto):\n return sqrt(dsys**2 + dsto**2)\n\ndef chi2(yo, dyo, ye, dye=[]):\n if (dye == []):\n dye = [0.0 for i in range(len(ye))]\n chi2 = 0.0\n for i in range(len(yo)):\n chi2 += (yo[i] - ye[i])**2 / (dyo[i]**2 + dye[i]**2)\n return chi2\n\ndef chi2_red(yo, dyo, ye, dye=[], dof=0):\n if (dof == 0):\n dof = len(ye)\n return chi2(yo, dyo, ye, dye) / dof\n\ndef dev(yo, dyo, ye, dye=None):\n if dye is None:\n dye = np.zeros_like(dyo)\n return np.abs(yo - ye) / sqrt(dyo**2 + dye**2)"
] |
[
[
"numpy.sum",
"numpy.zeros_like",
"numpy.abs",
"numpy.sqrt"
]
] |
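The helpers above hand-roll the sample mean and standard deviation, with NumPy supplying the vectorised pieces (`np.sum`, `np.sqrt`, among others). A quick sketch checking those formulas against NumPy's built-ins on an arbitrary array:

```python
import numpy as np

x = np.array([1.2, 1.5, 1.1, 1.4, 1.3])

mean = np.sum(x) / len(x)                                  # same as mv(x)
stdev = np.sqrt(np.sum((x - mean) ** 2) / (len(x) - 1))    # same as dsto(x)

assert np.isclose(mean, np.mean(x))
assert np.isclose(stdev, np.std(x, ddof=1))  # ddof=1 gives the sample std dev
print(mean, stdev)
```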
jrderek/some-projects
|
[
"ad46083d451763a4201d4b4057086cf29dd8bf13"
] |
[
"Data-Science-Projects-master/KaggleAmazon/predict_keras.py"
] |
[
"import train_keras\nfrom keras.models import load_model\nimport os\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom keras.callbacks import ModelCheckpoint\nimport sys\n\nTF_CPP_MIN_LOG_LEVEL=2\nTEST_BATCH = 128\n\ndef load_params():\n X_test = os.listdir('./test-jpg')\n X_test = [fn.replace('.jpg', '') for fn in X_test]\n model = load_model('model_amazon6.h5', custom_objects={'fbeta': train_keras.fbeta})\n with open('tag_columns.txt', 'r') as f:\n tag_columns = f.read().split('\\n')\n return X_test, model, tag_columns\n\ndef prediction(X_test, model, tag_columns, test_folder):\n result = []\n for i in tqdm(range(0, len(X_test), TEST_BATCH)):\n X_batch = X_test[i:i+TEST_BATCH]\n X_batch = np.array([train_keras.preprocess(train_keras.load_image(fn, folder=test_folder)) for fn in X_batch])\n p = model.predict(X_batch)\n result.append(p)\n\n r = np.concatenate(result)\n r = r > 0.5\n table = []\n for row in r:\n t = []\n for b, v in zip(row, tag_columns):\n if b:\n t.append(v.replace('tag_', ''))\n table.append(' '.join(t))\n print('Prediction done !')\n return table\n\ndef launch(test_folder):\n X_test, model, tag_columns = load_params()\n table = prediction(X_test, model, tag_columns, test_folder)\n try:\n df_pred = pd.DataFrame.from_dict({'image_name': X_test, 'tags': table})\n df_pred.to_csv('submission9.csv', index=False)\n except:\n np.save('image_name', X_test)\n np.save('table', table)\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n test_folder = sys.argv[1]\n else:\n test_folder='test-jpg'\n launch(test_folder)"
] |
[
[
"numpy.concatenate",
"pandas.DataFrame.from_dict",
"numpy.save"
]
] |
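`prediction` above gathers per-batch model outputs with `numpy.concatenate`, thresholds them at 0.5, and hands the result to `pandas.DataFrame.from_dict`. A sketch of that post-processing with random numbers standing in for the Keras predictions (the tag names are invented):

```python
import numpy as np
import pandas as pd

# Stand-ins for per-batch model outputs: 3 batches of 2 images x 4 tag probabilities.
batch_preds = [np.random.rand(2, 4) for _ in range(3)]
tag_columns = ["tag_clear", "tag_cloudy", "tag_haze", "tag_water"]

probs = np.concatenate(batch_preds)   # (6, 4) matrix of probabilities
flags = probs > 0.5                   # binarise at the 0.5 threshold
tags = [
    " ".join(t.replace("tag_", "") for t, keep in zip(tag_columns, row) if keep)
    for row in flags
]

df_pred = pd.DataFrame.from_dict(
    {"image_name": [f"img_{i}" for i in range(len(tags))], "tags": tags}
)
print(df_pred.head())
```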
jinglebot/CarND-Capstone
|
[
"cf81f270eedda9e9b2f09d01cd23f74e6069b2d3"
] |
[
"ros/src/tl_detector/light_classification/tl_classifier.py"
] |
[
"import numpy as np\nimport os\nimport tensorflow as tf\n\nfrom styx_msgs.msg import TrafficLight\n\nFROZEN_SIM_INFERENCE_GRAPH = os.getcwd() + \"/sim_traffic_light_graph.pb\"\nFROZEN_SITE_INFERENCE_GRAPH = os.getcwd() + \"/site_traffic_light_graph.pb\"\nSCORE_THRESHOLD = 0.5\nMAX_BOXES = 3\n\nclass TLClassifier(object):\n def __init__(self, is_site):\n if (is_site):\n graph_loc = FROZEN_SITE_INFERENCE_GRAPH\n else:\n graph_loc = FROZEN_SIM_INFERENCE_GRAPH\n\n #load classifier\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_loc, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n # end with\n\n self.sess = tf.Session(graph=self.detection_graph)\n\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # end with\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #implement light color prediction\n state = TrafficLight.UNKNOWN\n with self.detection_graph.as_default():\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_expanded})\n\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n\n max_score = 0\n for i in range(min(MAX_BOXES, boxes.shape[0])):\n if (scores[i] > SCORE_THRESHOLD) and (scores[i] > max_score):\n if (classes[i] == 1):\n state = TrafficLight.GREEN\n elif (classes[i] == 2):\n state = TrafficLight.RED\n elif (classes[i] == 3):\n state = TrafficLight.YELLOW\n else:\n state = TrafficLight.UNKNOWN\n max_score = scores[i]\n # end for\n # end with\n\n return state"
] |
[
[
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"numpy.expand_dims"
]
] |
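Alongside the TF1 frozen-graph plumbing, the classifier above uses `np.expand_dims` to add the batch axis the detector expects and `np.squeeze` to drop it from the outputs. A NumPy-only sketch of that round trip with fake arrays (no TensorFlow session involved):

```python
import numpy as np

image = np.zeros((600, 800, 3), dtype=np.uint8)   # fake camera frame
image_expanded = np.expand_dims(image, axis=0)    # shape (1, 600, 800, 3)

fake_scores = np.random.rand(1, 100)              # detector output with batch dim
scores = np.squeeze(fake_scores)                  # shape (100,), ready to threshold
print(image_expanded.shape, scores.shape)
```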
yangrq1018/akshare
|
[
"fed5922110727edf2f14bcc68d2045bcf67ebb5a"
] |
[
"akshare/stock_feature/stock_em_jgdy.py"
] |
[
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nAuthor: Albert King\ndate: 2019/12/27 18:02\ncontact: jindaxiang@163.com\ndesc: 东方财富网-数据中心-特色数据-机构调研\n东方财富网-数据中心-特色数据-机构调研-机构调研统计: http://data.eastmoney.com/jgdy/tj.html\n东方财富网-数据中心-特色数据-机构调研-机构调研详细: http://data.eastmoney.com/jgdy/xx.html\n\"\"\"\nimport json\n\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\n\n# pd.set_option('display.max_columns', 500)\n\n\ndef _get_page_num_tj():\n \"\"\"\n 东方财富网-数据中心-特色数据-机构调研-机构调研统计\n http://data.eastmoney.com/jgdy/tj.html\n :return: int 获取 机构调研统计 的总页数\n \"\"\"\n url = \"http://data.eastmoney.com/DataCenter_V3/jgdy/gsjsdy.ashx\"\n params = {\n \"pagesize\": \"5000\",\n \"page\": \"2\",\n \"js\": \"var sGrabtEb\",\n \"param\": \"\",\n \"sortRule\": \"-1\",\n \"sortType\": \"0\",\n \"rt\": \"52581365\",\n }\n res = requests.get(url, params=params)\n data_json = json.loads(res.text[res.text.find(\"={\")+1:])\n return data_json[\"pages\"]\n\n\ndef _get_page_num_detail():\n \"\"\"\n 东方财富网-数据中心-特色数据-机构调研-机构调研详细\n http://data.eastmoney.com/jgdy/xx.html\n :return: int 获取 机构调研详细 的总页数\n \"\"\"\n url = \"http://data.eastmoney.com/DataCenter_V3/jgdy/xx.ashx\"\n params = {\n \"pagesize\": \"5000\",\n \"page\": \"1\",\n \"js\": \"var SZGpIhFb\",\n \"param\": \"\",\n \"sortRule\": \"-1\",\n \"sortType\": \"0\",\n \"rt\": \"52581407\",\n }\n res = requests.get(url, params=params)\n data_json = json.loads(res.text[res.text.find(\"={\")+1:])\n return data_json[\"pages\"]\n\n\ndef stock_em_jgdy_tj():\n \"\"\"\n 东方财富网-数据中心-特色数据-机构调研-机构调研统计\n http://data.eastmoney.com/jgdy/tj.html\n :return: pandas.DataFrame\n \"\"\"\n url = \"http://data.eastmoney.com/DataCenter_V3/jgdy/gsjsdy.ashx\"\n page_num = _get_page_num_tj()\n temp_df = pd.DataFrame()\n for page in tqdm(range(1, page_num+1)):\n params = {\n \"pagesize\": \"5000\",\n \"page\": str(page),\n \"js\": \"var sGrabtEb\",\n \"param\": \"\",\n \"sortRule\": \"-1\",\n \"sortType\": \"0\",\n \"rt\": \"52581365\",\n }\n res = requests.get(url, params=params)\n data_json = json.loads(res.text[res.text.find(\"={\")+1:])\n temp_df = temp_df.append(pd.DataFrame(data_json[\"data\"]), ignore_index=True)\n return temp_df\n\n\ndef stock_em_jgdy_detail():\n \"\"\"\n 东方财富网-数据中心-特色数据-机构调研-机构调研详细\n http://data.eastmoney.com/jgdy/xx.html\n :return: 机构调研详细\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface3.eastmoney.com/EM_DataCenter_V3/api/JGDYMX/GetJGDYMX\"\n params = {\n \"js\": \"datatable8174128\",\n \"tkn\": \"eastmoney\",\n \"secuCode\": \"\",\n \"dateTime\": \"\",\n \"sortfield\": \"0\",\n \"sortdirec\": \"1\",\n \"pageNum\": \"1\",\n \"pageSize\": \"5000\",\n \"cfg\": \"jgdymx\",\n \"_\": \"1605088363693\",\n }\n r = requests.get(url, params=params)\n data_json = json.loads(r.text[r.text.find(\"(\")+1:-1])\n temp_df = pd.DataFrame([item.split(\"|\") for item in data_json[\"Data\"][0][\"Data\"]])\n temp_df.columns = data_json[\"Data\"][0][\"FieldName\"].split(\",\") + [\"_\"]\n temp_df = temp_df.iloc[:, :-1]\n return temp_df\n\n\nif __name__ == '__main__':\n stock_em_jgdy_tj_df = stock_em_jgdy_tj()\n print(stock_em_jgdy_tj_df)\n stock_em_jgdy_detail_df = stock_em_jgdy_detail()\n print(stock_em_jgdy_detail_df)\n"
] |
[
[
"pandas.DataFrame"
]
] |
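`stock_em_jgdy_detail` above turns pipe-separated records into a `pandas.DataFrame` and drops the dummy column created by the trailing separator. A sketch of that reshaping with a hypothetical two-record payload (the field names below are invented, not the endpoint's real schema):

```python
import pandas as pd

# Hypothetical payload mimicking the pipe-separated records returned by the endpoint.
raw = ["000001|平安银行|2020-01-02|", "600000|浦发银行|2020-01-03|"]
field_names = "SecuCode,SecuName,NoticeDate"

temp_df = pd.DataFrame([item.split("|") for item in raw])
temp_df.columns = field_names.split(",") + ["_"]  # trailing "|" yields an extra empty field
temp_df = temp_df.iloc[:, :-1]                    # drop the dummy column
print(temp_df)
```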
lonelyhentai/workspace
|
[
"2a996af58d6b9be5d608ed040267398bcf72403b",
"2a996af58d6b9be5d608ed040267398bcf72403b"
] |
[
"data_ai/comp3009/src/lab3/utils.py",
"data_ai/comp3006/src/test.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom typing import Dict, Any, Union, Iterable\nfrom numpy import ndarray\nimport math\nimport copy\n\n\ndef one_hot_encoder(labels: pd.Series) -> Dict[Any, ndarray]:\n ret = {}\n uniques = labels.unique()\n unique_num = len(uniques)\n for index, label in enumerate(uniques):\n ret[label] = np.zeros(unique_num).astype(np.int, copy=False)\n ret[label][index] = 1\n return ret\n\n\ndef value_encoder(labels: pd.Series) -> Dict[Any, int]:\n ret = {}\n uniques = labels.unique()\n for index, label in enumerate(uniques):\n ret[label] = index\n return ret\n\n\ndef data_split(dataset: pd.DataFrame, split_rate: float = 0.8, method: str = \"value\") -> \\\n (ndarray, ndarray, ndarray, ndarray, Union[Dict[Any, ndarray], Dict[Any, int]]):\n row_num, col_num = dataset.shape\n split_point = int(row_num * split_rate)\n x_train = dataset.iloc[:split_point, :-1].as_matrix(columns=None).astype(np.float64, copy=False)\n x_test = dataset.iloc[split_point:, :-1].as_matrix(columns=None).astype(np.float64, copy=False)\n factor_mapper: Union[Dict[Any, ndarray], Dict[Any, int]] = None\n y_train, y_test = None, None\n if method == \"value\":\n factor_mapper = value_encoder(dataset.iloc[:, -1])\n y_train = np.asarray(list(map(lambda x: factor_mapper[x], dataset.iloc[:split_point, -1])), dtype=np.int)\n y_test = np.asarray(list(map(lambda x: factor_mapper[x], dataset.iloc[split_point:, -1])), dtype=np.int)\n elif method == \"one-hot\":\n factor_mapper = one_hot_encoder(dataset.iloc[:, -1])\n y_train = pd.DataFrame(list(map(lambda x: factor_mapper[x], dataset.iloc[:split_point, -1])),\n dtype=np.int).as_matrix(\n columns=None)\n y_test = pd.DataFrame(list(map(lambda x: factor_mapper[x], dataset.iloc[split_point:, -1])), dtype=np.int) \\\n .as_matrix(\n columns=None)\n else:\n raise TypeError(\"invalid method\")\n return x_train, y_train, x_test, y_test, factor_mapper\n\n\ndef data_indent(dataset: pd.DataFrame, method: str = \"value\") -> \\\n (ndarray, ndarray, Union[Dict[Any, ndarray], Dict[Any, int]]):\n x_train = dataset.iloc[:, :-1].as_matrix(columns=None).astype(np.float64, copy=False)\n factor_mapper: Union[Dict[Any, ndarray], Dict[Any, int]] = None\n y_train = None\n if method == \"value\":\n factor_mapper = value_encoder(dataset.iloc[:, -1])\n y_train = np.asarray(list(map(lambda x: factor_mapper[x], dataset.iloc[:, -1])), dtype=np.int)\n elif method == \"one-hot\":\n factor_mapper = one_hot_encoder(dataset.iloc[:, -1])\n y_train = pd.DataFrame(list(map(lambda x: factor_mapper[x], dataset.iloc[:, -1])),\n dtype=np.int).as_matrix(\n columns=None)\n else:\n raise TypeError(\"invalid method\")\n return x_train, y_train, factor_mapper\n\n\ndef classification_score(classification, X_train: ndarray, y_train: ndarray, X_test: ndarray, y_test: ndarray,\n out_format: str = \"value\"):\n print(f\"{classification.__class__}开始训练...\")\n trained_classification = classification.fit(X_train, y_train)\n print(f\"{classification.__class__}完成训练\")\n print(f\"{classification.__class__}开始测试...\")\n pred_test: ndarray = trained_classification.predict(X_test)\n print(f\"{classification.__class__}完成测试\")\n print(f\"{classification.__class__}开始评分...\")\n count = 0\n if out_format == \"one-hot\":\n pred_rounded = np.asarray(list(map(round, pred_test.flatten()))).reshape(y_test.shape)\n for index, item in enumerate(pred_rounded):\n add_value = 1\n for j, jt in enumerate(item):\n if jt != y_test[index, j]:\n add_value = 0\n break\n count += add_value\n else:\n for index, item in 
enumerate(pred_test):\n if item == y_test[index]:\n count += 1\n print(f\"{classification.__class__}完成评分\")\n return count / len(pred_test)\n\n\ndef classification_cross_val_score(classification, X: ndarray, y: ndarray, cv: int = 10,\n out_format: str = \"value\") -> list:\n result_score = []\n num = len(y)\n groups = []\n group_num = int(num / cv - 1)\n for i in range(cv - 1):\n groups.append(list(range(i * group_num, (i + 1) * group_num)))\n groups.append(list(range(cv - 1 * group_num, num)))\n for i in range(cv):\n x_tests = X[groups[i]]\n y_tests = y[groups[i]]\n others = []\n for index, group in enumerate(groups):\n if index != i:\n others = others + group\n x_trains = X[others]\n y_trains = y[others]\n print(f\"{classification.__class__}开始第{i+1}折检验流程...\")\n result_score.append(\n classification_score(copy.deepcopy(classification), x_trains, y_trains, x_tests, y_tests, out_format))\n print(f\"{classification.__class__}完成第{i+1}折检验流程\")\n return result_score\n\n\ndef count(elements: Iterable[Any]) -> Dict[Any, int]:\n ret: Dict[Any, int] = {}\n for it in elements:\n if ret.get(it) is None:\n ret[it] = 0\n ret[it] += 1\n return ret\n",
"import pandas as pd\nimport numpy as np\nfrom os import path\nfrom path_service import LOG_DIR, DATA_DIR\nfrom sklearn.metrics import log_loss\nimport re\n\nprob_columns = list(map(lambda x: f\"prob{x}\", range(8)))\nprob_columns_without_end = list(map(lambda x: f\"prob{x}\", range(7)))\n\ndef row_check(df: pd.DataFrame):\n df.loc[:,prob_columns]=df.loc[:,prob_columns].apply(lambda x: x/np.sum(x),axis=1,result_type='expand')\n df = df.round(5)\n sum7 = np.sum(df.loc[:,prob_columns_without_end],axis=1)\n df.loc[:,'prob7'] = 1.0 - sum7\n return df\n\n\ndef get_prob_res(file_name: str):\n df: pd.DataFrame = pd.DataFrame([])\n with open(path.join(LOG_DIR, file_name), 'r') as prob_file:\n prob_lines = prob_file.readlines()\n probs = {}\n for i in range(8):\n probs[i] = []\n for line in prob_lines:\n words = re.split(r\"\\s\", line)\n for i in range(8):\n pos = i * 2\n prob_index = int(words[pos][-1])\n probs[prob_index].append(float(words[pos + 1]))\n df.loc[:, \"file_id\"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)\n for i in range(8):\n df.loc[:, f\"prob{i}\"] = pd.Series(probs[i], dtype=np.float)\n return row_check(df)\n\n\ndef get_single_res(file_name: str, true_mode: bool = True):\n df: pd.DataFrame = pd.DataFrame([])\n with open(path.join(LOG_DIR if not true_mode else DATA_DIR, file_name), 'r') as prob_file:\n prob_lines = prob_file.readlines()\n probs = {}\n for i in range(8):\n probs[i] = []\n j = 0\n for line in prob_lines:\n label = int(str.strip(re.split(r\"\\s\", line)[0])[-1])\n for i in range(8):\n if i == label:\n probs[i].append(1.0)\n else:\n probs[i].append(0.0)\n df.loc[:, \"file_id\"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)\n for i in range(8):\n df.loc[:, f\"prob{i}\"] = pd.Series(probs[i], dtype=np.float)\n return df\n\n\ndef get_probs(df: pd.DataFrame) -> pd.DataFrame:\n return df.loc[:, list(map(lambda x: f\"prob{x}\", range(8)))]\n\n\ndef check_valid_log_loss():\n valid_prob_df = get_prob_res('valid_prob.log')\n labels = get_single_res('security.valid', True)\n print(\"prob mode: \", log_loss(get_probs(labels), get_probs(valid_prob_df)))\n\n\ndef check_train_log_loss():\n valid_prob_df = get_prob_res('train_prob.log')\n labels = get_single_res('new_train', True)\n print(\"prob mode: \", log_loss(get_probs(labels), get_probs(valid_prob_df)))\n\n\ndef save_train_res(df: pd.DataFrame):\n df.to_csv(path.join(DATA_DIR, \"test_submit.csv\"), sep=\",\", index=False, float_format='%.5f')\n\n\nif __name__ == \"__main__\":\n check_valid_log_loss()\n check_train_log_loss()\n test_prob_df = get_prob_res(\"test_prob.log\")\n save_train_res(test_prob_df)\n df = pd.read_csv(path.join(DATA_DIR, \"test_submit.csv\"), sep=\",\")\n for index, row in df.iterrows():\n if np.abs(np.sum(row[list(map(lambda x: f\"prob{x}\", range(8)))]) - 1.0) > 1e-6:\n raise Exception(f\"sum prob not equal 1.0 in {index}\")\n"
] |
[
[
"numpy.zeros"
],
[
"numpy.sum",
"pandas.Series",
"pandas.DataFrame"
]
] |
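`one_hot_encoder` above builds its label-to-vector mapping with `numpy.zeros`. A compact sketch of the same construction on a toy `pandas.Series` of labels:

```python
import numpy as np
import pandas as pd

labels = pd.Series(["cat", "dog", "cat", "bird"])

# Build a {label: one-hot vector} mapping, as in one_hot_encoder above.
uniques = labels.unique()
mapping = {}
for index, label in enumerate(uniques):
    vec = np.zeros(len(uniques), dtype=int)
    vec[index] = 1
    mapping[label] = vec

print(mapping)
# {'cat': array([1, 0, 0]), 'dog': array([0, 1, 0]), 'bird': array([0, 0, 1])}
```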
Kindpire/inflated_convnets_pytorch
|
[
"8efe7748ad3b0df3de4cc1d18211988d5af442a3"
] |
[
"src/inflate.py"
] |
[
"import torch\n\ndef inflate_conv(conv2d,\n time_dim=3,\n time_padding=0,\n time_stride=1,\n time_dilation=1,\n center=False):\n # To preserve activations, padding should be by continuity and not zero\n # or no padding in time dimension\n kernel_dim = (time_dim, conv2d.kernel_size[0], conv2d.kernel_size[1])\n padding = (time_padding, conv2d.padding[0], conv2d.padding[1])\n stride = (time_stride, conv2d.stride[0], conv2d.stride[0])\n dilation = (time_dilation, conv2d.dilation[0], conv2d.dilation[1])\n conv3d = torch.nn.Conv3d(\n conv2d.in_channels,\n conv2d.out_channels,\n kernel_dim,\n padding=padding,\n dilation=dilation,\n stride=stride)\n # Repeat filter time_dim times along time dimension\n weight_2d = conv2d.weight.data\n if center:\n weight_3d = torch.zeros(*weight_2d.shape)\n weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)\n middle_idx = time_dim // 2\n weight_3d[:, :, middle_idx, :, :] = weight_2d\n else:\n weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)\n weight_3d = weight_3d / time_dim\n\n # Assign new params\n conv3d.weight = torch.nn.Parameter(torch.Tensor(weight_3d))\n #print('bias', conv2d.bias, type(conv2d.bias))\n if (conv2d.bias is not None):# and (len(conv2d.bias)):\n conv3d.bias = torch.nn.Parameter(torch.Tensor(conv2d.bias))\n return conv3d\n\n\ndef inflate_linear(linear2d, time_dim):\n \"\"\"\n Args:\n time_dim: final time dimension of the features\n \"\"\"\n linear3d = torch.nn.Linear(linear2d.in_features * time_dim,\n linear2d.out_features)\n weight3d = linear2d.weight.data.repeat(1, time_dim)\n weight3d = weight3d / time_dim\n\n linear3d.weight = torch.nn.Parameter(torch.Tensor(weight3d))\n linear3d.bias = torch.nn.Parameter(torch.Tensor(linear2d.bias))\n return linear3d\n\n\ndef inflate_batch_norm(batch2d):\n # In pytorch 0.2.0 the 2d and 3d versions of batch norm\n # work identically except for the check that verifies the\n # input dimensions\n\n batch3d = torch.nn.BatchNorm3d(batch2d.num_features)\n # retrieve 3d _check_input_dim function\n batch2d._check_input_dim = batch3d._check_input_dim\n return batch2d\n\n\ndef inflate_pool(pool2d,\n time_dim=1,\n time_padding=0,\n time_stride=None,\n time_dilation=1):\n kernel_dim = (time_dim, pool2d.kernel_size, pool2d.kernel_size)\n padding = (time_padding, pool2d.padding, pool2d.padding)\n if time_stride is None:\n time_stride = time_dim\n stride = (time_stride, pool2d.stride, pool2d.stride)\n if isinstance(pool2d, torch.nn.MaxPool2d):\n dilation = (time_dilation, pool2d.dilation, pool2d.dilation)\n pool3d = torch.nn.MaxPool3d(\n kernel_dim,\n padding=padding,\n dilation=dilation,\n stride=stride,\n ceil_mode=pool2d.ceil_mode)\n elif isinstance(pool2d, torch.nn.AvgPool2d):\n pool3d = torch.nn.AvgPool3d(kernel_dim, stride=stride)\n else:\n raise ValueError(\n '{} is not among known pooling classes'.format(type(pool2d)))\n return pool3d\n"
] |
[
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.BatchNorm3d",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.AvgPool3d",
"torch.Tensor"
]
] |
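`inflate_conv` above turns a 2D kernel into a 3D one by repeating the weights along a new time axis and dividing by `time_dim`. A short sketch of that inflation on a small convolution (layer sizes are arbitrary):

```python
import torch

conv2d = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
time_dim = 3

conv3d = torch.nn.Conv3d(3, 8, kernel_size=(time_dim, 3, 3), padding=(0, 1, 1))

# Repeat the 2D kernel along the new time axis and rescale so the inflated
# filter gives the same response as the 2D one on a temporally constant input.
weight_3d = conv2d.weight.data.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) / time_dim
conv3d.weight = torch.nn.Parameter(weight_3d)
if conv2d.bias is not None:
    conv3d.bias = torch.nn.Parameter(conv2d.bias.data.clone())

x = torch.randn(1, 3, time_dim, 16, 16)  # (batch, channels, time, H, W)
print(conv3d(x).shape)                   # -> torch.Size([1, 8, 1, 16, 16])
```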
martinbomio/tfx-bsl
|
[
"14b46fad556527009497f0de0161400ed7e234df"
] |
[
"tfx_bsl/tfxio/telemetry.py"
] |
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contains PTransforms that collects telemetry from produced by TFXIO.\"\"\"\n\nimport enum\nfrom typing import Iterable, Callable, List, Optional, Text\n\nimport apache_beam as beam\nimport numpy as np\nimport pyarrow as pa\nfrom tfx_bsl.arrow import array_util\nfrom tfx_bsl.arrow import table_util\nfrom tfx_bsl.telemetry import util as telemetry_util\n\n\n@beam.typehints.with_input_types(pa.RecordBatch)\n@beam.typehints.with_output_types(pa.RecordBatch)\n@beam.ptransform_fn\ndef ProfileRecordBatches(\n pcoll: beam.PCollection,\n telemetry_descriptors: Optional[List[Text]],\n logical_format: Text,\n physical_format: Text,\n distribution_update_probability: float = 0.1) -> beam.PCollection:\n \"\"\"An identity transform to profile RecordBatches and updated Beam metrics.\n\n Args:\n pcoll: a PCollection[pa.RecordBatch]\n telemetry_descriptors: a set of descriptors that identify the component that\n invokes this PTransform. These will be used to construct the namespace\n to contain the beam metrics created within this PTransform. All such\n namespaces will be prefixed by \"tfxio.\". If None, a default \"unknown\"\n descriptor will be used.\n logical_format: the logical format of the data (before parsed into\n RecordBatches). Used to construct metric names.\n physical_format: the physical format in which the data is stored on disk.\n Used to construct metric names.\n distribution_update_probability: probability to update the expensive,\n per-row distributions.\n\n Returns:\n `pcoll` (identity function).\n \"\"\"\n assert 0 < distribution_update_probability <= 1.0, (\n \"Invalid probability: {}\".format(distribution_update_probability))\n return pcoll | \"ProfileRecordBatches\" >> beam.ParDo(\n _ProfileRecordBatchDoFn(telemetry_descriptors, logical_format,\n physical_format, distribution_update_probability))\n\n\n@beam.typehints.with_input_types(bytes)\n@beam.typehints.with_output_types(bytes)\n@beam.ptransform_fn\ndef ProfileRawRecords(\n pcoll: beam.PCollection,\n telemetry_descriptors: Optional[List[Text]],\n logical_format: Text,\n physical_format: Text) -> beam.PCollection:\n \"\"\"An identity transform to profile raw records for record based TFXIO.\"\"\"\n return pcoll | \"ProfileRawRecords\" >> beam.ParDo(_ProfileRawRecordDoFn(\n telemetry_descriptors, logical_format, physical_format))\n\n\nclass _ValueType(enum.IntEnum):\n INT = 0\n FLOAT = 1\n STRING = 2\n NULL = 3 # pa.is_null()\n OTHER = 4\n\n\n_IO_TELEMETRY_DESCRIPTOR = [\"io\"]\n_UNKNOWN_TELEMETRY_DESCRIPTORS = [\"UNKNOWN_COMPONENT\"]\n\n\nclass _ProfileRecordBatchDoFn(beam.DoFn):\n \"\"\"A DoFn that profiles RecordBatches and updates Beam counters.\n\n The following metrics are maintained:\n\n num_rows: Counter. Total number of rows.\n record_batch_byte_size: Distribution. In-memory size of the RecordBatches.\n num_columns: Distribution. 
Number of present columns per row.\n A column is present in a row if its value is not None.\n num_feature_values: Distribution. Number of (primitive) values per cell.\n num_feature_values[_ValueType]: Distribution. Similar to num_feature_values,\n but sliced by _ValueType.\n num_cells[_ValueType]: Counter. Total number of cells by _ValueType. Note that\n it's sliced by primitive_type if a column is of type\n list<primitive_type>. For other columns, the slice is OTHER.\n \"\"\"\n\n def __init__(self, telemetry_descriptors: Optional[List[Text]],\n logical_format: Text,\n physical_format: Text, dist_update_prob: float):\n if telemetry_descriptors is None:\n telemetry_descriptors = _UNKNOWN_TELEMETRY_DESCRIPTORS\n metric_namespace = telemetry_util.MakeTfxNamespace(telemetry_descriptors +\n _IO_TELEMETRY_DESCRIPTOR)\n namer = _GetMetricNamer(logical_format, physical_format)\n self._num_rows = beam.metrics.Metrics.counter(metric_namespace,\n namer(\"num_rows\"))\n self._byte_size_dist = beam.metrics.Metrics.distribution(\n metric_namespace, namer(\"record_batch_byte_size\"))\n self._num_columns_dist = beam.metrics.Metrics.distribution(\n metric_namespace, namer(\"num_columns\"))\n self._num_feature_values_dist = beam.metrics.Metrics.distribution(\n metric_namespace, namer(\"num_feature_values\"))\n self._num_feature_values_dist_by_type = {\n t: beam.metrics.Metrics.distribution(\n metric_namespace, namer(\"num_feature_values[{}]\".format(t.name)))\n for t in _ValueType\n }\n self._num_cells_by_type = {\n t: beam.metrics.Metrics.counter(metric_namespace,\n namer(\"num_cells[{}]\".format(t.name)))\n for t in _ValueType\n }\n self._dist_update_prob = dist_update_prob\n\n def _UpdateNumColumnsDist(self, record_batch: pa.RecordBatch) -> None:\n # Define number of columns of a row to be the number of cells in that row\n # whose values are not null.\n # It can be computed by summing up (element wise) the negation of null\n # flags (converted to integer) of all the arrays.\n null_bitmaps = [\n np.asarray(array_util.GetArrayNullBitmapAsByteArray(c)).view(np.bool)\n for c in record_batch]\n indicators = [(~bitmap).view(np.uint8) for bitmap in null_bitmaps]\n sum_indicators = np.zeros(record_batch.num_rows, dtype=np.int64)\n for indicator in indicators:\n np.add(sum_indicators, indicator, out=sum_indicators)\n for num_column in sum_indicators.tolist():\n self._num_columns_dist.update(num_column)\n\n def _UpdateNumValuesDist(self, record_batch: pa.RecordBatch) -> None:\n # Updates the distribution of number of values per cell.\n # Note that a cell could be of a deeper nested type (e.g.\n # Struct or nested ListArray), the number of values actually means\n # lengths of leaves.\n # For example, given the following row:\n # col1 | col2\n # [[[1, 2], [3]]] | [{'a': [1, 2]}, {'b': [3]}]]\n # the number of values for col1 is 3\n # the number of values for col2 will be updated twice because there are\n # two leaves (col2.a, col2.b), with values 2, 1 respectively.\n\n # Algorithm: create a mapping `m` (int->int) for array `a` so that if\n # m[i] == j, then a[i] belongs to row j in the record batch.\n # Then, np.bincount(m, minlength=record_batch.num_rows)[i] is how many\n # values in `a` belong to row i. 
As we flatten the array, the mapping\n # needs to be maintained so that it maps a flattened value to a row.\n num_rows = record_batch.num_rows\n\n def _RecursionHelper(row_indices, array):\n \"\"\"Flattens `array` while maintains the `row_indices`.\"\"\"\n array_type = array.type\n if _IsListLike(array_type):\n parent_indices = np.asarray(\n array_util.GetFlattenedArrayParentIndices(array))\n _RecursionHelper(row_indices[parent_indices], array.flatten())\n elif pa.types.is_struct(array_type):\n for child in array.flatten():\n _RecursionHelper(row_indices, child)\n else:\n value_type = _GetValueType(array.type)\n dist_by_type = self._num_feature_values_dist_by_type[value_type]\n for num_values in np.bincount(row_indices, minlength=num_rows).tolist():\n dist_by_type.update(num_values)\n self._num_feature_values_dist.update(num_values)\n\n for column in record_batch:\n _RecursionHelper(np.arange(num_rows, dtype=np.int64), column)\n\n def _UpdateNumCellsCounters(self, record_batch: pa.RecordBatch) -> None:\n num_rows = record_batch.num_rows\n for column in record_batch:\n column_type = column.type\n if pa.types.is_null(column_type):\n self._num_cells_by_type[_ValueType.NULL].inc(num_rows)\n continue\n\n if _IsListLike(column_type):\n value_type = _GetValueType(column_type.value_type)\n else:\n value_type = _ValueType.OTHER\n self._num_cells_by_type[value_type].inc(num_rows - column.null_count)\n\n def process(self, record_batch: pa.RecordBatch) -> Iterable[pa.RecordBatch]:\n num_rows = record_batch.num_rows\n self._num_rows.inc(num_rows)\n self._UpdateNumCellsCounters(record_batch)\n total_byte_size = table_util.TotalByteSize(\n record_batch, ignore_unsupported=True)\n self._byte_size_dist.update(total_byte_size)\n # These distributions are per-row therefore expensive to update because\n # dist.update() needs to be called num_rows * k times.\n if np.random.rand() < self._dist_update_prob:\n self._UpdateNumColumnsDist(record_batch)\n self._UpdateNumValuesDist(record_batch)\n yield record_batch\n\n\ndef _IsListLike(data_type: pa.DataType) -> bool:\n return pa.types.is_list(data_type) or pa.types.is_large_list(data_type)\n\n\ndef _GetValueType(data_type: pa.DataType) -> _ValueType:\n \"\"\"Maps a `pa.DataType` to `ValueType`.\"\"\"\n if pa.types.is_integer(data_type):\n return _ValueType.INT\n if pa.types.is_floating(data_type):\n return _ValueType.FLOAT\n if (pa.types.is_string(data_type) or\n pa.types.is_binary(data_type) or\n pa.types.is_large_string(data_type) or\n pa.types.is_large_binary(data_type)):\n return _ValueType.STRING\n if pa.types.is_null(data_type):\n return _ValueType.NULL\n return _ValueType.OTHER\n\n\nclass _ProfileRawRecordDoFn(beam.DoFn):\n \"\"\"A DoFn that profiles raw records and updates Beam counters.\n\n The following metrics are maintained:\n\n num_raw_records: Counter. Total number of rows.\n raw_record_byte_size: Distribution. 
Byte size of the raw records.\n \"\"\"\n\n def __init__(self, telemetry_descriptors: Optional[List[Text]],\n logical_format: Text, physical_format: Text):\n if telemetry_descriptors is None:\n telemetry_descriptors = _UNKNOWN_TELEMETRY_DESCRIPTORS\n metric_namespace = telemetry_util.MakeTfxNamespace(telemetry_descriptors +\n _IO_TELEMETRY_DESCRIPTOR)\n namer = _GetMetricNamer(logical_format, physical_format)\n self._num_rows = beam.metrics.Metrics.counter(\n metric_namespace, namer(\"num_raw_records\"))\n self._byte_size_dist = beam.metrics.Metrics.distribution(\n metric_namespace, namer(\"raw_record_byte_size\"))\n\n def process(self, raw_record: bytes) -> Iterable[bytes]:\n self._num_rows.inc()\n self._byte_size_dist.update(len(raw_record))\n yield raw_record\n\n\ndef _GetMetricNamer(\n logical_format: Text, physical_format: Text) -> Callable[[Text], Text]:\n \"\"\"Returns a function to contruct beam metric names.\"\"\"\n for f in (logical_format, physical_format):\n assert \"[\" not in f, \"Invalid logical / physical format string: %s\" % f\n assert \"]\" not in f, \"Invalid logical / physical format string: %s\" % f\n assert \"-\" not in f, \"Invalid logical / physical format string: %s\" % f\n\n def _Namer(base_name: Text) -> Text:\n assert \"-\" not in base_name, \"Invalid metric name: %s\" % base_name\n return \"LogicalFormat[%s]-PhysicalFormat[%s]-%s\" % (\n logical_format, physical_format, base_name)\n\n return _Namer\n"
] |
[
[
"numpy.bincount",
"numpy.add",
"numpy.random.rand",
"numpy.zeros",
"numpy.arange"
]
] |
andykoswara/adme_tox
|
[
"6ff1de3a85e415c7a8f1ab981dc64c468be23957"
] |
[
"rdkit_ecfp_main.py"
] |
[
"from adme_utils import *\n\nfrom rdkit import DataStructs, Chem\nfrom rdkit.Chem import AllChem, Descriptors\n\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler\n\n## generate morgan fingerprint/ecfp\ndef genFP(mol, rad, nBits):\n \"\"\"\n\t\tcompute extended circular fingerprint\n \"\"\"\n fp = AllChem.GetMorganFingerprintAsBitVect(mol, rad, nBits=nBits)\n fp_vect = np.zeros((1,))\n DataStructs.ConvertToNumpyArray(fp, fp_vect)\n\n return fp_vect\n\ndef compute_descriptors(mol, id_string):\n \"\"\"\n\t\tcompute rdkit descriptors\n \"\"\"\n descriptors = [id_string]\n\n # Property descriptor\n descriptors.append(Descriptors.MolWt(mol))\n descriptors.append(Descriptors.HeavyAtomMolWt(mol))\n descriptors.append(Descriptors.MolLogP(mol))\n descriptors.append(Descriptors.MolMR(mol))\n descriptors.append(Descriptors.TPSA(mol))\n # Constitutional descriptor\n descriptors.append(Descriptors.FractionCSP3(mol))\n # Atom\n descriptors.append(Descriptors.HeavyAtomCount(mol))\n descriptors.append(Descriptors.NHOHCount(mol))\n descriptors.append(Descriptors.NOCount(mol))\n descriptors.append(Descriptors.NumHAcceptors(mol))\n descriptors.append(Descriptors.NumHDonors(mol))\n descriptors.append(Descriptors.NumHeteroatoms(mol))\n #descriptors.append(Descriptors.NumBridgeheadAtoms(mol))\n #descriptors.append(Descriptors.NumSpiroAtoms(mol))\n # Bond\n descriptors.append(Descriptors.NumRotatableBonds(mol))\n # Electronic\n descriptors.append(Descriptors.NumRadicalElectrons(mol))\n descriptors.append(Descriptors.NumValenceElectrons(mol))\n descriptors.append(Descriptors.MaxPartialCharge(mol))\n descriptors.append(Descriptors.MinPartialCharge(mol))\n descriptors.append(Descriptors.MaxAbsPartialCharge(mol))\n descriptors.append(Descriptors.MinAbsPartialCharge(mol))\n # Ring\n #descriptors.append(Descriptors.NumRings(mol))\n descriptors.append(Descriptors.NumAromaticRings(mol))\n descriptors.append(Descriptors.NumSaturatedRings(mol))\n descriptors.append(Descriptors.NumAliphaticRings(mol))\n #descriptors.append(Descriptors.NumCarbocycles(mol))\n descriptors.append(Descriptors.NumAromaticCarbocycles(mol))\n descriptors.append(Descriptors.NumSaturatedCarbocycles(mol))\n descriptors.append(Descriptors.NumAliphaticCarbocycles(mol))\n #descriptors.append(Descriptors.NumHeterocycles(mol))\n descriptors.append(Descriptors.NumAromaticHeterocycles(mol))\n descriptors.append(Descriptors.NumSaturatedHeterocycles(mol))\n descriptors.append(Descriptors.NumAliphaticHeterocycles(mol))\n # Functional Groups\n descriptors.append(Descriptors.fr_Al_COO(mol))\n descriptors.append(Descriptors.fr_Al_OH(mol))\n descriptors.append(Descriptors.fr_Al_OH_noTert(mol))\n descriptors.append(Descriptors.fr_ArN(mol))\n descriptors.append(Descriptors.fr_Ar_COO(mol))\n descriptors.append(Descriptors.fr_Ar_N(mol))\n descriptors.append(Descriptors.fr_Ar_NH(mol))\n descriptors.append(Descriptors.fr_Ar_OH(mol))\n descriptors.append(Descriptors.fr_COO(mol))\n descriptors.append(Descriptors.fr_COO2(mol))\n descriptors.append(Descriptors.fr_C_O(mol))\n descriptors.append(Descriptors.fr_C_O_noCOO(mol))\n descriptors.append(Descriptors.fr_C_S(mol))\n descriptors.append(Descriptors.fr_HOCCN(mol))\n descriptors.append(Descriptors.fr_Imine(mol))\n descriptors.append(Descriptors.fr_NH0(mol))\n descriptors.append(Descriptors.fr_NH1(mol))\n descriptors.append(Descriptors.fr_NH2(mol))\n descriptors.append(Descriptors.fr_N_O(mol))\n descriptors.append(Descriptors.fr_Ndealkylation1(mol))\n 
descriptors.append(Descriptors.fr_Ndealkylation2(mol))\n descriptors.append(Descriptors.fr_Nhpyrrole(mol))\n descriptors.append(Descriptors.fr_SH(mol))\n descriptors.append(Descriptors.fr_aldehyde(mol))\n descriptors.append(Descriptors.fr_alkyl_carbamate(mol))\n descriptors.append(Descriptors.fr_alkyl_halide(mol))\n descriptors.append(Descriptors.fr_allylic_oxid(mol))\n descriptors.append(Descriptors.fr_amide(mol))\n descriptors.append(Descriptors.fr_amidine(mol))\n descriptors.append(Descriptors.fr_aniline(mol))\n descriptors.append(Descriptors.fr_aryl_methyl(mol))\n descriptors.append(Descriptors.fr_azide(mol))\n descriptors.append(Descriptors.fr_azo(mol))\n descriptors.append(Descriptors.fr_barbitur(mol))\n descriptors.append(Descriptors.fr_benzene(mol))\n descriptors.append(Descriptors.fr_benzodiazepine(mol))\n descriptors.append(Descriptors.fr_bicyclic(mol))\n descriptors.append(Descriptors.fr_diazo(mol))\n descriptors.append(Descriptors.fr_dihydropyridine(mol))\n descriptors.append(Descriptors.fr_epoxide(mol))\n descriptors.append(Descriptors.fr_ester(mol))\n descriptors.append(Descriptors.fr_ether(mol))\n descriptors.append(Descriptors.fr_furan(mol))\n descriptors.append(Descriptors.fr_guanido(mol))\n descriptors.append(Descriptors.fr_halogen(mol))\n descriptors.append(Descriptors.fr_hdrzine(mol))\n descriptors.append(Descriptors.fr_hdrzone(mol))\n descriptors.append(Descriptors.fr_imidazole(mol))\n descriptors.append(Descriptors.fr_imide(mol))\n descriptors.append(Descriptors.fr_isocyan(mol))\n descriptors.append(Descriptors.fr_isothiocyan(mol))\n descriptors.append(Descriptors.fr_ketone(mol))\n descriptors.append(Descriptors.fr_ketone_Topliss(mol))\n descriptors.append(Descriptors.fr_lactam(mol))\n descriptors.append(Descriptors.fr_lactone(mol))\n descriptors.append(Descriptors.fr_methoxy(mol))\n descriptors.append(Descriptors.fr_morpholine(mol))\n descriptors.append(Descriptors.fr_nitrile(mol))\n descriptors.append(Descriptors.fr_nitro(mol))\n descriptors.append(Descriptors.fr_nitro_arom(mol))\n descriptors.append(Descriptors.fr_nitro_arom_nonortho(mol))\n descriptors.append(Descriptors.fr_nitroso(mol))\n descriptors.append(Descriptors.fr_oxazole(mol))\n descriptors.append(Descriptors.fr_oxime(mol))\n descriptors.append(Descriptors.fr_para_hydroxylation(mol))\n descriptors.append(Descriptors.fr_phenol(mol))\n descriptors.append(Descriptors.fr_phenol_noOrthoHbond(mol))\n descriptors.append(Descriptors.fr_phos_acid(mol))\n descriptors.append(Descriptors.fr_phos_ester(mol))\n descriptors.append(Descriptors.fr_piperdine(mol))\n descriptors.append(Descriptors.fr_piperzine(mol))\n descriptors.append(Descriptors.fr_priamide(mol))\n descriptors.append(Descriptors.fr_prisulfonamd(mol))\n descriptors.append(Descriptors.fr_pyridine(mol))\n descriptors.append(Descriptors.fr_quatN(mol))\n descriptors.append(Descriptors.fr_sulfide(mol))\n descriptors.append(Descriptors.fr_sulfonamd(mol))\n descriptors.append(Descriptors.fr_sulfone(mol))\n descriptors.append(Descriptors.fr_term_acetylene(mol))\n descriptors.append(Descriptors.fr_tetrazole(mol))\n descriptors.append(Descriptors.fr_thiazole(mol))\n descriptors.append(Descriptors.fr_thiocyan(mol))\n descriptors.append(Descriptors.fr_thiophene(mol))\n descriptors.append(Descriptors.fr_unbrch_alkane(mol))\n descriptors.append(Descriptors.fr_urea(mol))\n # MOE-type descriptors\n descriptors.append(Descriptors.LabuteASA(mol))\n descriptors.append(Descriptors.PEOE_VSA1(mol))\n descriptors.append(Descriptors.PEOE_VSA2(mol))\n 
descriptors.append(Descriptors.PEOE_VSA3(mol))\n descriptors.append(Descriptors.PEOE_VSA4(mol))\n descriptors.append(Descriptors.PEOE_VSA5(mol))\n descriptors.append(Descriptors.PEOE_VSA6(mol))\n descriptors.append(Descriptors.PEOE_VSA7(mol))\n descriptors.append(Descriptors.PEOE_VSA8(mol))\n descriptors.append(Descriptors.PEOE_VSA9(mol))\n descriptors.append(Descriptors.PEOE_VSA10(mol))\n descriptors.append(Descriptors.PEOE_VSA11(mol))\n descriptors.append(Descriptors.PEOE_VSA12(mol))\n descriptors.append(Descriptors.PEOE_VSA13(mol))\n descriptors.append(Descriptors.PEOE_VSA14(mol))\n descriptors.append(Descriptors.SMR_VSA1(mol))\n descriptors.append(Descriptors.SMR_VSA2(mol))\n descriptors.append(Descriptors.SMR_VSA3(mol))\n descriptors.append(Descriptors.SMR_VSA4(mol))\n descriptors.append(Descriptors.SMR_VSA5(mol))\n descriptors.append(Descriptors.SMR_VSA6(mol))\n descriptors.append(Descriptors.SMR_VSA7(mol))\n descriptors.append(Descriptors.SMR_VSA8(mol))\n descriptors.append(Descriptors.SMR_VSA9(mol))\n descriptors.append(Descriptors.SMR_VSA10(mol))\n descriptors.append(Descriptors.SlogP_VSA1(mol))\n descriptors.append(Descriptors.SlogP_VSA2(mol))\n descriptors.append(Descriptors.SlogP_VSA3(mol))\n descriptors.append(Descriptors.SlogP_VSA4(mol))\n descriptors.append(Descriptors.SlogP_VSA5(mol))\n descriptors.append(Descriptors.SlogP_VSA6(mol))\n descriptors.append(Descriptors.SlogP_VSA7(mol))\n descriptors.append(Descriptors.SlogP_VSA8(mol))\n descriptors.append(Descriptors.SlogP_VSA9(mol))\n descriptors.append(Descriptors.SlogP_VSA10(mol))\n descriptors.append(Descriptors.SlogP_VSA11(mol))\n descriptors.append(Descriptors.SlogP_VSA12(mol))\n descriptors.append(Descriptors.EState_VSA1(mol))\n descriptors.append(Descriptors.EState_VSA2(mol))\n descriptors.append(Descriptors.EState_VSA3(mol))\n descriptors.append(Descriptors.EState_VSA4(mol))\n descriptors.append(Descriptors.EState_VSA5(mol))\n descriptors.append(Descriptors.EState_VSA6(mol))\n descriptors.append(Descriptors.EState_VSA7(mol))\n descriptors.append(Descriptors.EState_VSA8(mol))\n descriptors.append(Descriptors.EState_VSA9(mol))\n descriptors.append(Descriptors.EState_VSA10(mol))\n descriptors.append(Descriptors.EState_VSA11(mol))\n descriptors.append(Descriptors.VSA_EState1(mol))\n descriptors.append(Descriptors.VSA_EState2(mol))\n descriptors.append(Descriptors.VSA_EState3(mol))\n descriptors.append(Descriptors.VSA_EState4(mol))\n descriptors.append(Descriptors.VSA_EState5(mol))\n descriptors.append(Descriptors.VSA_EState6(mol))\n descriptors.append(Descriptors.VSA_EState7(mol))\n descriptors.append(Descriptors.VSA_EState8(mol))\n descriptors.append(Descriptors.VSA_EState9(mol))\n descriptors.append(Descriptors.VSA_EState10(mol))\n # Topological descriptors\n descriptors.append(Descriptors.BalabanJ(mol))\n descriptors.append(Descriptors.BertzCT(mol))\n descriptors.append(Descriptors.HallKierAlpha(mol))\n descriptors.append(Descriptors.Ipc(mol))\n descriptors.append(Descriptors.Kappa1(mol))\n descriptors.append(Descriptors.Kappa2(mol))\n descriptors.append(Descriptors.Kappa3(mol))\n # Connectivity descriptors\n descriptors.append(Descriptors.Chi0(mol))\n descriptors.append(Descriptors.Chi1(mol))\n descriptors.append(Descriptors.Chi0n(mol))\n descriptors.append(Descriptors.Chi1n(mol))\n descriptors.append(Descriptors.Chi2n(mol))\n descriptors.append(Descriptors.Chi3n(mol))\n descriptors.append(Descriptors.Chi4n(mol))\n descriptors.append(Descriptors.Chi0v(mol))\n descriptors.append(Descriptors.Chi1v(mol))\n 
descriptors.append(Descriptors.Chi2v(mol))\n descriptors.append(Descriptors.Chi3v(mol))\n descriptors.append(Descriptors.Chi4v(mol))\n # Other properties\n descriptors.append(Descriptors.qed(mol))\n # Morgan FP\n rad = 3\n nBits = 1024\n descriptors.extend(genFP(mol, rad, nBits))\n\n return(descriptors)\n\ndef getrdkitdesc_from_smi(smiles_nonzero_list, smiles_zero_list, zero_id):\n\t\"\"\"\n\t\tcompute encoding of smiles\n\t\tArgs:\n\t\t\tsmiles_nonzero_list, smiles_zero_list: same as above\n\t\tReturns:\n\t\t\tx: smile embeddings\n\t\t\ty: fragment labels\n\t\"\"\"\n\tdfsmi_nonzero = pd.DataFrame({'smiles': smiles_nonzero_list})\n\tdfsmi_nonzero['id']=list(range(0, dfsmi_nonzero.shape[0]))\n\tdfsmi_zero = pd.DataFrame({'smiles': smiles_zero_list})\n\tdfsmi_zero['id']=list(range(0, dfsmi_zero.shape[0]))\n\tdfsmi_comb = pd.concat([dfsmi_nonzero, dfsmi_zero])\n\t# print(dfsmi_comb.head(5))\n\tnewdf = []\n\tfor id, row in dfsmi_comb.iterrows():\n\n\t ## compute descriptors\n\t smiles_string = dfsmi_comb['smiles'].iloc[id]\n\t # print(smiles_string)\n\t id_string = dfsmi_comb['id'].iloc[id]\n\t mol = Chem.MolFromSmiles(smiles_string)\n\t descriptors = compute_descriptors(mol, id_string)\n\n\t # append results\n\t newdf.append(descriptors)\n\n\t## convert descriptors to np array\n\tall_new = np.asarray(newdf)\n\tall_desc = all_new[:,1:].astype(float)\n\tall_name = all_new[:,:1]\n\tprint('df.shape: ' + str(all_desc.shape))\n\n\t## checking rows\n\tnansmile = all_desc[~np.isnan(all_desc).any(axis=1)].shape\n\tprint('df.shape w/out NaN smiles: ' + str(nansmile))\n\n\t## checking columns\n\tnandesc = all_desc[:,~np.any(np.isnan(all_desc), axis=0)].shape\n\tprint('df.shape w/out NaN descriptors: ' + str(nandesc))\n\n\t## removing descriptors with NaN\n\t# all_desc = all_desc[:,~np.any(np.isnan(all_desc), axis=0)]\n\n\t## removing smiles with NaN descriptors\n\tall_desc = all_desc[~np.isnan(all_desc).any(axis=1),:]\n\n\t## minmax rescale descriptors\n\tscaler = MinMaxScaler(feature_range=(0, 1))\n\tall_desc_minmax = scaler.fit_transform(all_desc)\n\n\t## other options for scaling\n\t## standardize scale descriptors\n\t# all_desc_std = StandardScaler().fit_transform(all_desc)\n\n\t## robust scale descriptors\n\t# scaler = RobustScaler(quantile_range=(25, 75))\n\t# all_desc_robust = scaler.fit_transform(all_desc))\n\n\tdfsmi_desc_comb = pd.DataFrame(np.asarray(all_desc_minmax))\n\tx = dfsmi_desc_comb.to_numpy()\n\tcount=x.shape[0]\n\ty = np.concatenate([np.ones(zero_id), np.zeros(count-zero_id)])\n\n\treturn x, y, dfsmi_desc_comb\n\n## initial params\nprint('')\nhomedir = os.path.expanduser(\"~/\")\nworkdir = homedir + 'Desktop/adme_tox/' ## I like to put stuff on Desktop\nprint('workdir: ' + workdir)\nlabel = 'MC'\ndesc_choice = 'rdkit_ecfp'\nsavedir = workdir + desc_choice + '/' + label + '/'\nprint('savedir: ' + savedir)\ndatadir = workdir + 'adme_tox_dataset/' + label + '/'\nprint('datadir: ' + datadir)\ntrain_fn = label + '_train'\ntrain_ft = '.csv'\nprint('train fn: ' + train_fn + train_ft)\ntrain_path = datadir + train_fn + train_ft\nload_enc = True # load previously saved molecular encoding?\nload_mod = True # load model?\nload_u = False # load UMAP points?\ninc_test = True # include test sets?\n\nsmiles_nonzero_list, smiles_zero_list, zero_id = getsmi_from_csv(train_path)\nprint('train nonzero count: ' + str(zero_id))\n\nif load_enc: ## load previously saved encoding\n\tdfsmi_enc_train = pd.read_csv(workdir + desc_choice + '/' + \\\n\t\tlabel + '/' + label + '_train_smiles_rdkit_ecfp.csv', 
index_col=0)\n\tcount_train = dfsmi_enc_train.shape[0]\n\tx_train = dfsmi_enc_train.to_numpy()\n\ty_train = y = np.concatenate([np.ones(zero_id),\n\t\tnp.zeros(count_train-zero_id)])\n\tprint('train smiles encoding loaded')\nelse: ## generate encoding\n\tprint(''.join('train smiles encodings being computed'))\n\tx_train, y_train, dfsmi_enc_train = \\\n\t\t\tgetrdkitdesc_from_smi(smiles_nonzero_list,\n\t\t\t\t\t\t\t\tsmiles_zero_list,\n\t\t\t\t\t\t\t\tzero_id)\n\tdfsmi_enc_train.to_csv(workdir + desc_choice + '/' + \\\n\t\tlabel + '/' + label + '_train_smiles_rdkit_ecfp.csv')\n\nif inc_test: ## include test sets\n\tprint('test set IS included')\n\ttest_fn = label + '_test'\n\ttest_ft = '.csv'\n\tprint('test fn: ' + test_fn + test_ft)\n\ttest_path = datadir + test_fn + test_ft\n\tsmiles_nonzero_list_test, smiles_zero_list_test, zero_test_id = \\\n\t\tgetsmi_from_csv(test_path)\n\tprint(''.join(['test nonzero count: ', str(zero_test_id)]))\n\tif load_enc:\n\t\tdfsmi_enc_test = pd.read_csv(workdir + desc_choice + '/' + \\\n\t\t\tlabel + '/' + label + '_test_smiles_rdkit_ecfp.csv', index_col=0)\n\t\tx_test = dfsmi_enc_test.to_numpy()\n\t\tcount_test = dfsmi_enc_test.shape[0]\n\t\ty_test = y = np.concatenate([np.ones(zero_test_id),\n\t\t\tnp.zeros(count_test-zero_test_id)])\n\t\ttest_id = dfsmi_enc_test.shape[0]\n\t\tprint('test smiles encoding loaded')\n\telse:\n\t\tprint(''.join('test smiles encoding being computed'))\n\t\tx_test, y_test, dfsmi_enc_test = \\\n\t\t\tgetrdkitdesc_from_smi(smiles_nonzero_list_test,\n\t\t\t\t\t\t\t\tsmiles_zero_list_test,\n\t\t\t\t\t\t\t\tzero_test_id)\n\t\tdfsmi_enc_test.to_csv(workdir + desc_choice + '/' + \\\n\t\t\tlabel + '/' + label + '_test_smiles_rdkit_ecfp.csv')\n\t\ttest_id = dfsmi_enc_test.shape[0]\n\nelse:\n\tprint('test set NOT included')\n\tx_test = np.array([])\n\ty_test = np.array([])\n\nsetglobal(savedir, label, x_train, y_train) ## set global parameters\nif load_mod: ## load previously trained model\n\tfor file in os.listdir(savedir):\n\t\tif file.endswith(\".pkl\"):\n\t\t\tmodel_path = os.path.join(savedir, file)\n\t\t\tprint('model located: ' + model_path)\n\trfc = load_model(model_path)\n\tprint('model loaded')\nelse:\n\tprint('optimizing model')\n\tdiscrete_domain = def_optdom()\n\trfc = get_optimrfc(discrete_domain=discrete_domain)\n\tanalyze_rfc(x_train, y_train, rfc, x_test, y_test)\n\n## UMAP\nif inc_test:\n\tx=np.concatenate((x_train, x_test))\n\ty=np.concatenate((y_train, y_test))\nelse:\n\tx = x_train\n\ty = y_train\nn_comps=[2,3]\nfor n in n_comps:\n\tdraw_umap(x=x, y=y, n_comps=n, load_u=load_u, savedir=savedir, label=label)\n"
] |
[
[
"sklearn.preprocessing.MinMaxScaler"
]
] |
derekvantilborg/molml_tools
|
[
"5a5baaa21a4b3b91e59c1a350d04db3fd5102e4e"
] |
[
"molml/Viz/multivariate.py"
] |
[
"from typing import List, Callable, Any\nfrom molml.Datastructures.molecule import Molecule\nfrom sklearn.manifold import TSNE as sklearn_tsne\nfrom sklearn.decomposition import PCA as sklearn_PCA\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass TSNE:\n def __init__(self, n_components: int = 2, perplexity: Any = 30.0, early_exaggeration: Any = 12.0,\n learning_rate: Any = \"warn\", n_iter: Any = 1000, n_iter_without_progress: Any = 300,\n min_grad_norm: Any = 1e-7, metric: Any = \"euclidean\", init: Any = \"warn\", verbose: Any = 0,\n random_state: Any = None, method: Any = \"barnes_hut\", angle: Any = 0.5, n_jobs: Any = None):\n\n self.tsne = sklearn_tsne(n_components=n_components, perplexity=perplexity,\n early_exaggeration=early_exaggeration, learning_rate=learning_rate, n_iter=n_iter,\n n_iter_without_progress=n_iter_without_progress, min_grad_norm=min_grad_norm,\n metric=metric, init=init, verbose=verbose, random_state=random_state, method=method,\n angle=angle, n_jobs=n_jobs)\n\n self.molecules = None\n self.results = None\n self.coords = None\n\n def fit(self, molecules: List[Molecule], transform: Callable = None, use_n_principal_components: int = None):\n\n self.molecules = molecules\n\n if use_n_principal_components is not None:\n pca = PCA(n_components=use_n_principal_components)\n pca.fit(molecules, transform=transform)\n x = pca.results\n\n else:\n if transform is None:\n x = np.array([m.ecfp() for m in molecules])\n else:\n x = np.array([transform(m.smiles) for m in molecules])\n\n self.results = self.tsne.fit_transform(x)\n self.coords = pd.DataFrame({\"x\": self.results[:, 0], \"y\": self.results[:, 1]})\n\n def show(self, color_by: List[any] = None, palette: Any = None):\n \"\"\" Make a quick scatter plot of the T-SNE\"\"\"\n\n if color_by is None:\n color_by = [None for _ in range(len(self.coords))]\n self.coords['label'] = color_by\n\n plt.figure(figsize=(10, 10))\n sns.scatterplot(\n x=\"x\", y=\"y\",\n hue=\"label\",\n palette=palette,\n data=self.coords,\n alpha=0.5\n )\n plt.show()\n\n\nclass PCA:\n def __init__(self, n_components: int = 2):\n self.pca = sklearn_PCA(n_components=n_components)\n self.molecules = None\n self.results = None\n self.coords = None\n\n def fit(self, molecules: List[Molecule], transform: Callable = None):\n\n self.molecules = molecules\n\n if transform is None:\n x = np.array([m.ecfp() for m in molecules])\n else:\n x = np.array([transform(m.smiles) for m in molecules])\n\n self.results = self.pca.fit_transform(x)\n\n self.coords = pd.DataFrame({\"x\": self.results[:, 0], \"y\": self.results[:, 1]})\n\n def show(self, color_by: List[any] = None, palette: Any = None):\n \"\"\" Make a quick scatter plot of the PCA\"\"\"\n\n if color_by is None:\n color_by = [None for _ in range(len(self.coords))]\n self.coords['label'] = color_by\n\n plt.figure(figsize=(10, 10))\n sns.scatterplot(\n x=\"x\", y=\"y\",\n hue=\"label\",\n palette=palette,\n data=self.coords,\n alpha=0.5\n )\n plt.show()\n"
] |
[
[
"pandas.DataFrame",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA"
]
] |
draustin/otk
|
[
"c6e91423ec79b85b380ee9385f6d27c91f92503d"
] |
[
"otk/rt1/_interfaces.py"
] |
[
"from typing import Dict, Sequence, Tuple\nfrom enum import Enum\nimport numpy as np\nimport otk.functions\nimport scipy.interpolate\nfrom otk.functions import make_perpendicular\n\nfrom .. import v4hb\nfrom .. import functions\nfrom .. import ri\n\nclass Directions(Enum):\n REFLECTED = 0\n TRANSMITTED = 1\n\nclass InterfaceMode:\n def __init__(self, direction: int, matrix: np.ndarray, vector: np.ndarray, n: np.ndarray):\n \"\"\"\n\n Args:\n direction: Either REFLECTED or TRANSMITTED.\n matrix: Projection matrix.\n vector: Outgoing k vector.\n n: Outgoing refractive index.\n \"\"\"\n self.direction = Directions(direction)\n self.matrix = np.asarray(matrix)\n assert self.matrix.shape[-2:] == (4, 4)\n self.vector = np.asarray(vector)\n assert self.vector.shape[-1] == 4\n # Row space of matrix should be orthogonal to outgoing k vector.\n assert np.allclose(v4hb.dot(self.matrix, self.vector[..., None, :]), 0, atol=1e-7)\n # This checks that the shapes are consistent.\n self.shape = np.broadcast(self.matrix, self.vector[..., None]).shape\n self.n = n\n\n def __repr__(self):\n return 'InterfaceMode(matrix=%r, vector=%r, n=%r)'%(self.matrix, self.vector, self.n)\n\n\nclass Interface:\n def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, vector: np.ndarray, n: np.ndarray) -> Dict[\n str, InterfaceMode]:\n \"\"\"\n\n Args:\n point: ...x4 array in surface local coordinates.\n normal: ...x4 array in surface local coordinates.\n lamb: wavelength\n vector: ...x4 array of normalized incident k vectors in local coordinates.\n\n \"\"\"\n raise NotImplementedError()\n\n\ndef calc_outer_product(vector1, vector2, amplitude):\n \"\"\"Output[...,i,j] = vector1[...,i]*amplitude*vector2[...,j].\"\"\"\n vector1 = np.asarray(vector1)\n vector2 = np.asarray(vector2)\n amplitude = np.atleast_1d(amplitude)\n assert amplitude.shape[-1] == 1\n return vector1[..., :, None]*amplitude[..., None]*vector2[..., None, :]\n\n\ndef calc_matrix(incident_vectors, deflected_vectors, amplitudes):\n return sum(calc_outer_product(incident_vector, deflected_vector, amplitude) for incident_vector, deflected_vector, amplitude in\n zip(incident_vectors, deflected_vectors, amplitudes))\n\n\nclass Mirror(Interface):\n def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, incident_vector: np.ndarray,\n n: np.ndarray) -> Dict:\n reflected_vector = otk.functions.reflect_vector(incident_vector, normal)\n s_pol_vector = v4hb.cross(normal, incident_vector)\n incident_p_pol_vector = v4hb.cross(incident_vector, s_pol_vector)\n reflected_p_pol_vector = v4hb.cross(reflected_vector, s_pol_vector)\n\n matrix = calc_matrix((incident_p_pol_vector, s_pol_vector), (reflected_p_pol_vector, s_pol_vector),\n np.asarray((1, 1)))\n mode = InterfaceMode(Directions.REFLECTED, matrix, reflected_vector, n)\n modes = dict(reflected=mode)\n return modes\n\n\nclass IsotropicMediaInterface(Interface):\n def __init__(self, n1, n2, reflects: bool = True, transmits: bool = True):\n self.n1 = n1\n self.n2 = n2\n self.reflects = reflects\n self.transmits = transmits\n\n def calc_amplitudes(self, n1, n2, cos_theta1, lamb) -> Tuple[Tuple]:\n \"\"\"Returns amplitudes ((rp, rs), (tp, ts)).\"\"\"\n raise NotImplementedError()\n\n def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, incident_vector: np.ndarray,\n n: np.ndarray) -> Dict:\n \"\"\"\n\n Args:\n point:\n normal:\n lamb:\n incident_vector:\n\n Returns:\n Mapping of (Outgoing, Polarization) pairs to InterfaceMode objects.\n \"\"\"\n n1 = self.n1(lamb)\n n2 = 
self.n2(lamb)\n cos_theta1 = v4hb.dot(normal, incident_vector)\n\n if 0:\n na = np.choose(cos_theta1 < 0, (n1, n2))\n nb = np.choose(cos_theta1 < 0, (n2, n1))\n else:\n assert np.all(cos_theta1>=0) or np.all(cos_theta1<=0)\n cos_theta1 = cos_theta1.ravel()[0]\n if cos_theta1>0:\n na, nb = n1, n2\n else:\n na, nb = n2, n1\n\n cos_theta1 = abs(cos_theta1)\n\n refracted_vector = otk.functions.refract_vector(incident_vector, normal, nb/na)*na/nb\n reflected_vector = otk.functions.reflect_vector(incident_vector, normal)\n\n # Generate unit vector perpendicular to normal and incident.\n s_pol_vector = make_perpendicular(normal, incident_vector)\n\n incident_p_pol_vector = v4hb.cross(incident_vector, s_pol_vector)\n refracted_p_pol_vector = v4hb.cross(refracted_vector, s_pol_vector)\n reflected_p_pol_vector = v4hb.cross(reflected_vector, s_pol_vector)\n\n amplitudes = self.calc_amplitudes(na, nb, cos_theta1, lamb)\n\n modes = {}\n if self.reflects:\n matrix = calc_matrix((incident_p_pol_vector, s_pol_vector), (reflected_p_pol_vector, s_pol_vector),\n amplitudes[0])\n modes['reflected'] = InterfaceMode(Directions.REFLECTED, matrix, reflected_vector, na)\n\n if self.transmits:\n matrix = calc_matrix((incident_p_pol_vector, s_pol_vector), (refracted_p_pol_vector, s_pol_vector),\n amplitudes[1])\n modes['transmitted'] = InterfaceMode(Directions.TRANSMITTED, matrix, refracted_vector, nb)\n\n return modes\n\nclass PerfectRefractor(IsotropicMediaInterface):\n def __init__(self, n1, n2):\n IsotropicMediaInterface.__init__(self, n1, n2, False, True)\n\n def calc_amplitudes(self, n1, n2, cos_theta1, lamb):\n return ((0, 0), (1, 1))\n\nclass FresnelInterface(IsotropicMediaInterface):\n def calc_amplitudes(self, n1, nb, cos_theta1, lamb):\n return functions.calc_fresnel_coefficients(n1, nb, cos_theta1)\n\n def __repr__(self):\n return 'FresnelInterface(n1=%r, n2=%r)'%(self.n1, self.n2)\n\n def flip(self):\n return FresnelInterface(self.n2, self.n1)\n\n\nclass SampledCoating(IsotropicMediaInterface):\n \"\"\"Symmetric - amplitudes are the same from both sides.\"\"\"\n\n def __init__(self, n1: ri.Index, n2: ri.Index, lambs: Sequence, thetas: Sequence, amplitudes: np.ndarray):\n \"\"\"\n\n Args:\n lambs: Sampled wavelengths.\n thetas: Sampled angles.\n amplitudes: Array with dimensions (Outgoing, Polarization, wavelength, angle).\n \"\"\"\n IsotropicMediaInterface.__init__(self, n1, n2)\n self.lambs = np.asarray(lambs)\n assert self.lambs.ndim == 1\n self.thetas = np.asarray(thetas)\n assert self.thetas.ndim == 1\n self.amplitudes = amplitudes\n assert self.amplitudes.shape == (2, 2, len(self.lambs), len(self.thetas))\n\n def __repr__(self):\n return 'SampledCoating(n1=%r, n2=%r, lambs=%r, thetas=%r, amplitudes=%r)'%(\n self.n1, self.n2, self.lambs, self.thetas, self.amplitudes)\n\n def calc_amplitudes(self, n1, n2, cos_theta1, lamb):\n results = []\n theta1 = np.arccos(cos_theta1)\n # Loop over reflected, transmitted.\n for amplitudes in self.amplitudes:\n results.append([])\n # Loop over p, s.\n for amplitude in zip(amplitudes):\n # TODO switch to complex interpolation.\n amplitude_lamb = scipy.interpolate.interp1d(self.lambs, amplitude, axis=0, copy=False)(lamb)\n amplitude_lamb_theta = scipy.interpolate.interp1d(self.thetas, amplitude_lamb, axis=0, copy=False)(\n theta1)\n results[-1].append(amplitude_lamb_theta)\n return results\n"
] |
[
[
"numpy.arccos",
"numpy.asarray",
"numpy.broadcast",
"numpy.choose",
"numpy.atleast_1d",
"numpy.all"
]
] |
Darylgolden/manim
|
[
"9d42bdf9274572726334e91d54310cf4d0876630"
] |
[
"manim/renderer/cairo_renderer.py"
] |
[
"import time\nimport typing\n\nimport numpy as np\n\nfrom manim.utils.hashing import get_hash_from_play_call\n\nfrom .. import config, logger\nfrom ..camera.camera import Camera\nfrom ..mobject.mobject import Mobject\nfrom ..scene.scene_file_writer import SceneFileWriter\nfrom ..utils.exceptions import EndSceneEarlyException\nfrom ..utils.iterables import list_update\n\n\ndef handle_play_like_call(func):\n \"\"\"\n This method is used internally to wrap the\n passed function, into a function that\n actually writes to the video stream.\n Simultaneously, it also adds to the number\n of animations played.\n\n Parameters\n ----------\n func : function\n The play() like function that has to be\n written to the video file stream.\n\n Returns\n -------\n function\n The play() like function that can now write\n to the video file stream.\n \"\"\"\n\n # NOTE : This is only kept for OpenGL renderer.\n # The play logic of the cairo renderer as been refactored and does not need this function anymore.\n # When OpenGL renderer will have a proper testing system,\n # the play logic of the latter has to be refactored in the same way the cairo renderer has been, and thus this\n # method has to be deleted.\n\n def wrapper(self, scene, *args, **kwargs):\n self.animation_start_time = time.time()\n self.file_writer.begin_animation(not self.skip_animations)\n func(self, scene, *args, **kwargs)\n self.file_writer.end_animation(not self.skip_animations)\n self.num_plays += 1\n\n return wrapper\n\n\nclass CairoRenderer:\n \"\"\"A renderer using Cairo.\n\n num_plays : Number of play() functions in the scene.\n time: time elapsed since initialisation of scene.\n \"\"\"\n\n def __init__(self, camera_class=None, skip_animations=False, **kwargs):\n # All of the following are set to EITHER the value passed via kwargs,\n # OR the value stored in the global config dict at the time of\n # _instance construction_.\n self.file_writer = None\n camera_cls = camera_class if camera_class is not None else Camera\n self.camera = camera_cls()\n self._original_skipping_status = skip_animations\n self.skip_animations = skip_animations\n self.animations_hashes = []\n self.num_plays = 0\n self.time = 0\n self.static_image = None\n\n def init_scene(self, scene):\n self.file_writer = SceneFileWriter(\n self,\n scene.__class__.__name__,\n )\n\n def play(self, scene, *args, **kwargs):\n # Reset skip_animations to the original state.\n # Needed when rendering only some animations, and skipping others.\n self.skip_animations = self._original_skipping_status\n self.update_skipping_status()\n\n scene.compile_animation_data(*args, **kwargs)\n\n # If skip_animations is already True, we can skip all the caching process.\n if not config[\"disable_caching\"] and not self.skip_animations:\n hash_current_animation = get_hash_from_play_call(\n scene, self.camera, scene.animations, scene.mobjects\n )\n if self.file_writer.is_already_cached(hash_current_animation):\n logger.info(\n f\"Animation {self.num_plays} : Using cached data (hash : %(hash_current_animation)s)\",\n {\"hash_current_animation\": hash_current_animation},\n )\n self.skip_animations = True\n else:\n hash_current_animation = f\"uncached_{self.num_plays:05}\"\n\n if self.skip_animations:\n logger.debug(f\"Skipping animation {self.num_plays}\")\n hash_current_animation = None\n else:\n if config[\"disable_caching\"]:\n logger.info(\"Caching disabled.\")\n hash_current_animation = f\"uncached_{self.num_plays:05}\"\n else:\n hash_current_animation = get_hash_from_play_call(\n scene, 
self.camera, scene.animations, scene.mobjects\n )\n if self.file_writer.is_already_cached(hash_current_animation):\n logger.info(\n f\"Animation {self.num_plays} : Using cached data (hash : %(hash_current_animation)s)\",\n {\"hash_current_animation\": hash_current_animation},\n )\n self.skip_animations = True\n # adding None as a partial movie file will make file_writer ignore the latter.\n self.file_writer.add_partial_movie_file(hash_current_animation)\n self.animations_hashes.append(hash_current_animation)\n logger.debug(\n \"List of the first few animation hashes of the scene: %(h)s\",\n {\"h\": str(self.animations_hashes[:5])},\n )\n\n # Save a static image, to avoid rendering non moving objects.\n self.static_image = self.save_static_frame_data(scene, scene.static_mobjects)\n\n self.file_writer.begin_animation(not self.skip_animations)\n scene.begin_animations()\n if scene.is_current_animation_frozen_frame():\n self.update_frame(scene)\n # self.duration stands for the total run time of all the animations.\n # In this case, as there is only a wait, it will be the length of the wait.\n self.freeze_current_frame(scene.duration)\n else:\n scene.play_internal()\n self.file_writer.end_animation(not self.skip_animations)\n\n self.num_plays += 1\n\n def update_frame( # TODO Description in Docstring\n self,\n scene,\n mobjects=None,\n include_submobjects=True,\n ignore_skipping=True,\n **kwargs,\n ):\n \"\"\"Update the frame.\n\n Parameters\n ----------\n mobjects: list, optional\n list of mobjects\n\n background: np.ndarray, optional\n Pixel Array for Background.\n\n include_submobjects: bool, optional\n\n ignore_skipping : bool, optional\n\n **kwargs\n\n \"\"\"\n if self.skip_animations and not ignore_skipping:\n return\n if not mobjects:\n mobjects = list_update(\n scene.mobjects,\n scene.foreground_mobjects,\n )\n if self.static_image is not None:\n self.camera.set_frame_to_background(self.static_image)\n else:\n self.camera.reset()\n\n kwargs[\"include_submobjects\"] = include_submobjects\n self.camera.capture_mobjects(mobjects, **kwargs)\n\n def render(self, scene, time, moving_mobjects):\n self.update_frame(scene, moving_mobjects)\n self.add_frame(self.get_frame())\n\n def get_frame(self):\n \"\"\"\n Gets the current frame as NumPy array.\n\n Returns\n -------\n np.array\n NumPy array of pixel values of each pixel in screen.\n The shape of the array is height x width x 3\n \"\"\"\n return np.array(self.camera.pixel_array)\n\n def add_frame(self, frame, num_frames=1):\n \"\"\"\n Adds a frame to the video_file_stream\n\n Parameters\n ----------\n frame : numpy.ndarray\n The frame to add, as a pixel array.\n num_frames: int\n The number of times to add frame.\n \"\"\"\n dt = 1 / self.camera.frame_rate\n self.time += num_frames * dt\n if self.skip_animations:\n return\n for _ in range(num_frames):\n self.file_writer.write_frame(frame)\n\n def freeze_current_frame(self, duration: float):\n \"\"\"Adds a static frame to the movie for a given duration. 
The static frame is the current frame.\n\n Parameters\n ----------\n duration : float\n [description]\n \"\"\"\n dt = 1 / self.camera.frame_rate\n self.add_frame(\n self.get_frame(),\n num_frames=int(duration / dt),\n )\n\n def show_frame(self):\n \"\"\"\n Opens the current frame in the Default Image Viewer\n of your system.\n \"\"\"\n self.update_frame(ignore_skipping=True)\n self.camera.get_image().show()\n\n def save_static_frame_data(\n self, scene, static_mobjects: typing.Iterable[Mobject]\n ) -> typing.Iterable[Mobject]:\n \"\"\"Compute and save the static frame, that will be reused at each frame to avoid to unecesseraly computer\n static mobjects.\n\n Parameters\n ----------\n scene : Scene\n The scene played.\n static_mobjects : typing.Iterable[Mobject]\n Static mobjects of the scene. If None, self.static_image is set to None\n\n Returns\n -------\n typing.Iterable[Mobject]\n the static image computed.\n \"\"\"\n if static_mobjects == None or len(static_mobjects) == 0:\n self.static_image = None\n return\n self.update_frame(scene, mobjects=static_mobjects)\n self.static_image = self.get_frame()\n return self.static_image\n\n def update_skipping_status(self):\n \"\"\"\n This method is used internally to check if the current\n animation needs to be skipped or not. It also checks if\n the number of animations that were played correspond to\n the number of animations that need to be played, and\n raises an EndSceneEarlyException if they don't correspond.\n \"\"\"\n if config[\"save_last_frame\"]:\n self.skip_animations = True\n if config[\"from_animation_number\"]:\n if self.num_plays < config[\"from_animation_number\"]:\n self.skip_animations = True\n if config[\"upto_animation_number\"]:\n if self.num_plays > config[\"upto_animation_number\"]:\n self.skip_animations = True\n raise EndSceneEarlyException()\n\n def scene_finished(self, scene):\n # If no animations in scene, render an image instead\n if self.num_plays:\n self.file_writer.finish()\n elif config.write_to_movie:\n config.save_last_frame = True\n config.write_to_movie = False\n else:\n self.update_frame(scene)\n\n if config[\"save_last_frame\"]:\n self.update_frame(scene)\n self.file_writer.save_final_image(self.camera.get_image())\n"
] |
[
[
"numpy.array"
]
] |
transcendentsky/detection_models
|
[
"185f4bcccd5ab2c2f8edac37c76a9ccc47f73883",
"185f4bcccd5ab2c2f8edac37c76a9ccc47f73883"
] |
[
"ssd.pytorch/layers/box_utils.py",
"ssd.pytorch/train.py"
] |
[
"# -*- coding: utf-8 -*-\nimport torch\n\n\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2], 1) # w, h\n\n\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [A,4].\n box_b: (tensor) bounding boxes, Shape: [B,4].\n Return:\n (tensor) intersection area, Shape: [A,B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n threshold: (float) The overlap threshold used when mathing boxes.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n variances: (tensor) Variances corresponding to each prior coord,\n Shape: [num_priors, 4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n idx: (int) current batch index\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = 
overlaps.max(1, keepdim=True)\n # [1,num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes (tensor), Shape: [num_priors, 4]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n ### Fixed Inf loss\n # g_wh = torch.log(g_wh) / variances[1]\n g_wh = torch.log(g_wh + 1e-10) / variances[1]\n # return target for smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The 
indices of the kept boxes with respect to num_priors.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n print(boxes.size())\n print(\"********** Wocao?????? \")\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count\n",
"from __future__ import print_function\nfrom data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport sys\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nfrom tensorboardX import SummaryWriter\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\nwriter = None\nlr_sche = None\nlog_folder = None\nCOCO_ROOT = None\n\ndef train():\n if args.dataset == 'COCO':\n if args.dataset_root == VOC_ROOT:\n if not os.path.exists(COCO_ROOT):\n parser.error('Must specify dataset_root if specifying dataset')\n print(\"WARNING: Using default COCO dataset_root because \" +\n \"--dataset_root was not specified.\")\n args.dataset_root = COCO_ROOT\n cfg = coco\n dataset = COCODetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n elif args.dataset == 'VOC':\n # if args.dataset_root == COCO_ROOT:\n # parser.error('Must specify dataset if specifying dataset_root')\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n\n if args.visdom:\n import visdom\n viz = visdom.Visdom()\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.cuda:\n net = 
torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading {}...'.format(args.resume))\n ssd_net.load_weights(args.resume)\n else:\n vgg_weights = torch.load(args.save_folder + args.basenet)\n print('Loading base network...')\n ssd_net.vgg.load_state_dict(vgg_weights)\n\n if args.cuda:\n net = net.cuda()\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n ssd_net.extras.apply(weights_init)\n ssd_net.loc.apply(weights_init)\n ssd_net.conf.apply(weights_init)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, args.cuda, lm=False)\n\n # initSummaty()\n log_folder = './results/' + net.__class__.__name__ + '/' + optimizer.__class__.__name__ + '/' + str(np.random.randint(999)) + '/'\n print(\"log_folder: \", log_folder)\n writer = SummaryWriter(log_folder)\n\n net.train()\n # loss counters\n loc_loss = 0\n conf_loss = 0\n epoch = 0\n print('Loading the dataset...')\n\n epoch_size = len(dataset) // args.batch_size\n print('Training SSD on:', dataset.name)\n print('Using the specified args:')\n print(args)\n\n step_index = 0\n\n if args.visdom:\n vis_title = 'SSD.PyTorch on ' + dataset.name\n vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']\n iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)\n epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n # create batch iterator\n batch_iterator = iter(data_loader)\n for iteration in range(args.start_iter, cfg['max_iter']):\n if args.visdom and iteration != 0 and (iteration % epoch_size == 0):\n update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,\n 'append', epoch_size)\n writer.add_scalar('loc_loss', loc_loss, epoch)\n writer.add_scalar('conf_loss', conf_loss, epoch)\n # reset epoch loss counters\n loc_loss = 0\n conf_loss = 0\n epoch += 1\n\n if iteration in cfg['lr_steps']:\n step_index += 1\n adjust_learning_rate(optimizer, args.gamma, step_index)\n\n # load train data\n images, targets = next(batch_iterator)\n\n if args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n images = Variable(images)\n targets = [Variable(ann, volatile=True) for ann in targets]\n # forward\n t0 = time.time()\n out = net(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n t1 = time.time()\n loc_loss += loss_l.data[0]\n conf_loss += loss_c.data[0]\n\n if iteration % 10 == 0:\n print('timer: %.4f sec.' 
% (t1 - t0))\n print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')\n\n if args.visdom:\n update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],\n iter_plot, epoch_plot, 'append')\n\n if iteration != 0 and iteration % 5000 == 0:\n print('Saving state, iter:', iteration)\n torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +\n repr(iteration) + '.pth')\n torch.save(ssd_net.state_dict(),\n args.save_folder + '' + args.dataset + '.pth')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n # update to pytorch 0.4\n init.xavier_uniform_(param)\n\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n return viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel=_xlabel,\n ylabel=_ylabel,\n title=_title,\n legend=_legend\n )\n )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n epoch_size=1):\n viz.line(\n X=torch.ones((1, 3)).cpu() * iteration,\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n win=window1,\n update=update_type\n )\n # initialize epoch plot on first iteration\n if iteration == 0:\n viz.line(\n X=torch.zeros((1, 3)).cpu(),\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n win=window2,\n update=True\n )\n\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.cat",
"torch.mul",
"torch.clamp",
"torch.index_select",
"torch.log",
"torch.exp"
],
[
"torch.zeros",
"torch.autograd.Variable",
"torch.set_default_tensor_type",
"torch.nn.init.xavier_uniform_",
"torch.ones",
"torch.cuda.is_available",
"numpy.random.randint",
"torch.utils.data.DataLoader",
"torch.load",
"torch.Tensor",
"torch.nn.DataParallel"
]
] |
marcelo-santos-12/tcc
|
[
"a5dd5c4c4b923fbd3753b8e4749f5815da2305dd"
] |
[
"lbp_module/texture.py"
] |
[
"\"\"\"\nMethods to characterize image textures.\n\"\"\"\n\nimport numpy as np\nimport warnings\nfrom .utils._texture import _local_binary_pattern\nfrom .utils._texture_ilbp import _improved_local_binary_pattern\nfrom .utils._texture_hlbp import _hamming_local_binary_pattern\nfrom .utils._texture_elbp import _extended_local_binary_pattern\nfrom .utils._texture_clbp import _completed_local_binary_pattern\nfrom skimage.feature import local_binary_pattern as lbp\nfrom . import bins_ROR\n\nDEFAULT = 'default'\nROR = 'ror'\nUNIFORM = 'uniform' \nNRI_UNIFORM = 'nri_uniform'\nVAR = 'var'\n\nmethods = {\n DEFAULT: ord('D'),\n ROR: ord('R'),\n UNIFORM: ord('U'),\n NRI_UNIFORM: ord('N'),\n VAR: ord('V')\n}\n\ndef original_lbp(image, P, R, method, block=(1,1)):\n check_nD(image, 2)\n image = np.ascontiguousarray(image, dtype=np.double)\n output = lbp(image, P, R, method)\n\n if method == DEFAULT:\n bins = 2**P\n \n elif method == UNIFORM:\n bins = P + 2\n\n elif method == NRI_UNIFORM:\n bins = P * (P - 1) + 3\n \n elif method == ROR:\n bins = bins_ROR[str(P)]\n \n else: # method == VAR\n bins = None\n\n return histogram(output, bins, block)\n\ndef improved_lbp(image, P, R, method, block=(1,1),):\n check_nD(image, 2)\n image = np.ascontiguousarray(image, dtype=np.double)\n output = _improved_local_binary_pattern(image, P, R, methods[method.lower()]) \n\n if method == DEFAULT:\n bins = 2**(P + 1)\n \n elif method == UNIFORM:\n bins = P + 3\n\n elif method == NRI_UNIFORM:\n bins = (P + 1) * P + 3\n \n elif method == ROR:\n bins = bins_ROR[str(P + 1)]\n \n else: # method == VAR\n bins = None\n\n return histogram(output, bins, block)\n\ndef hamming_lbp(image, P, R, method, block=(1,1),):\n assert method == UNIFORM or method == NRI_UNIFORM, 'Method --> {}. Dont permissed for this variant.'.format(method)\n check_nD(image, 2)\n image = np.ascontiguousarray(image, dtype=np.double)\n output = _hamming_local_binary_pattern(image, P, R, methods[method.lower()])\n \n if method == UNIFORM:\n bins = P + 1\n\n else: # method == NRI_UNIFORM:\n bins = P * (P - 1) + 2\n \n return histogram(output, bins, block)\n\ndef completed_lbp(image, P, R, method, block=(1,1),):\n check_nD(image, 2)\n image = np.ascontiguousarray(image, dtype=np.double)\n output = _completed_local_binary_pattern(image, P, R, methods[method.lower()])\n\n if method == DEFAULT:\n bins = 2**P\n \n elif method == UNIFORM:\n bins = P + 2\n\n elif method == NRI_UNIFORM:\n bins = P * (P - 1) + 3\n \n elif method == ROR:\n bins = bins_ROR[str(P)]\n \n else: # method == VAR\n bins = None\n\n def histogram_completed(output, bins, _block):\n r_range = int(output.shape[1]/_block[0])\n c_range = int(output.shape[2]/_block[1])\n\n hist = []\n\n for r in range(0, output.shape[1], r_range):\n for c in range(0, output.shape[2], c_range):\n\n # computing histogram 2d of Signal and Center component\n hist_s_c, _, _ = np.histogram2d(x=output[0].flatten(), y=output[2].flatten(), bins=[bins, 1])\n \n # computing histogram 1d of magnitude component\n hist_m , _ = np.histogram(a=output[1], bins=bins)\n\n # concatening the histograms computed previously\n hist_total = hist_s_c.flatten() + hist_m.flatten()\n \n hist.extend(list(hist_total))\n \n return np.asarray(hist)\n\n return histogram_completed(output, bins, block)\n\ndef extended_lbp(image, P, R, method, block=(1,1),):\n check_nD(image, 2)\n\n image = np.ascontiguousarray(image, dtype=np.double)\n output = _extended_local_binary_pattern(image, P, R, methods[method.lower()])\n\n if method == DEFAULT:\n bins = 2**P\n \n 
elif method == UNIFORM:\n bins = P + 2\n\n elif method == NRI_UNIFORM:\n bins = P * (P - 1) + 3\n \n elif method == ROR:\n bins = bins_ROR[str(P)]\n \n else: # method == VAR\n bins = None\n\n return histogram(output, bins, block)\n\ndef check_nD(array, ndim, arg_name='image'):\n array = np.asanyarray(array)\n msg_incorrect_dim = \"The parameter `%s` must be a %s-dimensional array\"\n msg_empty_array = \"The parameter `%s` cannot be an empty array\"\n if isinstance(ndim, int):\n ndim = [ndim]\n if array.size == 0:\n raise ValueError(msg_empty_array % (arg_name))\n if not array.ndim in ndim:\n raise ValueError(msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])))\n\ndef histogram(output, bins, block):\n r_range = int(output.shape[0]/block[0])\n c_range = int(output.shape[1]/block[1])\n hist = []\n\n for r in range(0, output.shape[0], r_range):\n for c in range(0, output.shape[1], c_range):\n\n hist_roi = np.histogram(output[r:r + r_range, c:c + c_range], bins=bins)[0]\n \n hist.extend(list(hist_roi))\n \n return np.asarray(hist)"
] |
[
[
"numpy.ascontiguousarray",
"numpy.histogram",
"numpy.asarray",
"numpy.asanyarray"
]
] |
morris-frank/scvi-tools
|
[
"b828c75455bdd9e9558882d0b110ed97ba135184"
] |
[
"scvi/model/_condscvi.py"
] |
[
"import logging\nimport warnings\nfrom typing import Optional, Union\n\nimport numpy as np\nimport torch\nfrom anndata import AnnData\n\nfrom scvi import _CONSTANTS\nfrom scvi.model.base import (\n BaseModelClass,\n RNASeqMixin,\n UnsupervisedTrainingMixin,\n VAEMixin,\n)\nfrom scvi.module import VAEC\n\nlogger = logging.getLogger(__name__)\n\n\nclass CondSCVI(RNASeqMixin, VAEMixin, UnsupervisedTrainingMixin, BaseModelClass):\n \"\"\"\n Conditional version of single-cell Variational Inference, used for hierarchical deconvolution of spatial transcriptomics data.\n\n Parameters\n ----------\n adata\n AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.\n n_hidden\n Number of nodes per hidden layer.\n n_latent\n Dimensionality of the latent space.\n n_layers\n Number of hidden layers used for encoder and decoder NNs.\n dropout_rate\n Dropout rate for the encoder neural networks.\n weight_obs\n Whether to reweight observations by their inverse proportion (useful for lowly abundant cell types)\n **module_kwargs\n Keyword args for :class:`~scvi.modules.VAEC`\n\n Examples\n --------\n >>> adata = anndata.read_h5ad(path_to_anndata)\n >>> scvi.data.setup_anndata(adata, batch_key=\"batch\")\n >>> vae = scvi.external.CondSCVI(adata)\n >>> vae.train()\n >>> adata.obsm[\"X_CondSCVI\"] = vae.get_latent_representation()\n \"\"\"\n\n def __init__(\n self,\n adata: AnnData,\n n_hidden: int = 128,\n n_latent: int = 5,\n n_layers: int = 2,\n dropout_rate: float = 0.1,\n weight_obs: bool = False,\n **module_kwargs,\n ):\n super(CondSCVI, self).__init__(adata)\n\n n_labels = self.summary_stats[\"n_labels\"]\n n_vars = self.summary_stats[\"n_vars\"]\n if weight_obs:\n ct_counts = adata.obs[\"_scvi_labels\"].value_counts()[range(n_labels)].values\n ct_prop = ct_counts / np.sum(ct_counts)\n ct_prop[ct_prop < 0.05] = 0.05\n ct_prop = ct_prop / np.sum(ct_prop)\n ct_weight = 1.0 / ct_prop\n module_kwargs.update({\"ct_weight\": ct_weight})\n\n self.module = VAEC(\n n_input=n_vars,\n n_labels=n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dropout_rate=dropout_rate,\n **module_kwargs,\n )\n self._model_summary_string = (\n \"Conditional SCVI Model with the following params: \\nn_hidden: {}, n_latent: {}, n_layers: {}, dropout_rate: {}, weight_obs: {}\"\n ).format(n_hidden, n_latent, n_layers, dropout_rate, weight_obs)\n self.init_params_ = self._get_init_params(locals())\n\n @torch.no_grad()\n def get_vamp_prior(\n self,\n adata: Optional[AnnData] = None,\n p: int = 50,\n ) -> np.ndarray:\n r\"\"\"\n Return an empirical prior over the cell-type specific latent space (vamp prior) that may be used for deconvolution.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n p\n number of components in the mixture model underlying the empirical prior\n\n Returns\n -------\n mean_vprior: np.ndarray\n (n_labels, p, D) array\n var_vprior\n (n_labels, p, 3) array\n \"\"\"\n if self.is_trained_ is False:\n warnings.warn(\n \"Trying to query inferred values from an untrained model. 
Please train the model first.\"\n )\n\n adata = self._validate_anndata(adata)\n\n mean_vprior = np.zeros(\n (self.summary_stats[\"n_labels\"], p, self.module.n_latent)\n )\n var_vprior = np.zeros((self.summary_stats[\"n_labels\"], p, self.module.n_latent))\n key = self.scvi_setup_dict_[\"categorical_mappings\"][\"_scvi_labels\"][\n \"original_key\"\n ]\n mapping = self.scvi_setup_dict_[\"categorical_mappings\"][\"_scvi_labels\"][\n \"mapping\"\n ]\n for ct in range(self.summary_stats[\"n_labels\"]):\n # pick p cells\n local_indices = np.random.choice(\n np.where(adata.obs[key] == mapping[ct])[0], p\n )\n # get mean and variance from posterior\n scdl = self._make_data_loader(\n adata=adata, indices=local_indices, batch_size=p\n )\n mean = []\n var = []\n for tensors in scdl:\n x = tensors[_CONSTANTS.X_KEY]\n y = tensors[_CONSTANTS.LABELS_KEY]\n out = self.module.inference(x, y)\n mean_, var_ = out[\"qz_m\"], out[\"qz_v\"]\n mean += [mean_.cpu()]\n var += [var_.cpu()]\n\n mean_vprior[ct], var_vprior[ct] = np.array(torch.cat(mean)), np.array(\n torch.cat(var)\n )\n\n return mean_vprior, var_vprior\n\n def train(\n self,\n max_epochs: int = 400,\n lr: float = 0.001,\n use_gpu: Optional[Union[str, int, bool]] = None,\n train_size: float = 1,\n validation_size: Optional[float] = None,\n batch_size: int = 128,\n plan_kwargs: Optional[dict] = None,\n **kwargs,\n ):\n \"\"\"\n Trains the model using MAP inference.\n\n Parameters\n ----------\n max_epochs\n Number of epochs to train for\n lr\n Learning rate for optimization.\n use_gpu\n Use default GPU if available (if None or True), or index of GPU to use (if int),\n or name of GPU (if str), or use CPU (if False).\n train_size\n Size of training set in the range [0.0, 1.0].\n validation_size\n Size of the test set. If `None`, defaults to 1 - `train_size`. If\n `train_size + validation_size < 1`, the remaining cells belong to a test set.\n batch_size\n Minibatch size to use during training.\n plan_kwargs\n Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to\n `train()` will overwrite values present in `plan_kwargs`, when appropriate.\n **kwargs\n Other keyword args for :class:`~scvi.train.Trainer`.\n \"\"\"\n update_dict = {\n \"lr\": lr,\n }\n if plan_kwargs is not None:\n plan_kwargs.update(update_dict)\n else:\n plan_kwargs = update_dict\n super().train(\n max_epochs=max_epochs,\n use_gpu=use_gpu,\n train_size=train_size,\n validation_size=validation_size,\n batch_size=batch_size,\n plan_kwargs=plan_kwargs,\n **kwargs,\n )\n"
] |
[
[
"torch.cat",
"numpy.zeros",
"numpy.sum",
"torch.no_grad",
"numpy.where"
]
] |
nateandre/machine_learning
|
[
"5c6535a18b46feaa5ffc38670e6404869836d2b1"
] |
[
"pointer-gen_implementations/code/pointer_gen_multitask.py"
] |
[
"\"\"\" Multitask Pointer-generation model implementation\n\nAuthor: Nathaniel Andre\n\"\"\"\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Dense,Bidirectional,LSTM,Input,RepeatVector,Activation,Softmax,Embedding,Dot,Lambda\nfrom tensorflow.keras.layers import Softmax,Concatenate,Dropout\nfrom tensorflow.keras.layers import LayerNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam,Adagrad\nfrom tensorflow.keras.losses import sparse_categorical_crossentropy\nimport tensorflow as tf\ntf.keras.backend.set_floatx('float32')\nimport numpy as np\nfrom sklearn.utils import shuffle # does not shuffle in place\nimport sys\nfrom datetime import datetime\nimport time\nimport pytz\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef main():\n \"\"\" main function\n \"\"\"\n prefix=\"_500\"\n data_dir= \"../data/\"\n x = np.load(data_dir+\"x{}.npy\".format(prefix))\n x_indices = np.load(data_dir+\"x_indices{}.npy\".format(prefix)) # shape:(5900, 500)\n att_mask = np.load(data_dir+\"att_mask{}.npy\".format(prefix))\n loss_mask = np.load(data_dir+\"loss_mask{}.npy\".format(prefix)) # shape:(5900, 101)\n decoder_x = np.load(data_dir+\"decoder_x{}.npy\".format(prefix))\n y_indices = np.load(data_dir+\"y_indices{}.npy\".format(prefix))\n embedding_matrix = np.load(data_dir+\"word_embeddings.npy\".format(prefix)) # (30000,100)\n multitask_y = np.load(data_dir+\"multitask_y{}.npy\".format(prefix)) # (5900, 500)\n multitask_loss_mask = np.load(data_dir+\"multitask_loss_mask{}.npy\".format(prefix))\n \n np_int = \"int32\" # uploaded data should already be of these types\n np_float = \"float32\"\n\n train_end = 5500\n x_train = x[0:train_end] # shape:(5500, 500)\n x_indices_train = x_indices[0:train_end] \n att_mask_train = att_mask[0:train_end]\n loss_mask_train = loss_mask[0:train_end]\n decoder_x_train = decoder_x[0:train_end]\n y_indices_train = y_indices[0:train_end]\n multitask_y_train = multitask_y[0:train_end]\n multitask_loss_mask_train = multitask_loss_mask[0:train_end]\n\n test_start = 5500\n test_val_size= 200\n x_val = x[test_start:test_start+test_val_size] # shape:(200, 500)\n x_indices_val = x_indices[test_start:test_start+test_val_size]\n att_mask_val = att_mask[test_start:test_start+test_val_size]\n loss_mask_val = loss_mask[test_start:test_start+test_val_size]\n decoder_x_val = decoder_x[test_start:test_start+test_val_size]\n y_indices_val = y_indices[test_start:test_start+test_val_size]\n multitask_y_val = multitask_y[test_start:test_start+test_val_size]\n multitask_loss_mask_val = multitask_loss_mask[test_start:test_start+test_val_size]\n\n x_test = x[test_start+test_val_size:test_start+test_val_size*2] # shape:(200, 500)\n x_indices_test = x_indices[test_start+test_val_size:test_start+test_val_size*2]\n att_mask_test = att_mask[test_start+test_val_size:test_start+test_val_size*2]\n loss_mask_test = loss_mask[test_start+test_val_size:test_start+test_val_size*2]\n decoder_x_test = decoder_x[test_start+test_val_size:test_start+test_val_size*2]\n y_indices_test = y_indices[test_start+test_val_size:test_start+test_val_size*2]\n multitask_y_test = multitask_y[test_start+test_val_size:test_start+test_val_size*2]\n multitask_loss_mask_test = multitask_loss_mask[test_start+test_val_size:test_start+test_val_size*2]\n\n tf_float = tf.float32\n tf_int = tf.int32\n continue_training=False # if should continue training from the previous trained parameters; note the optimizer will be starting from scratch\n 
use_dropout=False\n batch_size= 10\n optimizer = Adagrad(learning_rate=0.05,initial_accumulator_value=0.1,clipnorm=2.0) # Adam(lr=0.01)\n epochs=20\n use_coverage_loss=False\n coverage_lam=0.0\n model_save_path=\"../model_params/\" # for loading model parameters\n model_checkpoints_path=\"../model_params/\" # for storing model information\n model_checkpoints_name=\"model_checkpoints.txt\"\n\n print(x.shape,x_train.shape,x_val.shape,x_test.shape,embedding_matrix.shape,multitask_y_train.shape)\n print(\"\\nData processing done.\\n\")\n start = time.time()\n encoder,decoder = get_pointer_gen_network(embedding_matrix=embedding_matrix,embedding_dim=100,input_len=500,tf_float=tf_float,tf_int=tf_int,use_dropout=use_dropout)\n print(\"\\nModel initialized. Took {} min.\\n\".format(round((time.time()-start)/60,2)))\n\n if continue_training is True:\n # loading model weights:\n encoder.load_weights(model_save_path+\"encoder\")\n decoder.load_weights(model_save_path+\"decoder\")\n # loading optimizer state:\n grad_vars = encoder.trainable_variables+decoder.trainable_variables\n optimizer.apply_gradients(zip([tf.zeros_like(w) for w in grad_vars],grad_vars)) # giving optimizer information about the trainable weights, so the old values can be loaded\n optimizer_weights = np.load(model_save_path+\"optimizer_weights.npy\",allow_pickle=True)\n optimizer.set_weights(optimizer_weights)\n print(\"\\nModel parameters loaded.\\n\")\n\n with tf.device('/device:GPU:0'): # ensure the GPU is being used during training\n train_model(x_train,x_indices_train,att_mask_train,loss_mask_train,decoder_x_train,y_indices_train,multitask_y_train,multitask_loss_mask_train,x_val,x_indices_val,att_mask_val,loss_mask_val,decoder_x_val,y_indices_val,multitask_y_val,multitask_loss_mask_val,x_test,x_indices_test,att_mask_test,loss_mask_test,decoder_x_test,y_indices_test,multitask_y_test,multitask_loss_mask_test,encoder,decoder,batch_size,optimizer,epochs=epochs,coverage_lam=coverage_lam,use_coverage_loss=use_coverage_loss,model_save_path=model_save_path,model_checkpoints_path=model_checkpoints_path,model_checkpoints_name=model_checkpoints_name)\n\n\ndef apply_scatter_nd(updates,indices,tf_int,tf_float):\n \"\"\" applies scatter_nd over the batch dimension\n \"\"\"\n out = Lambda(lambda entry: K.map_fn(lambda entry: tf.scatter_nd(entry[0],entry[1],tf.constant([30100],dtype=tf_int)),entry,dtype=tf_float))([indices,updates]) # assuming a max vocab_size+unique_words_in_input of 30000+100\n return out\n\n\ndef apply_scatter_nd_add(tensor,updates,indices,tf_int,tf_float):\n \"\"\" applies the tensor_scatter_nd_add over the batch dimension\n \"\"\"\n out = Lambda(lambda entry: K.map_fn(lambda entry: tf.tensor_scatter_nd_add(entry[0],entry[1],entry[2]),entry,dtype=tf_float))([tensor,indices,updates])\n return out\n\n\ndef pointer_gen_encoder(embedding_layer,mt_w1,mt_w2,mt_w3,encoder_h=128,input_len=500,tf_int=tf.int32,use_dropout=False):\n \"\"\" Returns the encoder portion of the pointer-gen network\n \"\"\"\n x = Input(shape=(input_len),dtype=tf_int) # input to the encoder\n input_e = embedding_layer(x) # embeddings for the input\n if use_dropout:\n input_e = Dropout(0.25)(input_e)\n h = Bidirectional(LSTM(encoder_h,activation=\"tanh\",return_sequences=True),merge_mode=\"concat\")(input_e) # encoder\n\n m_h1 = mt_w1(h) # multitask component\n m_h2 = mt_w2(m_h1)\n m_pred = mt_w3(m_h2)\n m_pred = tf.squeeze(m_pred) # (n,500,1)->(n,500)\n \n model = Model(inputs=[x],outputs=[h,m_pred])\n return model\n\n\ndef 
pointer_gen_decoder(embedding_layer,decoder_lstm,att_w1,att_w2,att_w3,att_v,vocab_d,vocab_d_pre,pgen_w1,pgen_w2,pgen_w3,encoder_h=128,input_len=500,output_len=101,tf_float=tf.float32,tf_int=tf.int32):\n \"\"\" Returns the decoder portion of the pointer-gen network\n args:\n input_len: the length of the input sequence (to the encoder)\n output_len: the length of the output sequence (from the decoder)\n tf_float,tf_int: defining datatypes for use in this model\n \"\"\"\n h = Input(shape=(input_len,encoder_h*2),dtype=tf_float) # the input embedding from the encoder model\n x_indices_ = Input(shape=(input_len),dtype=tf_int) # represents where each input word prob. should be added in joint prob. vector\n x_indices = tf.expand_dims(x_indices_,axis=-1)\n fixed_vocab_indices_ = Input(shape=(30000),dtype=tf_int) # the size of the input vocabulary\n fixed_vocab_indices = tf.expand_dims(fixed_vocab_indices_,axis=-1)\n att_mask = Input(shape=(input_len),dtype=tf_float) # mask used with the attention distribution to mask out padding\n decoder_x = Input(shape=(output_len),dtype=tf_int) # delayed y_data for input to the decoder (for teacher-forcing)\n y_indices = Input(shape=(output_len),dtype=tf_int) # indices of the correct word in the joint_probabilities vector\n s_ = Input(shape=(256),dtype=tf_float) # decoder_h\n c_ = Input(shape=(256),dtype=tf_float)\n coverage_vector_ = Input(shape=(input_len),dtype=tf_float)\n s,c,coverage_vector = s_,c_,coverage_vector_\n \n decoder_e = embedding_layer(decoder_x) # embeddings for delayed input to the decoder\n outputs = [] # stores probability of correct ground-truth predictions at each decoder output step\n coverage_loss_contributions = [] # stores coverage loss contribution for each decoder output step\n \n for i in range(output_len): # loop through each step of the decoder\n decoder_input = decoder_e[:,i,:] # input to the decoder at this timestep\n s,_,c = decoder_lstm(tf.expand_dims(decoder_input,axis=1),initial_state=[s,c])\n \n # calculating attention (probabilities over input):\n s_rep = RepeatVector(input_len)(s) # copying the decoder hidden state\n e = att_v(Activation(\"tanh\")(att_w1(h)+att_w2(s_rep)+att_w3(tf.expand_dims(coverage_vector,axis=-1)))) # unscaled attention\n e = tf.squeeze(e,axis=-1)+att_mask # using attention mask (masks out padding in the input sequence)\n a = Activation(\"softmax\")(e) # scaled attention (represents prob. 
over input)\n \n # handling coverage vector computations:\n step_coverage_loss = tf.reduce_sum(tf.minimum(coverage_vector,a),axis=-1) # cov loss at this decoder step\n coverage_loss_contributions.append(step_coverage_loss)\n coverage_vector+=a\n \n # calculating probabilities over fixed vocabulary:\n context = Dot(axes=1)([a,h]) # calculating the context vector\n pre_vocab_prob = Concatenate()([s,context])\n pre_vocab_prob = vocab_d_pre(pre_vocab_prob) # extra Dense layer\n pre_vocab_prob = vocab_d(pre_vocab_prob)\n vocab_prob = Activation(\"softmax\")(pre_vocab_prob)\n \n # calculation probabilty for text generation:\n pre_gen_prob = pgen_w1(context)+pgen_w2(s)+pgen_w3(decoder_input)\n gen_prob = Activation(\"sigmoid\")(pre_gen_prob)\n \n # calculating joint-probability for generation/copying:\n vocab_prob *= gen_prob # probability of generating a word from the fixed vocabulary\n copy_prob = a*(1-gen_prob) # probability of copying a word from the input\n \n # creating the joint-probability vector:\n vocab_prob_projected = apply_scatter_nd(vocab_prob,fixed_vocab_indices,tf_int,tf_float)\n joint_prob = apply_scatter_nd_add(vocab_prob_projected,copy_prob,x_indices,tf_int,tf_float)\n \n # gathering predictions from joint-probability vector - doing it here will reduce memory consumption\n y_indices_i = tf.expand_dims(y_indices[:,i],axis=-1) # getting predictions at time i for whole batch\n predictions_i = tf.squeeze(tf.gather(joint_prob,y_indices_i,batch_dims=1,axis=-1),axis=-1)\n outputs.append(predictions_i)\n \n prediction_probabilities = K.permute_dimensions(tf.convert_to_tensor(outputs),(1,0))\n coverage_loss_contributions = K.permute_dimensions(tf.convert_to_tensor(coverage_loss_contributions),(1,0))\n \n model = Model(inputs=[h,x_indices_,decoder_x,att_mask,y_indices,s_,c_,coverage_vector_,fixed_vocab_indices_],outputs=[prediction_probabilities,coverage_loss_contributions])\n return model\n\n\ndef get_pointer_gen_network(embedding_matrix,embedding_dim=100,input_len=500,tf_float=tf.float32,tf_int=tf.int32,use_dropout=False,output_len=101):\n \"\"\" initializes re-used model layers and creates the pointer-gen keras model object\n args:\n embedding_matrix: the matrix of pretrained weights\n embedding_dim: the dimensionality of the word embeddings\n \"\"\"\n embedding_layer = Embedding(input_dim=30000,output_dim=embedding_dim,weights=[embedding_matrix],trainable=True,mask_zero=True) # re-used for both the encoder and decoder\n decoder_h=256\n encoder_h=128\n decoder_lstm = LSTM(decoder_h,activation=\"tanh\",return_state=True)\n att_w1 = Dense(256,use_bias=True,activation=None)\n att_w2 = Dense(256,use_bias=True,activation=None)\n att_w3 = Dense(256,use_bias=True,activation=None) # should be 256x1 weight matrix\n att_v = Dense(1,use_bias=False,activation=None)\n vocab_d_pre = Dense(512,use_bias=True,activation=\"relu\") # an additional hidden layer before prediction vocab probs.\n vocab_d = Dense(30000,use_bias=True,activation=None) # 30000 is fixed_vocabulary size\n pgen_w1 = Dense(1,use_bias=True,activation=None)\n pgen_w2 = Dense(1,use_bias=True,activation=None)\n pgen_w3 = Dense(1,use_bias=True,activation=None)\n mt_w1 = Dense(512,activation=\"relu\") # multitask components\n mt_w2 = Dense(256,activation=\"relu\")\n mt_w3 = Dense(1,activation=\"sigmoid\")\n\n if use_dropout:\n print(\"\\nUsing Dropout.\\n\")\n \n encoder = pointer_gen_encoder(embedding_layer,mt_w1,mt_w2,mt_w3,encoder_h=encoder_h,input_len=input_len,tf_int=tf_int,use_dropout=use_dropout)\n decoder = 
pointer_gen_decoder(embedding_layer,decoder_lstm,att_w1,att_w2,att_w3,att_v,vocab_d,vocab_d_pre,pgen_w1,pgen_w2,pgen_w3,encoder_h=encoder_h,input_len=input_len,output_len=output_len,tf_float=tf_float,tf_int=tf_int)\n return encoder,decoder\n\n\ndef loss_function(prediction_probabilities,loss_mask,coverage_loss,m_pred,multitask_loss_mask_subset,multitask_y_subset,lam,use_coverage_loss,multitask_lam=1.0,return_indiv_loss=False):\n \"\"\" Returns the loss for this batch - also allows for the returning of the loss value for the given input\n args:\n prediction_probabilities: model-assigned probabilities for ground-truth predictions\n loss_mask: vector of 1s,0s specifying whether an input should contribute to the loss\n coverage_loss: coverage loss for this batch of examples\n lam: hyperparameter determining the contribution of coverage_loss to overall loss\n use_coverage_loss: whether coverage loss should be used\n \"\"\"\n p_words = -tf.math.log(prediction_probabilities)\n p_words *= loss_mask # applying the loss mask\n p_words = tf.reduce_sum(p_words,axis=-1)\n general_loss_component = tf.reduce_mean(p_words)\n \n # incorporating the coverage loss:\n coverage_loss_component = 0\n if use_coverage_loss:\n coverage_loss *= loss_mask # applying the loss mask\n coverage_loss = tf.reduce_sum(coverage_loss,axis=-1)\n coverage_loss_component = lam*tf.reduce_mean(coverage_loss)\n\n # multitask loss component:\n multitask_loss_component = K.binary_crossentropy(multitask_y_subset,m_pred)\n multitask_loss_component *= multitask_y_subset # *= multitask_loss_mask_subset # determine whether applying mask to allow only y=1 or have both y=0 & y=1\n multitask_loss_component = tf.reduce_sum(multitask_loss_component,axis=-1)\n multitask_loss_component = multitask_lam*tf.reduce_mean(multitask_loss_component)\n \n total_loss = general_loss_component+coverage_loss_component+multitask_loss_component\n if return_indiv_loss:\n indiv_losses = p_words\n if use_coverage_loss:\n indiv_losses+=coverage_loss\n total_loss -= multitask_loss_component # remove multitask loss when calculating test and val losses (allows easy comparison with previous results)\n return total_loss,indiv_losses\n else:\n return total_loss\n\n\ndef get_validation_set_loss(x_val,x_indices_val,att_mask_val,loss_mask_val,decoder_x_val,y_indices_val,multitask_y_val,multitask_loss_mask_val,encoder,decoder,batch_size,coverage_lam,use_coverage_loss,epoch,checkpoints_path,s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset):\n \"\"\" Get the average loss for the validation set\n -also saves the validation and test losses for each example to a file\n \"\"\"\n losses = []\n checkpoints_file = open(checkpoints_path,\"a+\")\n checkpoints_file.write(\"-----epoch \"+str(epoch)+\":\\n\")\n for i in range(0,len(x_val),batch_size):\n x_subset = x_val[i:i+batch_size]\n x_indices_subset = x_indices_val[i:i+batch_size]\n decoder_x_subset = decoder_x_val[i:i+batch_size]\n att_mask_subset = att_mask_val[i:i+batch_size]\n y_indices_subset = y_indices_val[i:i+batch_size]\n loss_mask_subset = loss_mask_val[i:i+batch_size]\n multitask_y_subset = multitask_y_val[i:i+batch_size]\n multitask_loss_mask_subset = multitask_loss_mask_val[i:i+batch_size]\n\n h,m_pred = encoder(x_subset)\n joint_probabilities,coverage_loss = decoder([h,x_indices_subset,decoder_x_subset,att_mask_subset,y_indices_subset,s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset]) \n loss,indiv_losses = 
loss_function(joint_probabilities,loss_mask_subset,coverage_loss,m_pred,multitask_loss_mask_subset,multitask_y_subset,lam=coverage_lam,use_coverage_loss=use_coverage_loss,multitask_lam=0.0,return_indiv_loss=True)\n losses.append(float(loss))\n\n for j in range(batch_size): # saving loss value for each validation set example on individual line\n indiv_loss = indiv_losses[j]\n checkpoints_file.write(str(i+j)+\": \"+str(round(float(indiv_loss),6))+\"\\n\")\n\n return round(sum(losses)/max(len(losses),1),6)\n\n\n@tf.function\ndef training_step(encoder,decoder,optimizer,x_subset,x_indices_subset,decoder_x_subset,att_mask_subset,y_indices_subset,loss_mask_subset,s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset,multitask_loss_mask_subset,multitask_y_subset,coverage_lam,use_coverage_loss):\n \"\"\" training step - calculates the gradient w/ respect to encoder & decoder parameters\n - improves runtime by about 2x\n \"\"\"\n with tf.GradientTape() as tape:\n h,m_pred = encoder(x_subset)\n joint_probabilities,coverage_loss = decoder([h,x_indices_subset,decoder_x_subset,att_mask_subset,y_indices_subset,s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset])\n loss = loss_function(joint_probabilities,loss_mask_subset,coverage_loss,m_pred,multitask_loss_mask_subset,multitask_y_subset,lam=coverage_lam,use_coverage_loss=use_coverage_loss,return_indiv_loss=False)\n \n gradients = tape.gradient(loss, encoder.trainable_variables+decoder.trainable_variables)\n optimizer.apply_gradients(zip(gradients, encoder.trainable_variables+decoder.trainable_variables))\n return loss\n\n\ndef train_model(x,x_indices,att_mask,loss_mask,decoder_x,y_indices,multitask_y,multitask_loss_mask,x_val,x_indices_val,att_mask_val,loss_mask_val,decoder_x_val,y_indices_val,multitask_y_val,multitask_loss_mask_val,x_test,x_indices_test,att_mask_test,loss_mask_test,decoder_x_test,y_indices_test,multitask_y_test,multitask_loss_mask_test,encoder,decoder,batch_size,optimizer,epochs,coverage_lam,use_coverage_loss,model_save_path,model_checkpoints_path,model_checkpoints_name):\n \"\"\" training the model\n args:\n x,x_indices,...: training data\n x_val,x_indices_val,...: validation data\n x_test,x_indices_test,...: test data\n model_checkpoints_path: saves checkpoint data to a file after each epoch\n \"\"\"\n print_epoch_value = int((100//batch_size)*batch_size)\n save_epoch_value = int((1000//batch_size)*batch_size)\n\n x,x_indices,att_mask,loss_mask,decoder_x,y_indices,multitask_y,multitask_loss_mask = shuffle(x,x_indices,att_mask,loss_mask,decoder_x,y_indices,multitask_y,multitask_loss_mask) # shuffling data\n # initializing the fixed input to the decoder model:\n s_subset = np.zeros((batch_size,256)).astype(\"float32\")\n c_subset = np.zeros((batch_size,256)).astype(\"float32\")\n coverage_vector_subset = np.zeros((batch_size,500)).astype(\"float32\")\n fixed_vocab_indices_subset = np.vstack([[i for i in range(30000)] for _ in range(batch_size)]).astype(\"int32\")\n\n for epoch_i in range(epochs): # epochs\n checkpoints_file = open(model_checkpoints_path+model_checkpoints_name,\"a+\")\n date_time = datetime.now(tz=pytz.utc).astimezone(pytz.timezone('US/Pacific')).strftime(\"%m/%d/%y %H:%M:%S\")\n checkpoints_file.write(date_time+\"\\n\"+\"--------------------------------\"+\"\\n\")\n print(\"training start time:\",date_time)\n\n losses = []\n for i in range(0,len(x),batch_size): # looping through each batch\n x_subset = x[i:i+batch_size]\n x_indices_subset = x_indices[i:i+batch_size]\n decoder_x_subset = 
decoder_x[i:i+batch_size]\n att_mask_subset = att_mask[i:i+batch_size]\n y_indices_subset = y_indices[i:i+batch_size]\n loss_mask_subset = loss_mask[i:i+batch_size]\n multitask_y_subset = multitask_y[i:i+batch_size]\n multitask_loss_mask_subset = multitask_loss_mask[i:i+batch_size]\n batch_loss = training_step(encoder,decoder,optimizer,x_subset,x_indices_subset,decoder_x_subset,att_mask_subset,y_indices_subset,loss_mask_subset,s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset,multitask_loss_mask_subset,multitask_y_subset,coverage_lam,use_coverage_loss)\n \n float_loss = round(float(batch_loss),6)\n losses.append(float_loss)\n if i % print_epoch_value == 0:\n date_time = datetime.now(tz=pytz.utc).astimezone(pytz.timezone('US/Pacific')).strftime(\"%m/%d/%y %H:%M:%S\")\n print(\"i:\",i,\":\",float_loss,\";\",date_time)\n \n if i % save_epoch_value == 0: # adding another model checkpoint\n encoder.save_weights(model_save_path+\"encoder\")\n decoder.save_weights(model_save_path+\"decoder\")\n np.save(model_save_path+\"optimizer_weights.npy\",optimizer.get_weights()) # saving optimizer state\n \n # writing out information to screen and saving to checkpoints file\n print_train_loss = \"epoch {}; training loss: {}\".format(epoch_i+1,round(sum(losses)/max(len(losses),1),6))\n print(print_train_loss)\n val_loss = get_validation_set_loss(x_val,x_indices_val,att_mask_val,loss_mask_val,decoder_x_val,y_indices_val,multitask_y_val,multitask_loss_mask_val,encoder,decoder,batch_size,coverage_lam,use_coverage_loss,epoch_i,model_checkpoints_path+\"val_losses.txt\",s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset)\n print_val_loss = \"epoch {}; validation loss: {}\".format(epoch_i+1,val_loss)\n print(print_val_loss)\n test_loss = get_validation_set_loss(x_test,x_indices_test,att_mask_test,loss_mask_test,decoder_x_test,y_indices_test,multitask_y_test,multitask_loss_mask_test,encoder,decoder,batch_size,coverage_lam,use_coverage_loss,epoch_i,model_checkpoints_path+\"test_losses.txt\",s_subset,c_subset,coverage_vector_subset,fixed_vocab_indices_subset)\n print_test_loss = \"epoch {}; test_set loss: {}\".format(epoch_i+1,test_loss)\n print(print_test_loss)\n last_line = \"--------------------------------\"\n print(last_line)\n date_time = datetime.now(tz=pytz.utc).astimezone(pytz.timezone('US/Pacific')).strftime(\"%m/%d/%y %H:%M:%S\") # time at which epoch completed\n checkpoints_file.write(date_time+\"\\n\")\n checkpoints_file.write(print_train_loss+\"\\n\")\n checkpoints_file.write(print_val_loss+\"\\n\")\n checkpoints_file.write(print_test_loss+\"\\n\")\n checkpoints_file.write(last_line+\"\\n\")\n checkpoints_file.close()\n encoder.save_weights(model_save_path+\"encoder\")\n decoder.save_weights(model_save_path+\"decoder\")\n np.save(model_save_path+\"optimizer_weights.npy\",optimizer.get_weights())\n\n\nif __name__==\"__main__\":\n main()\n"
] |
[
[
"tensorflow.keras.layers.RepeatVector",
"tensorflow.keras.layers.Activation",
"numpy.load",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_like",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dot",
"tensorflow.gather",
"sklearn.utils.shuffle",
"tensorflow.keras.backend.set_floatx",
"tensorflow.GradientTape",
"tensorflow.math.log",
"tensorflow.constant",
"tensorflow.squeeze",
"tensorflow.tensor_scatter_nd_add",
"tensorflow.keras.layers.Concatenate",
"tensorflow.minimum",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Dropout",
"tensorflow.reduce_sum",
"tensorflow.keras.optimizers.Adagrad",
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.Input",
"tensorflow.keras.backend.binary_crossentropy",
"tensorflow.keras.layers.Embedding",
"tensorflow.device",
"tensorflow.reduce_mean"
]
] |
cswin/CADA
|
[
"5e07ad339f0f1e523db87e452f65cb0f9bd05aa7"
] |
[
"data_preprocess/mnet_utils.py"
] |
[
"#The code is modified from https://github.com/HzFu/MNet_DeepCDR\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy.ndimage import binary_fill_holes\nfrom skimage.measure import label, regionprops\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.preprocessing import image\n\n\ndef pro_process(temp_img, input_size):\n img = np.asarray(temp_img*255).astype('uint8')\n img = np.array(Image.fromarray(img).resize((input_size, input_size)))\n return img\n\n\ndef train_loader(data_list, data_path, mask_path, input_size):\n while 1:\n for lineIdx, temp_txt in enumerate(data_list):\n train_img = np.asarray(image.load_img(os.path.join(data_path, temp_txt),\n target_size=(input_size, input_size, 3))\n ).astype('float32')\n img_mask = np.asarray(\n image.load_img(os.path.join(mask_path, temp_txt),\n target_size=(input_size, input_size, 3))\n ) / 255.0\n\n train_img = np.reshape(train_img, (1,) + train_img.shape)\n img_mask = np.reshape(img_mask, (1,) + img_mask.shape)\n yield ([train_img], [img_mask, img_mask, img_mask, img_mask, img_mask])\n\n\ndef BW_img(input, thresholding):\n if input.max() > thresholding:\n binary = input > thresholding\n else:\n binary = input > input.max() / 2.0\n\n label_image = label(binary)\n regions = regionprops(label_image)\n area_list = [region.area for region in regions]\n if area_list:\n idx_max = np.argmax(area_list)\n binary[label_image != idx_max + 1] = 0\n return binary_fill_holes(np.asarray(binary).astype(int))\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return 1- (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef2(y_true, y_pred):\n score0 = dice_coef(y_true[:, :, :, 0], y_pred[:, :, :, 0])\n score1 = dice_coef(y_true[:, :, :, 1], y_pred[:, :, :, 1])\n score = 0.5 * score0 + 0.5 * score1\n\n return score\n\n\ndef dice_coef_loss(y_true, y_pred):\n return dice_coef2(y_true, y_pred)\n\n\n\ndef disc_crop(org_img, DiscROI_size, C_x, C_y):\n tmp_size = int(DiscROI_size / 2);\n if len(org_img.shape) == 2:\n disc_region = np.zeros((DiscROI_size, DiscROI_size), dtype=org_img.dtype)\n else:\n disc_region = np.zeros((DiscROI_size, DiscROI_size, 3), dtype=org_img.dtype)\n\n crop_coord = np.array([C_x - tmp_size, C_x + tmp_size, C_y - tmp_size, C_y + tmp_size], dtype=int)\n err_coord = [0, DiscROI_size, 0, DiscROI_size]\n\n if crop_coord[0] < 0:\n err_coord[0] = abs(crop_coord[0])\n crop_coord[0] = 0\n\n if crop_coord[2] < 0:\n err_coord[2] = abs(crop_coord[2])\n crop_coord[2] = 0\n\n if crop_coord[1] > org_img.shape[0]:\n err_coord[1] = err_coord[1] - (crop_coord[1] - org_img.shape[0])\n crop_coord[1] = org_img.shape[0]\n\n if crop_coord[3] > org_img.shape[1]:\n err_coord[3] = err_coord[3] - (crop_coord[3] - org_img.shape[1])\n crop_coord[3] = org_img.shape[1]\n if len(org_img.shape) == 2:\n disc_region[err_coord[0]:err_coord[1], err_coord[2]:err_coord[3]] = org_img[crop_coord[0]:crop_coord[1],\n crop_coord[2]:crop_coord[3]]\n else:\n disc_region[err_coord[0]:err_coord[1], err_coord[2]:err_coord[3], ] = org_img[crop_coord[0]:crop_coord[1],\n crop_coord[2]:crop_coord[3], ]\n\n return disc_region, err_coord, crop_coord\n\n\ndef mk_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return dir_path\n\n\ndef files_with_ext(data_path, data_type):\n file_list = [file for file in 
os.listdir(data_path) if file.lower().endswith(data_type)]\n print(len(file_list))\n return file_list\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.reshape",
"tensorflow.python.keras.backend.flatten",
"tensorflow.python.keras.backend.sum",
"numpy.argmax"
]
] |
blakeaw/ORBILT
|
[
"ed402dd496534dccd00f3e75b57007d944c58c1d"
] |
[
"pybilt/common/running_stats.py"
] |
[
"\"\"\"Running stats module.\n\nThis module defines the RunningStats and BlockAverager classes, as well as the\ngen_running_average function.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import object\nfrom six.moves import range\nimport numpy as np\nfrom scipy import stats\n\n# Running Statistics\nclass RunningStats(object):\n \"\"\"A RunningStats object.\n\n The RunningStats object keeps running statistics for a single\n value/quantity.\n\n Attributes:\n n (int): The number of points that have pushed to the running\n average.\n \"\"\"\n def __init__(self):\n \"\"\"Initialize the RunningStats object.\n \"\"\"\n self.n=0\n self._Mnold = self._Mnnew = self._Snold = self._Snnew = np.zeros(1)[0]\n\n\n def push(self, val):\n \"\"\"Push a new value to the running average.\n\n Args:\n val (float): The value to be added to the running average.\n\n Returns:\n\n \"\"\"\n self.n += 1\n if self.n == 1:\n self._Mnold = np.array([val])[0]\n self._Snold = np.zeros(1)[0]\n else:\n n = np.array([float(self.n)])[0]\n self._Mnnew = self._Mnold + (val - self._Mnold)/(n);\n self._Snnew = self._Snold + (val - self._Mnold)*(val-self._Mnnew);\n self._Mnold = self._Mnnew;\n self._Snold = self._Snnew;\n\n\n def mean(self):\n \"\"\"Return the current mean.\"\"\"\n if self.n == 1:\n return self._Mnold\n elif self.n > 1:\n return self._Mnnew\n else:\n return 0.0\n\n\n def variance(self):\n \"\"\"Returun the current variance.\"\"\"\n if self.n > 1:\n one = np.array([1.0])[0]\n n = np.array([float(self.n)])[0]\n vary = self._Snnew/(n-one)\n return vary\n else:\n return 0.0\n\n\n def deviation(self):\n \"\"\"Return the current standard deviation.\"\"\"\n # dev = math.sqrt(self.Variance())\n dev = np.sqrt(self.variance())\n return dev\n\n\n def reset(self):\n \"\"\"Reset the running average.\"\"\"\n self.n = 0\n\n\n# assumes that a 1d numpy array of floats is pass as input, but\n# does not check this\ndef gen_running_average(onednparray):\n \"\"\" Generates a running average\n\n Args:\n onednparray (numpy.array): A 1d numpy array of measurements (e.g. over time)\n\n Returns:\n numpy.array: 2d array of dim len(onednparray)x2\n 2dnparray[i][0] = running average at i\n 2dnparray[i][1] = running standard deviation at i\n for i in range(0,len(onednparray))\n \"\"\"\n averager = RunningStats()\n nele = len(onednparray)\n output = np.zeros((nele,2))\n for i in range(nele):\n averager.push(onednparray[i])\n run_avg = averager.mean()\n run_dev = averager.deviation()\n # print run_avg, run_dev, averager.mean(), onednparray[i]\n output[i,0] = run_avg\n output[i,1] = run_dev\n return output\n\nclass BlockAverager(object):\n \"\"\"An object that keeps track of points for block averaging.\n\n Attributes:\n n_blocks (int): The current number of active blocks.\n\n \"\"\"\n\n def __init__(self, points_per_block=1000, min_points_in_block=500, store_data=False):\n \"\"\"Init a the BlockAverager\n\n Args:\n points_per_block (int, Optional): The number of points to assign to a block before initiating a new block.\n Default: 1000\n min_points_in_block (int, Optional): The minimum number of points that a block (typically the last block)\n can have and still be included in computing the final block average and standard error estimates. This\n value should be <= points_per_block. 
Default: 500\n \"\"\"\n self._store_data = store_data\n self._blocks = [RunningStats()]\n if store_data:\n self._blocks = [[]]\n self.n_blocks = 1\n self._points_per_block = points_per_block\n if min_points_in_block > points_per_block:\n self._min_points_in_block = points_per_block-1\n else:\n self._min_points_in_block = min_points_in_block\n #print \"points_per_block \",self._points_per_block, \" min_p \",self._min_points_in_block\n return\n\n def _add_block(self):\n \"\"\"Append a new block.\"\"\"\n if self._store_data:\n self._blocks.append([])\n else:\n self._blocks.append(RunningStats())\n self.n_blocks+=1\n return\n\n def _check_add_block(self):\n \"\"\"Check whether to add a new block and do so if the condition is met.\"\"\"\n block_i = self.n_blocks - 1\n if self._store_data:\n if len(self._blocks[block_i]) >= self._points_per_block:\n self._add_block()\n else:\n if self._blocks[block_i].n >= self._points_per_block:\n self._add_block()\n return\n\n def push_single(self, datum):\n \"\"\"Push a single data point (datum) into the block averager.\n\n Args:\n datum (float): The value to add to the block averaging.\n\n \"\"\"\n block_i = self.n_blocks-1\n #print \"pushing datum \",datum\n if self._store_data:\n self._blocks[block_i].append(datum)\n else:\n self._blocks[block_i].push(datum)\n self._check_add_block()\n return\n\n def push_container(self, data):\n \"\"\"Push a container (array or array like) of data points to the block averaging.\n\n Args:\n data (array like): The container (list, tuple, np.array, etc.) of data points to add to the block averaging.\n\n \"\"\"\n for datum in data:\n #print(datum)\n self.push_single(datum)\n return\n\n def _get_running(self):\n \"\"\"Get the block average quantities from interanl RunningStats\n objects.\n \"\"\"\n means = []\n for block in self._blocks:\n #print \"block.n \",block.n, \" min_p \",self._min_points_in_block\n if block.n >= self._min_points_in_block:\n means.append(block.mean())\n means = np.array(means)\n if len(means) > 1:\n block_average = means.mean()\n std_err = means.std()/np.sqrt(len(means))\n elif len(means) == 1:\n block_average = means[0]\n std_err = 0.0\n else:\n block_average = 0.0\n std_err = 0.0\n return block_average, std_err\n\n def _get_np(self):\n \"\"\"Get the block average quantities from internally stored numpy\n arrays.\n \"\"\"\n means = []\n for block in self._blocks:\n if len(block) >= self._min_points_in_block:\n means.append(np.array(block).mean())\n means = np.array(means)\n if len(means) > 1:\n block_average = means.mean()\n std_err = means.std()/np.sqrt(len(means))\n elif len(means) == 1:\n block_average = means[0]\n std_err = 0.0\n else:\n block_average = 0.0\n std_err = 0.0\n\n return block_average, std_err\n\n def get(self):\n \"\"\"Return the block average and standard error.\n\n Returns:\n tuple: Returns a length two tuple with the block average and standard error estimates.\n \"\"\"\n #print(self._blocks)\n if self._store_data:\n return self._get_np()\n else:\n return self._get_running()\n\n def _aob_running(self):\n \"\"\"Get the block average quantities from interanl RunningStats\n objects.\n \"\"\"\n means = []\n for block in self._blocks:\n #print \"block.n \",block.n, \" min_p \",self._min_points_in_block\n if block.n >= self._min_points_in_block:\n means.append(block.mean())\n means = np.array(means)\n return means\n\n def _aob_np(self):\n \"\"\"Get the block average quantities from internally stored numpy\n arrays.\n \"\"\"\n means = []\n for block in self._blocks:\n if len(block) >= 
self._min_points_in_block:\n means.append(np.array(block).mean())\n means = np.array(means)\n return means\n\n def averages_of_blocks(self):\n \"\"\"Return the block average and standard error.\n\n Returns:\n tuple: Returns a length two tuple with the block average and standard error estimates.\n \"\"\"\n #print(self._blocks)\n if self._store_data:\n return self._aob_np()\n else:\n return self._aob_running()\n\n def _sob_running(self):\n \"\"\"Get the block average quantities from interanl RunningStats\n objects.\n \"\"\"\n means = []\n for block in self._blocks:\n #print \"block.n \",block.n, \" min_p \",self._min_points_in_block\n if block.n >= self._min_points_in_block:\n means.append(block.deviation())\n means = np.array(means)\n return means\n\n def _sob_np(self):\n \"\"\"Get the block average quantities from internally stored numpy\n arrays.\n \"\"\"\n means = []\n for block in self._blocks:\n if len(block) >= self._min_points_in_block:\n means.append(np.array(block).std())\n means = np.array(means)\n return means\n\n def standards_of_blocks(self):\n \"\"\"Return the block average and standard error.\n\n Returns:\n tuple: Returns a length two tuple with the block average and standard error estimates.\n \"\"\"\n #print(self._blocks)\n if self._store_data:\n return self._sob_np()\n else:\n return self._sob_running()\n\n def number_of_blocks(self):\n \"\"\"Return the current number of blocks.\n\n Returns:\n int : The number of blocks.\n \"\"\"\n return self.n_blocks\n\n def points_per_block(self):\n \"\"\"Return information about the points per block.\n\n Returns:\n tuple: A three element tuple containing the setting for points per block, the setting for minimum points\n per block, and the number of points in the last block.\n \"\"\"\n if self._store_data:\n return self._points_per_block, self._min_points_in_block, len(self._blocks[self.n_blocks-1])\n else:\n return self._points_per_block, self._min_points_in_block, self._blocks[self.n_blocks - 1].n\n\n def n_block(self):\n if self._store_data:\n n_block = 0\n for block in self._blocks:\n if len(block) >= self._min_points_in_block:\n n_block += 1\n return n_block\n else:\n n_block = 0\n for block in self._blocks:\n #print \"block.n \",block.n, \" min_p \",self._min_points_in_block\n if block.n >= self._min_points_in_block:\n n_block += 1\n return n_block\n\ndef block_average_bse_v_size(data):\n n_dat = len(data)\n max_size = int(n_dat/3)\n output = list()\n for i in range(1, max_size+1, 1):\n block_averager = BlockAverager(points_per_block=i, min_points_in_block=i)\n block_averager.push_container(data)\n avg, std_error = block_averager.get()\n print(i/10, avg, std_error)\n output.append([i, avg, std_error])\n return np.array(output)\n\ndef binned_average(data, positions, n_bins=25, position_range=None, min_count=0):\n \"\"\"Compute averages over a quantized range of histogram like bins.\n\n Args:\n data (np.array): A 1d numpy array of values.\n positions (np.array): A 1d numpy array of positions corresponding to\n the values in data. These are used to assign the values to the\n histogram like bins for averaging.\n n_bins (Optional[int]): Set the target number of bins to quantize the\n position_range up into. Defaults to 25\n position_range (Optional[tuple]): A two element tuple containing the\n lower and upper range to bin the postions over; i.e.\n (position_lower, postion_upper). 
Defaults to None, which uses\n positions.min() and positions.max().\n Returns:\n tuple: returns a tuple with two numpy arrays of form (bins, averages)\n\n Notes:\n The function automatically filters out bins that have a zero count,\n so the final value of the number of bins and values will be\n len(bins) <= n_bins.\n \"\"\"\n lower = None\n upper = None\n\n if position_range is not None:\n\n lower = position_range[0]\n upper = position_range[1]\n else:\n lower = positions.min()\n upper = positions.max()\n\n edges = np.linspace(lower, upper, num=n_bins+1, endpoint=True)\n bins = np.linspace(lower, upper, num=n_bins, endpoint=False)\n counts = (np.zeros(len(bins))).astype(np.int64)\n sums = np.zeros(len(bins))\n n_data = len(data)\n # Loop over the data points.\n for i in range(n_data):\n c_val = data[i]\n pos = positions[i]\n bin_index = None\n # Select which bin (if any) the value corresponds to.\n for j in range(1, len(bins)+1):\n if (pos >= edges[j-1]) and (pos <= edges[j]):\n bin_index = j - 1\n break\n if bin_index is not None:\n counts[bin_index] += 1\n sums[bin_index] += c_val\n\n # Filter out the bins that had zero entries.\n keep_bins = []\n for i in range(len(counts)):\n if counts[i] > 0:\n keep_bins.append(i)\n # Return the filtered bins and averages (i.e. without NaNs).\n bins = bins[keep_bins]\n counts = counts[keep_bins]\n sums = sums[keep_bins]\n averages = sums / counts\n return bins, averages\n\n#pair t-test for comparison of the difference of two means - null is zero difference\ndef pair_ttest(mean_1, std_err_1, n_1, mean_2, std_err_2, n_2):\n m_diff = np.abs(mean_1 - mean_2)\n diff_err = np.sqrt(std_err_1**2 + std_err_2**2)\n degrees = n_1 + n_2 - 2\n tval = m_diff / diff_err\n pval = 2.0*(1.0 - stats.t.cdf(tval, df=degrees))\n return pval\n\ndef block_avg_hist(nparray_1d, block_size, in_range='auto', scale=False, *args, **kwargs):\n '''Creates histograms for each block and averages them to generate block\n a single block averaged historgram.\n '''\n if in_range == 'auto':\n in_range = [min(nparray_1d), max(nparray_1d)]\n # Number of blocks of block_size\n nblocks = int(len(nparray_1d)/block_size)\n # print(nblocks)\n # Trim the array to just the points to use with the blocking\n array_trim = nparray_1d[:block_size*nblocks]\n blocks = [array_trim[i*block_size:(i+1)*block_size] for i in range(nblocks)]\n # print(len(blocks))\n # print(len(blocks[0]))\n # print(len(blocks[1]))\n counts, edges = np.histogram(blocks[0], *args, range=in_range, **kwargs)\n # print(counts)\n c_list = [counts]\n for i in range(1,nblocks):\n counts, edges = np.histogram(blocks[i], *args, range=in_range, **kwargs)\n # print(counts)\n c_list.append(counts)\n stacked = np.stack(c_list, axis=1)\n # print(stacked)\n avg_count = stacked.mean(axis=1)\n # print(avg_count)\n se_count = stacked.std(axis=1)/np.sqrt(nblocks)\n centers = 0.5*(edges[1:] + edges[:-1])\n if scale:\n avg_count /= block_size\n se_count /= block_size\n return avg_count, se_count, centers\n"
] |
[
[
"numpy.histogram",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.sqrt",
"numpy.abs",
"numpy.linspace",
"scipy.stats.t.cdf"
]
] |
agartland/tcrdist3
|
[
"34f8d50e7448b2bf7cf7cd9ab9a2d80759f47240"
] |
[
"tcrdist/tests/longtest_simulate_cdr3_w_olga.py"
] |
[
"import olga.load_model as load_model\nimport olga.generation_probability as pgen\nimport olga.sequence_generation as seq_gen\nimport pandas as pd\n\n\ndef generate_simulated_beta_seqs(params_file_name = 'tcrdist/default_models/human_T_beta/model_params.txt',\n marginals_file_name = 'tcrdist/default_models/human_T_beta/model_marginals.txt',\n V_anchor_pos_file ='tcrdist/default_models/human_T_beta/V_gene_CDR3_anchors.csv',\n J_anchor_pos_file = 'tcrdist/default_models/human_T_beta/J_gene_CDR3_anchors.csv',\n output_cols = ['cdr3_b_aa', \"v_b_gene\",'j_b_gene'],\n n = 100000):\n #Load data\n genomic_data = load_model.GenomicDataVDJ()\n genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)\n #Load model\n generative_model = load_model.GenerativeModelVDJ()\n generative_model.load_and_process_igor_model(marginals_file_name)\n seq_gen_model = seq_gen.SequenceGenerationVDJ(generative_model, genomic_data)\n\n #Generate some random sequences\n\n\n vs=[x[0] for x in genomic_data.__dict__['genV']]\n js=[x[0] for x in genomic_data.__dict__['genJ']]\n vs = {i:k for i,k in enumerate(vs)}\n js = {i:k for i,k in enumerate(js)}\n\n sim_cdr3 = [seq_gen_model.gen_rnd_prod_CDR3()[1:4] for x in range(n)]\n sim_cdr3_long = [(i,vs[v],js[j]) for i,v,j in sim_cdr3 ]\n\n df = pd.DataFrame(sim_cdr3_long, columns = output_cols)\n return df\n\ndef generate_simulated_alpha_seqs(params_file_name = 'tcrdist/default_models/human_T_alpha/model_params.txt',\n marginals_file_name = 'tcrdist/default_models/human_T_alpha/model_marginals.txt',\n V_anchor_pos_file ='tcrdist/default_models/human_T_alpha/V_gene_CDR3_anchors.csv',\n J_anchor_pos_file = 'tcrdist/default_models/human_T_alpha/J_gene_CDR3_anchors.csv',\n output_cols = ['cdr3_a_aa', \"v_a_gene\",'j_a_gene'],\n n = 100000):\n #Load data\n genomic_data = load_model.GenomicDataVJ()\n genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)\n #Load model\n generative_model = load_model.GenerativeModelVJ()\n generative_model.load_and_process_igor_model(marginals_file_name)\n seq_gen_model = seq_gen.SequenceGenerationVJ(generative_model, genomic_data)\n\n #Generate some random sequences\n vs=[x[0] for x in genomic_data.__dict__['genV']]\n js=[x[0] for x in genomic_data.__dict__['genJ']]\n vs = {i:k for i,k in enumerate(vs)}\n js = {i:k for i,k in enumerate(js)}\n\n sim_cdr3 = [seq_gen_model.gen_rnd_prod_CDR3()[1:4] for x in range(n)]\n sim_cdr3_long = [(i,vs[v],js[j]) for i,v,j in sim_cdr3 ]\n\n df = pd.DataFrame(sim_cdr3_long, columns = output_cols)\n return df\n\nif __name__ == \"__main__\":\n \"\"\"\n Using Olga See: \n ---------------\n Zachary Sethna, Yuval Elhanati, Curtis G Callan, Aleksandra M Walczak, Thierry Mora\n `Bioinformatics (2019) <https://doi.org/10.1093/bioinformatics/btz035>`_ \n OLGA: fast computation of generation probabilities of B- and T-cell receptor amino acid sequences and motifs\n\n\n Generate 1000K (1M) CDR3s using default Olga Models\n Human (Alpha/Beta) and Mouse (Beta)\n\n human_T_alpha_sim1000K.csv\n human_T_beta_sim1000K.csv\n mouse_T_beta_sim1000K.csv\n \n contained in: \n olga_T_alpha_beta_1000K_simulated_cdr3.zip \n \"\"\"\n dfb= generate_simulated_beta_seqs(params_file_name = 'tcrdist/default_models/human_T_beta/model_params.txt',\n marginals_file_name = 'tcrdist/default_models/human_T_beta/model_marginals.txt',\n V_anchor_pos_file ='tcrdist/default_models/human_T_beta/V_gene_CDR3_anchors.csv',\n J_anchor_pos_file = 
'tcrdist/default_models/human_T_beta/J_gene_CDR3_anchors.csv',\n output_cols = ['cdr3_b_aa', \"v_b_gene\",'j_b_gene'], n = 1000000)\n dfb.to_csv('human_T_beta_sim1000K.csv', index = False)\n\n dfa = generate_simulated_alpha_seqs(params_file_name = 'tcrdist/default_models/human_T_alpha/model_params.txt',\n marginals_file_name = 'tcrdist/default_models/human_T_alpha/model_marginals.txt',\n V_anchor_pos_file ='tcrdist/default_models/human_T_alpha/V_gene_CDR3_anchors.csv',\n J_anchor_pos_file = 'tcrdist/default_models/human_T_alpha/J_gene_CDR3_anchors.csv',\n output_cols = ['cdr3_a_aa', \"v_a_gene\",'j_a_gene'],\n n = 1000000)\n\n dfa.to_csv('human_T_alpha_sim1000K.csv', index = False) \n\n dfb= generate_simulated_beta_seqs(params_file_name = 'tcrdist/default_models/mouse_T_beta/model_params.txt',\n marginals_file_name = 'tcrdist/default_models/mouse_T_beta/model_marginals.txt',\n V_anchor_pos_file ='tcrdist/default_models/mouse_T_beta/V_gene_CDR3_anchors.csv',\n J_anchor_pos_file = 'tcrdist/default_models/mouse_T_beta/J_gene_CDR3_anchors.csv',\n output_cols = ['cdr3_b_aa', \"v_b_gene\",'j_b_gene'], n = 1000000)\n dfb.to_csv('mouse_T_beta_sim1000K.csv', index = False)\n"
] |
[
[
"pandas.DataFrame"
]
] |
Dany-L/Reinforcement-learning
|
[
"ca1626e5910e325df6b9fdfd3e403dde93a4a1ee"
] |
[
"randomwalk.py"
] |
[
"import gym\nimport numpy as np\nimport gym_random_walk\n\nenv = gym.make('random_walk-v0')\n\nV = [0, 0.5, 0.5,0.5,0.5,0.5,0]\nV_MC = [0, 0.5, 0.5, 0.5, 0.5, 0.5, 0]\nalpha = 0.1\ngamma = 1\n\nnum2 = 100\n\nfor episodes in range(num2):\n\n env.reset()\n env.render()\n s=3\n total_reward = 0\n done = False\n states =[]\n \n num = 100\n \n for i in range(num):\n a = np.random.randint(env.action_space.n)\n print(\"action: \" ,a)\n s1,reward,done, _ = env.step(a)\n env.render()\n\n# TD(0) \n V[s] = (1-alpha)*V[s] + alpha*(reward + gamma*V[s1])\n \n# reward and states for MC\n states.append(s1)\n total_reward += reward\n \n# update state\n s = s1\n \n if done:\n for j in range(len(states)):\n counter = states[j]\n# calculate MC\n V_MC[counter] = V_MC[counter] +alpha*(total_reward-V_MC[counter])\n print(\"endstate reached\")\n break\n \n print(\"TD\",V,\"MC\",V_MC)\n"
] |
[
[
"numpy.random.randint"
]
] |
akineeic/models
|
[
"11ea5237818e791a5717716d5413977f4c4db1e3",
"11ea5237818e791a5717716d5413977f4c4db1e3"
] |
[
"official/vision/beta/modeling/backbones/mobilenet_test.py",
"official/nlp/transformer/model_utils_test.py"
] |
[
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for MobileNet.\"\"\"\n\nimport itertools\n# Import libraries\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom official.vision.beta.modeling.backbones import mobilenet\n\n\nclass MobileNetTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n 'MobileNetV1',\n 'MobileNetV2',\n 'MobileNetV3Large',\n 'MobileNetV3Small',\n 'MobileNetV3EdgeTPU',\n 'MobileNetMultiAVG',\n 'MobileNetMultiMAX',\n )\n def test_serialize_deserialize(self, model_id):\n # Create a network object that sets all of its config options.\n kwargs = dict(\n model_id=model_id,\n filter_size_scale=1.0,\n stochastic_depth_drop_rate=None,\n use_sync_bn=False,\n kernel_initializer='VarianceScaling',\n kernel_regularizer=None,\n bias_regularizer=None,\n norm_momentum=0.99,\n norm_epsilon=0.001,\n output_stride=None,\n min_depth=8,\n divisible_by=8,\n regularize_depthwise=False,\n finegrain_classification_mode=True\n )\n network = mobilenet.MobileNet(**kwargs)\n\n expected_config = dict(kwargs)\n self.assertEqual(network.get_config(), expected_config)\n\n # Create another network object from the first object's config.\n new_network = mobilenet.MobileNet.from_config(network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n @parameterized.parameters(\n itertools.product(\n [1, 3],\n [\n 'MobileNetV1',\n 'MobileNetV2',\n 'MobileNetV3Large',\n 'MobileNetV3Small',\n 'MobileNetV3EdgeTPU',\n 'MobileNetMultiAVG',\n 'MobileNetMultiMAX',\n ],\n ))\n def test_input_specs(self, input_dim, model_id):\n \"\"\"Test different input feature dimensions.\"\"\"\n tf.keras.backend.set_image_data_format('channels_last')\n\n input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])\n network = mobilenet.MobileNet(model_id=model_id, input_specs=input_specs)\n\n inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)\n _ = network(inputs)\n\n @parameterized.parameters(\n itertools.product(\n [\n 'MobileNetV1',\n 'MobileNetV2',\n 'MobileNetV3Large',\n 'MobileNetV3Small',\n 'MobileNetV3EdgeTPU',\n 'MobileNetMultiAVG',\n 'MobileNetMultiMAX',\n ],\n [32, 224],\n ))\n def test_mobilenet_creation(self, model_id,\n input_size):\n \"\"\"Test creation of MobileNet family models.\"\"\"\n tf.keras.backend.set_image_data_format('channels_last')\n\n mobilenet_layers = {\n # The stride (relative to input) and number of filters\n # of first few layers for filter_size_scale = 0.75\n 'MobileNetV1': [(1, 24), (1, 48), (2, 96), (2, 96)],\n 'MobileNetV2': [(1, 24), (1, 16), (2, 24), (2, 24)],\n 'MobileNetV3Small': [(1, 16), (2, 16), (3, 24), (3, 24)],\n 'MobileNetV3Large': [(1, 
16), (1, 16), (2, 24), (2, 24)],\n 'MobileNetV3EdgeTPU': [(1, 24), (1, 16), (2, 24), (2, 24)],\n 'MobileNetMultiMAX': [(1, 24), (2, 24), (3, 48), (3, 48)],\n 'MobileNetMultiAVG': [(1, 24), (2, 24), (2, 24), (3, 48)],\n }\n\n network = mobilenet.MobileNet(model_id=model_id,\n filter_size_scale=0.75)\n\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\n endpoints = network(inputs)\n\n for idx, (stride, num_filter) in enumerate(mobilenet_layers[model_id]):\n self.assertAllEqual(\n [1, input_size / 2 ** stride, input_size / 2 ** stride, num_filter],\n endpoints[idx+1].shape.as_list())\n\n @parameterized.parameters(\n itertools.product(\n [\n 'MobileNetV1',\n 'MobileNetV2',\n 'MobileNetV3Large',\n 'MobileNetV3Small',\n 'MobileNetV3EdgeTPU',\n 'MobileNetMultiAVG',\n 'MobileNetMultiMAX',\n ],\n [1.0, 0.75],\n ))\n def test_mobilenet_scaling(self, model_id,\n filter_size_scale):\n \"\"\"Test for creation of a MobileNet classifier.\"\"\"\n mobilenet_params = {\n ('MobileNetV1', 1.0): 3228864,\n ('MobileNetV1', 0.75): 1832976,\n ('MobileNetV2', 1.0): 2257984,\n ('MobileNetV2', 0.75): 1382064,\n ('MobileNetV3Large', 1.0): 4226432,\n ('MobileNetV3Large', 0.75): 2731616,\n ('MobileNetV3Small', 1.0): 1529968,\n ('MobileNetV3Small', 0.75): 1026552,\n ('MobileNetV3EdgeTPU', 1.0): 2849312,\n ('MobileNetV3EdgeTPU', 0.75): 1737288,\n ('MobileNetMultiAVG', 1.0): 3700576,\n ('MobileNetMultiAVG', 0.75): 2345864,\n ('MobileNetMultiMAX', 1.0): 3170720,\n ('MobileNetMultiMAX', 0.75): 2041976,\n }\n\n input_size = 224\n network = mobilenet.MobileNet(model_id=model_id,\n filter_size_scale=filter_size_scale)\n self.assertEqual(network.count_params(),\n mobilenet_params[(model_id, filter_size_scale)])\n\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\n _ = network(inputs)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test Transformer model helper methods.\"\"\"\n\nimport tensorflow as tf\n\nfrom official.nlp.transformer import model_utils\n\nNEG_INF = -1e9\n\n\nclass ModelUtilsTest(tf.test.TestCase):\n\n def test_get_padding(self):\n x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])\n padding = model_utils.get_padding(x, padding_value=0)\n\n self.assertAllEqual([[0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 0, 0, 1, 0]],\n padding)\n\n def test_get_padding_bias(self):\n x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])\n bias = model_utils.get_padding_bias(x)\n bias_shape = tf.shape(bias)\n flattened_bias = tf.reshape(bias, [3, 5])\n\n self.assertAllEqual(\n [[0, NEG_INF, NEG_INF, NEG_INF, 0], [0, 0, NEG_INF, NEG_INF, NEG_INF],\n [NEG_INF, 0, 0, NEG_INF, 0]], flattened_bias)\n self.assertAllEqual([3, 1, 1, 5], bias_shape)\n\n def test_get_decoder_self_attention_bias(self):\n length = 5\n bias = model_utils.get_decoder_self_attention_bias(length)\n\n self.assertAllEqual(\n [[[[0, NEG_INF, NEG_INF, NEG_INF, NEG_INF],\n [0, 0, NEG_INF, NEG_INF, NEG_INF], [0, 0, 0, NEG_INF, NEG_INF],\n [0, 0, 0, 0, NEG_INF], [0, 0, 0, 0, 0]]]], bias)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.Input",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.test.main"
],
[
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.test.main",
"tensorflow.reshape"
]
] |
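The `apis` cell for this row records the `tf.keras` symbols exercised by the MobileNet backbone test and the `tf.constant` / `tf.shape` / `tf.reshape` calls from the Transformer utility test. A minimal sketch of those calls in isolation — shapes and values here are illustrative, not taken from the indexed files:

```python
import tensorflow as tf

# NHWC layout, as the MobileNet test sets before building inputs.
tf.keras.backend.set_image_data_format('channels_last')

# A symbolic Keras input plus an InputSpec describing a 4-D image batch.
inputs = tf.keras.Input(shape=(128, 128, 3), batch_size=1)
spec = tf.keras.layers.InputSpec(shape=[None, None, None, 3])

# Eager tensor ops of the kind the Transformer utility test asserts on.
x = tf.constant([[1, 0, 2], [3, 4, 0]])
flat = tf.reshape(x, [-1])
print(tf.shape(x).numpy(), flat.numpy())

# tf.test.main() is what both test files call under __main__ to run their
# tf.test.TestCase classes; it is omitted here since this sketch defines none.
```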
X-Libor/qiskit-optimization
|
[
"f2a92538d883ebc0f78c156ab6f31710e9b1d14f"
] |
[
"test/algorithms/test_grover_optimizer.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test Grover Optimizer.\"\"\"\n\nimport unittest\nfrom test import QiskitOptimizationTestCase\n\nimport numpy as np\nfrom ddt import data, ddt\nfrom docplex.mp.model import Model\nfrom qiskit import Aer\nfrom qiskit.utils import QuantumInstance, algorithm_globals\nfrom qiskit.algorithms import NumPyMinimumEigensolver\nfrom qiskit_optimization.algorithms import (\n GroverOptimizer,\n MinimumEigenOptimizer,\n OptimizationResultStatus,\n)\nfrom qiskit_optimization.converters import (\n InequalityToEquality,\n IntegerToBinary,\n LinearEqualityToPenalty,\n MaximizeToMinimize,\n QuadraticProgramToQubo,\n)\nfrom qiskit_optimization.problems import QuadraticProgram\nfrom qiskit_optimization.translators import from_docplex_mp\n\n\n@ddt\nclass TestGroverOptimizer(QiskitOptimizationTestCase):\n \"\"\"GroverOptimizer tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n algorithm_globals.random_seed = 1\n self.sv_simulator = QuantumInstance(\n Aer.get_backend(\"aer_simulator_statevector\"),\n seed_simulator=921,\n seed_transpiler=200,\n )\n self.qasm_simulator = QuantumInstance(\n Aer.get_backend(\"aer_simulator\"), seed_simulator=123, seed_transpiler=123\n )\n self.n_iter = 8\n\n def validate_results(self, problem, results):\n \"\"\"Validate the results object returned by GroverOptimizer.\"\"\"\n # Get expected value.\n solver = MinimumEigenOptimizer(NumPyMinimumEigensolver())\n comp_result = solver.solve(problem)\n # Validate results.\n np.testing.assert_array_almost_equal(comp_result.x, results.x)\n self.assertEqual(comp_result.fval, results.fval)\n # optimizer internally deals with minimization problem\n self.assertAlmostEqual(\n results.fval, problem.objective.sense.value * results.intermediate_fval\n )\n\n def test_qubo_gas_int_zero(self):\n \"\"\"Test for when the answer is zero.\"\"\"\n\n # Input.\n model = Model()\n x_0 = model.binary_var(name=\"x0\")\n x_1 = model.binary_var(name=\"x1\")\n model.minimize(0 * x_0 + 0 * x_1)\n op = from_docplex_mp(model)\n\n # Will not find a negative, should return 0.\n gmf = GroverOptimizer(1, num_iterations=1, quantum_instance=self.sv_simulator)\n results = gmf.solve(op)\n np.testing.assert_array_almost_equal(results.x, [0, 0])\n self.assertEqual(results.fval, 0.0)\n self.assertAlmostEqual(results.fval, results.intermediate_fval)\n\n def test_qubo_gas_int_simple(self):\n \"\"\"Test for simple case, with 2 linear coeffs and no quadratic coeffs or constants.\"\"\"\n\n # Input.\n model = Model()\n x_0 = model.binary_var(name=\"x0\")\n x_1 = model.binary_var(name=\"x1\")\n model.minimize(-x_0 + 2 * x_1)\n op = from_docplex_mp(model)\n\n # Get the optimum key and value.\n gmf = GroverOptimizer(4, num_iterations=self.n_iter, quantum_instance=self.sv_simulator)\n results = gmf.solve(op)\n self.validate_results(op, results)\n\n self.assertIsNotNone(results.operation_counts)\n self.assertEqual(results.n_input_qubits, 2)\n self.assertEqual(results.n_output_qubits, 4)\n\n def test_qubo_gas_int_simple_maximize(self):\n \"\"\"Test for simple case, but with 
maximization.\"\"\"\n\n # Input.\n model = Model()\n x_0 = model.binary_var(name=\"x0\")\n x_1 = model.binary_var(name=\"x1\")\n model.maximize(-x_0 + 2 * x_1)\n op = from_docplex_mp(model)\n\n # Get the optimum key and value.\n gmf = GroverOptimizer(4, num_iterations=self.n_iter, quantum_instance=self.sv_simulator)\n results = gmf.solve(op)\n self.validate_results(op, results)\n\n @data(\"sv\", \"qasm\")\n def test_qubo_gas_int_paper_example(self, simulator):\n \"\"\"\n Test the example from https://arxiv.org/abs/1912.04088 using the state vector simulator\n and the qasm simulator\n \"\"\"\n\n # Input.\n model = Model()\n x_0 = model.binary_var(name=\"x0\")\n x_1 = model.binary_var(name=\"x1\")\n x_2 = model.binary_var(name=\"x2\")\n model.minimize(-x_0 + 2 * x_1 - 3 * x_2 - 2 * x_0 * x_2 - 1 * x_1 * x_2)\n op = from_docplex_mp(model)\n\n # Get the optimum key and value.\n q_instance = self.sv_simulator if simulator == \"sv\" else self.qasm_simulator\n gmf = GroverOptimizer(6, num_iterations=self.n_iter, quantum_instance=q_instance)\n results = gmf.solve(op)\n self.validate_results(op, results)\n\n def test_converter_list(self):\n \"\"\"Test converters list\"\"\"\n # Input.\n\n model = Model()\n x_0 = model.binary_var(name=\"x0\")\n x_1 = model.binary_var(name=\"x1\")\n model.maximize(-x_0 + 2 * x_1)\n op = from_docplex_mp(model)\n\n # Get the optimum key and value.\n # a single converter.\n qp2qubo = QuadraticProgramToQubo()\n gmf = GroverOptimizer(\n 4,\n num_iterations=self.n_iter,\n quantum_instance=self.sv_simulator,\n converters=qp2qubo,\n )\n results = gmf.solve(op)\n self.validate_results(op, results)\n\n # a list of converters\n ineq2eq = InequalityToEquality()\n int2bin = IntegerToBinary()\n penalize = LinearEqualityToPenalty()\n max2min = MaximizeToMinimize()\n converters = [ineq2eq, int2bin, penalize, max2min]\n gmf = GroverOptimizer(\n 4,\n num_iterations=self.n_iter,\n quantum_instance=self.sv_simulator,\n converters=converters,\n )\n results = gmf.solve(op)\n self.validate_results(op, results)\n # invalid converters\n with self.assertRaises(TypeError):\n invalid = [qp2qubo, \"invalid converter\"]\n GroverOptimizer(\n 4,\n num_iterations=self.n_iter,\n quantum_instance=self.sv_simulator,\n converters=invalid,\n )\n\n @data(\"sv\", \"qasm\")\n def test_samples_and_raw_samples(self, simulator):\n \"\"\"Test samples and raw_samples\"\"\"\n algorithm_globals.random_seed = 2\n op = QuadraticProgram()\n op.integer_var(0, 3, \"x\")\n op.binary_var(\"y\")\n op.minimize(linear={\"x\": 1, \"y\": 2})\n op.linear_constraint(linear={\"x\": 1, \"y\": 1}, sense=\">=\", rhs=1, name=\"xy\")\n q_instance = self.sv_simulator if simulator == \"sv\" else self.qasm_simulator\n grover_optimizer = GroverOptimizer(\n 8, num_iterations=self.n_iter, quantum_instance=q_instance\n )\n opt_sol = 1\n success = OptimizationResultStatus.SUCCESS\n results = grover_optimizer.solve(op)\n self.assertEqual(len(results.samples), 8)\n self.assertEqual(len(results.raw_samples), 32)\n self.assertAlmostEqual(sum(s.probability for s in results.samples), 1)\n self.assertAlmostEqual(sum(s.probability for s in results.raw_samples), 1)\n self.assertAlmostEqual(min(s.fval for s in results.samples), 0)\n self.assertAlmostEqual(min(s.fval for s in results.samples if s.status == success), opt_sol)\n self.assertAlmostEqual(min(s.fval for s in results.raw_samples), opt_sol)\n for sample in results.raw_samples:\n self.assertEqual(sample.status, success)\n np.testing.assert_array_almost_equal(results.x, results.samples[0].x)\n 
self.assertAlmostEqual(results.fval, results.samples[0].fval)\n self.assertEqual(results.status, results.samples[0].status)\n self.assertAlmostEqual(results.fval, results.raw_samples[0].fval)\n self.assertEqual(results.status, results.raw_samples[0].status)\n np.testing.assert_array_almost_equal([1, 0, 0, 0, 0], results.raw_samples[0].x)\n\n @data(\"sv\", \"qasm\")\n def test_bit_ordering(self, simulator):\n \"\"\"Test bit ordering\"\"\"\n # test minimize\n algorithm_globals.random_seed = 2\n q_instance = self.sv_simulator if simulator == \"sv\" else self.qasm_simulator\n mdl = Model(\"docplex model\")\n x = mdl.binary_var(\"x\")\n y = mdl.binary_var(\"y\")\n mdl.minimize(x - 2 * y)\n op = from_docplex_mp(mdl)\n opt_sol = -2\n success = OptimizationResultStatus.SUCCESS\n grover_optimizer = GroverOptimizer(\n 3, num_iterations=self.n_iter, quantum_instance=q_instance\n )\n results = grover_optimizer.solve(op)\n self.assertEqual(results.fval, opt_sol)\n np.testing.assert_array_almost_equal(results.x, [0, 1])\n self.assertEqual(results.status, success)\n results.raw_samples.sort(key=lambda x: x.probability, reverse=True)\n self.assertAlmostEqual(sum(s.probability for s in results.samples), 1, delta=1e-5)\n self.assertAlmostEqual(sum(s.probability for s in results.raw_samples), 1, delta=1e-5)\n self.assertAlmostEqual(min(s.fval for s in results.samples), -2)\n self.assertAlmostEqual(min(s.fval for s in results.samples if s.status == success), opt_sol)\n self.assertAlmostEqual(min(s.fval for s in results.raw_samples), opt_sol)\n for sample in results.raw_samples:\n self.assertEqual(sample.status, success)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.testing.assert_array_almost_equal"
]
] |
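Only one API is recorded for the Grover optimizer test: `numpy.testing.assert_array_almost_equal`. A small self-contained sketch of how that assertion behaves, on toy arrays:

```python
import numpy as np

expected = np.array([0.0, 0.5, 1.0])
computed = np.array([1e-9, 0.5, 1.0 - 1e-9])

# Passes: the arrays agree to the default 6 decimal places.
np.testing.assert_array_almost_equal(expected, computed)

# Would raise AssertionError: a 1e-3 offset is well above the default tolerance.
# np.testing.assert_array_almost_equal(expected, computed + 1e-3)
```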
klw11j/Sexual-Assualt-Analysis
|
[
"bdba47d08d45b26f59832c926fe2ac4baa9e5f02"
] |
[
"app.py"
] |
[
"from flask import Flask, Response, render_template, jsonify,redirect, url_for, send_from_directory\nimport pandas as pd \nfrom sqlalchemy import create_engine\nfrom sqlalchemy import BigInteger, Column, JSON, Text\nfrom config import cxnstring\nfrom flask_cors import CORS\nimport psycopg2\nimport sys\nimport requests\nimport json\nimport os\napp = Flask(__name__)\napp._static_folder = ''\nCORS(app)\nengine = create_engine(cxnstring, pool_recycle=3600)\n\n\n# , pool_recycle=3600\n#main page\n@app.route(\"/\")\ndef index():\n with engine.connect() as con:\n # query result from sqlalchemy + postgres\n year = con.execute (\"\"\"SELECT DISTINCT (totals_gender.\"yearOfRegistration\") FROM totals_gender ORDER BY (totals_gender.\"yearOfRegistration\") ;\"\"\")\n gender = con.execute (\"\"\" SELECT DISTINCT (totals_gender.\"gender\")FROM totals_gender ORDER BY (totals_gender.\"gender\"); \"\"\")\n #cleaning results, removing uneeded values from tuple i.e( (,))\n years = [y[0] for y in year]\n gender = [g[0] for g in gender]\n\n return render_template(\"home.html\", years=years, gender=gender)\n\n#route for geojson for mapping\n@app.route(\"/geodata\")\ndef geodata():\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, 'static', 'us_trafficking_locations2.geojson')\n data = json.load(open(json_url))\n return jsonify(data=data)\n\n#fulldatabase for plots \n@app.route(\"/fulldate\")\ndef psqltest():\n response = pd.read_sql(\"SELECT * FROM assault_table_db\", engine)\n return Response(response.to_json(orient=\"records\", date_format=\"iso\"), mimetype=\"application/json\")\n\n#database for map\n@app.route(\"/assault_by_state\")\ndef gender():\n response = pd.read_sql(\"SELECT * FROM assault_per_state\", engine)\n return Response(response.to_json(orient = \"records\", date_format=\"iso\"), mimetype=\"application/json\")\n\n#database used\n@app.route(\"/gender\")\ndef test():\n response = pd.read_sql(\"SELECT * FROM totals_gender\", engine)\n return Response(response.to_json(orient = \"records\", date_format=\"iso\"), mimetype=\"application/json\")\n\n\n# path for static file collection\n@app.route('/static/<path:path>')\ndef send_static(path):\n return send_from_directory('static', path)\n\n@app.route('/about_project')\ndef aboutproject():\n return render_template(\"about_project.html\")\n# database orginal\n@app.route('/data_collected')\ndef datacollected():\n return render_template(\"data_collected.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)"
] |
[
[
"pandas.read_sql"
]
] |
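The Flask app's only recorded API is `pandas.read_sql`. A minimal sketch using an in-memory SQLite connection as a stand-in for the app's Postgres/SQLAlchemy engine; the table and column names echo the queries above but are illustrative, not the real schema:

```python
import sqlite3

import pandas as pd

# In-memory SQLite database standing in for the app's SQLAlchemy engine.
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE totals_gender (yearOfRegistration INTEGER, gender TEXT)")
con.executemany("INSERT INTO totals_gender VALUES (?, ?)",
                [(2019, "F"), (2020, "M")])

# read_sql accepts a SQLAlchemy connectable or, for SQLite, a plain DBAPI connection.
df = pd.read_sql("SELECT * FROM totals_gender", con)
print(df.to_json(orient="records"))
```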
watsoncm/PruneSeg
|
[
"07c0a209495f88ce596aabf4b5273260aae08c6f"
] |
[
"train.py"
] |
[
"\"\"\"\nTrains, evaluates and saves the KittiSeg model.\n\n-------------------------------------------------\n\nThe MIT License (MIT)\n\nCopyright (c) 2017 Marvin Teichmann\n\nMore details: https://github.com/MarvinTeichmann/KittiSeg/blob/master/LICENSE\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport commentjson\nimport logging\nimport os\nimport sys\n\nimport collections\n\n\ndef dict_merge(dct, merge_dct):\n \"\"\" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, dict_merge recurses down into dicts nested\n to an arbitrary depth, updating keys. The ``merge_dct`` is merged into\n ``dct``.\n :param dct: dict onto which the merge is executed\n :param merge_dct: dct merged into dct\n :return: None\n \"\"\"\n for k, v in merge_dct.iteritems():\n if (k in dct and isinstance(dct[k], dict) and\n isinstance(merge_dct[k], collections.Mapping)):\n dict_merge(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n\n# configure logging\nif 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\nelse:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\n# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070\nimport numpy as np\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nsys.path.insert(1, 'incl')\n\nimport tensorvision.train as train\nimport tensorvision.utils as utils\n\nflags.DEFINE_string('name', None,\n 'Append a name Tag to run.')\n\nflags.DEFINE_string('project', None,\n 'Append a name Tag to run.')\n\nflags.DEFINE_string('hypes', None,\n 'File storing model parameters.')\n\nflags.DEFINE_string('mod', None,\n 'Modifier for model parameters.')\n\nif 'TV_SAVE' in os.environ and os.environ['TV_SAVE']:\n tf.app.flags.DEFINE_boolean(\n 'save', True, ('Whether to save the run. In case --nosave (default) '\n 'output will be saved to the folder TV_DIR_RUNS/debug, '\n 'hence it will get overwritten by further runs.'))\nelse:\n tf.app.flags.DEFINE_boolean(\n 'save', True, ('Whether to save the run. 
In case --nosave (default) '\n 'output will be saved to the folder TV_DIR_RUNS/debug '\n 'hence it will get overwritten by further runs.'))\n\n\ndef main(_):\n utils.set_gpus_to_use()\n\n try:\n import tensorvision.train\n import tensorflow_fcn.utils\n except ImportError:\n logging.error(\"Could not import the submodules.\")\n logging.error(\"Please execute:\"\n \"'git submodule update --init --recursive'\")\n exit(1)\n\n if tf.app.flags.FLAGS.hypes is None:\n logging.error(\"No hype file is given.\")\n logging.info(\"Usage: python train.py --hypes hypes/KittiClass.json\")\n exit(1)\n\n with open(tf.app.flags.FLAGS.hypes, 'r') as f:\n logging.info(\"f: %s\", f)\n hypes = commentjson.load(f)\n utils.load_plugins()\n\n if tf.app.flags.FLAGS.mod is not None:\n import ast\n mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)\n dict_merge(hypes, mod_dict)\n\n if 'TV_DIR_RUNS' in os.environ:\n os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],\n 'KittiSeg')\n utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)\n\n utils._add_paths_to_sys(hypes)\n\n train.maybe_download_and_extract(hypes)\n logging.info(\"Initialize training folder\")\n train.initialize_training_folder(hypes)\n logging.info(\"Start training\")\n train.do_training(hypes)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"tensorflow.app.run",
"tensorflow.app.flags.DEFINE_boolean"
]
] |
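The recorded APIs for `train.py` are the TF1-style `tensorflow.app.run` and `tensorflow.app.flags.DEFINE_boolean`. A hedged sketch of that flag/entry-point pattern, written against `tf.compat.v1` so it also runs under TF2; the flag names echo the ones defined in the file above:

```python
# TF1-style flags and entry point; under TF2 the same symbols live in tf.compat.v1.
import tensorflow.compat.v1 as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_boolean('save', True, 'Whether to save the run.')
flags.DEFINE_string('hypes', None, 'File storing model parameters.')


def main(_):
    print('save=%s hypes=%s' % (FLAGS.save, FLAGS.hypes))


if __name__ == '__main__':
    tf.app.run()  # parses command-line flags, then calls main(argv)
```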
philippbeer/m4_clustering
|
[
"18cf1b9111f4236f0be152d2419c470840645acb"
] |
[
"feature_extraction.py"
] |
[
"\"\"\"\nThis module enables the feature extraction\n\"\"\"\nimport math\nfrom multiprocessing import Pool\nimport os\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom tqdm import tqdm\nfrom tsfresh.feature_extraction import extract_features, EfficientFCParameters\nfrom tsfresh.feature_extraction.settings import from_columns\nfrom tsfresh.feature_selection import select_features\nfrom tsfresh.utilities.dataframe_functions import impute, make_forecasting_frame\n\nimport config as cnf\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\ndef generate_features(df: pd.DataFrame) -> pd.DataFrame:\n\t\"\"\"\n\textract features from time series selected for their relevance to forecasting\n\tdataframe assumed to be in tsfresh compatible format\n\tParams:\n\t-------\n\tdf : dataframe from which to extract time series features\n\tReturns:\n\t-------\n\tfeatures_filtered : numpy array containing the \n\t\"\"\"\n\tif os.path.isfile(cnf.DATA+'{}_extracted_features.csv'\\\n\t\t\t\t\t\t.format(cnf.RUN_TYPE)):\n\t\tprint('#### Features file exist - loading #######')\n\t\textracted_features = pd.read_csv(cnf.DATA+'{}_extracted_features.csv'\\\n\t\t\t\t\t\t.format(cnf.RUN_TYPE))\n\t\textracted_features.rename(columns={'Unnamed: 0': 'Index'}, inplace=True)\n\t\textracted_features.set_index('Index', inplace=True)\n\t\tstandard_scaler = preprocessing.StandardScaler()\n\t\textracted_features_scaled = pd.DataFrame(standard_scaler.fit_transform(extract_features.values),\n\t\t\tcolumns=extract_features.columns,\n\t\t\tindex=extract_features.index)\n\n\t\treturn extracted_features_scaled\n\telse:\n\t\tprint('#### Features file does not exist - running feature extraction #######')\n\t\t# needs to be done for each time series\n\n\t\tl = list(df['V1'].unique()) # getting all different time series from the list\n\t\tfc_param = dict()\n\t\tprint('#### creating forecasting frame ####')\n\t\tfor elm in l:\n\t\t\tprint('#### Extr. 
and selecting features for\\\n\t\t\t\t\t series {} of {} ####'\\\n\t\t\t\t\t.format(l.index(elm)+1,len(l)))\n\t\t\tdf_tmp = df[df['V1']==elm]\n\t\t\tdf_fc, y = make_forecasting_frame(df_tmp['value'],kind=elm,\n\t\t\t\t\t\t\t\t\t\t rolling_direction=1,\n\t\t\t\t\t\t\t\t\t\t max_timeshift=7)\n\t\t\t\n\t\t\t\n\t\t\textracted_features = extract_features(df_fc,\n\t\t\t\t\t\t\t\t\tcolumn_id='id',\n\t\t\t\t\t\t\t\t\tcolumn_sort='time',\n\t\t\t\t\t\t\t\t\tcolumn_value='value',\n\t\t\t\t\t\t\t\t\timpute_function=impute,\n\t\t\t\t\t\t\t\t\tdefault_fc_parameters=EfficientFCParameters())\n\n\t\t\t# verify matching index structure\n\t\t\tif y.index[0] in extracted_features.index:\n\t\t\t\t# do nothing as the indexes are in the same structure\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# modify y index to match extracted features index\n\t\t\t\ty.index = pd.MultiIndex.from_tuples(zip(['id']*len(y.index), y.index))\n\n\t\t\tselected_features = select_features(extracted_features, y)\n\n\t\t\tfc_param_new = from_columns(selected_features)\n\t\t\t\n\t\t\t# Python 3.9 operation to unionize dictionaries\n\t\t\tfc_param = fc_param | fc_param_new\n\t\t\tfc_param_t = dict()\n\t\t\t# extracting\n\t\t\tfor key in fc_param['value']:\n\t\t\t\tfc_param_t.update({key : fc_param['value'][key]})\n\t\t\t\n\t\t\n\t\tprint('#### Extracting relevant fts for all series ####')\n\n\t\textracted_features = extract_features(df,\n\t\t\t\t\t\t\t\t\t\t\tcolumn_id='V1',\n\t\t\t\t\t\t\t\t\t\t\tcolumn_sort='timestamp',\n\t\t\t\t\t\t\t\t\t\t\tcolumn_value='value',\n\t\t\t\t\t\t\t\t\t\t\timpute_function=impute,\n\t\t\t\t\t\t\t\t\t\t\tdefault_fc_parameters=fc_param_t)\n\n\t\tstandard_scaler = preprocessing.StandardScaler()\n\t\textracted_features_scaled = pd.DataFrame(standard_scaler.fit_transform(extract_features.values),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t columns=extract_features.columns,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t index=extract_features.index)\n\n\n\t\textracted_features.to_csv(cnf.DATA+'{}_extracted_features.csv'\\\n\t\t\t\t\t\t\t\t\t.format(cnf.RUN_TYPE))\n\t\textract_features_scaled.to_csv(cnf.Data+'{}_extr_features_scaled.csv'\\\n\t\t\t\t\t\t\t\t\t\t\t .scaled(cnf.RUN_TYPE))\n\n\treturn extracted_features_scaled\n\ndef make_fc_frame(df: pd.DataFrame) -> pd.DataFrame:\n\t\"\"\"\n\tcreates rolling window dataframe\n\tto be used inside apply of groupby\n\t\"\"\"\n\tts_id = df.iloc[0]['V1']\n\tdf_res, y = make_forecasting_frame(df['value'],\n\t\t\t\t\t\t\t\t\t\tkind=ts_id,\n\t\t\t\t\t\t\t\t\t\trolling_direction=1,\n\t\t\t\t\t\t\t\t\t\tmax_timeshift=cnf.MAX_TIMESHIFT)\n\tdf_res['y'] = y\n\treturn df_res\n\n"
] |
[
[
"pandas.set_option",
"sklearn.preprocessing.StandardScaler"
]
] |
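The feature-extraction module's recorded APIs are `pandas.set_option` and `sklearn.preprocessing.StandardScaler`. A small sketch of the scale-then-rewrap pattern the module relies on, applied to a toy DataFrame with illustrative column names:

```python
import pandas as pd
from sklearn import preprocessing

# Wider console output, as configured at the top of the module.
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

features = pd.DataFrame({'f1': [1.0, 2.0, 3.0], 'f2': [10.0, 20.0, 30.0]})

# Scale each column to zero mean / unit variance, then rewrap as a DataFrame
# so the original index and column names are preserved.
scaler = preprocessing.StandardScaler()
features_scaled = pd.DataFrame(scaler.fit_transform(features.values),
                               columns=features.columns,
                               index=features.index)
print(features_scaled)
```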
sgsmob/covidcast-indicators
|
[
"424ef5fd5361c4ed7b3ed88cf31813349d35240e"
] |
[
"google_health/tests/test_run.py"
] |
[
"\"\"\"Tests for running google_health.\"\"\"\n\nfrom os.path import join, exists\n\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nfrom delphi_utils import read_params\nfrom delphi_google_health.run import run_module\n\nclass TestRunModule:\n \"\"\"Tests for run_module().\"\"\"\n PARAMS = {\n \"common\": {\n \"export_dir\": \"./receiving\"\n },\n \"indicator\": {\n \"data_dir\": \"./data\",\n \"end_date\": \"2020-04-30\",\n \"ght_key\": \"\",\n \"start_date\": \"2020-02-11\",\n \"static_file_dir\": \"../static\",\n \"test\": True,\n \"test_data_dir\": \"./test_data/{geo_res}_sample.csv\",\n \"wip_signal\": \"\"\n }\n }\n\n def test_class(self):\n \"\"\"Tests output file existence.\"\"\"\n run_module(self.PARAMS)\n wip_signal = self.PARAMS[\"indicator\"][\"wip_signal\"]\n if wip_signal:\n assert exists(join(\"receiving\", \"20200419_hrr_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_msa_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_state_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_dma_wip_raw_search.csv\"))\n\n assert exists(join(\"receiving\", \"20200315_hrr_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_msa_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_state_wip_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_dma_wip_raw_search.csv\"))\n else:\n assert exists(join(\"receiving\", \"20200419_hrr_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_msa_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_state_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200419_dma_raw_search.csv\"))\n\n assert exists(join(\"receiving\", \"20200315_hrr_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_msa_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_state_raw_search.csv\"))\n assert exists(join(\"receiving\", \"20200315_dma_raw_search.csv\"))\n\n def test_match_old_raw_output(self):\n \"\"\"Tests that raw output files don't change over time.\"\"\"\n run_module(self.PARAMS)\n wip_signal = self.PARAMS[\"indicator\"][\"wip_signal\"]\n if wip_signal:\n files = [\n \"20200419_hrr_wip_raw_search.csv\",\n \"20200419_msa_wip_raw_search.csv\",\n \"20200419_state_wip_raw_search.csv\",\n \"20200419_dma_wip_raw_search.csv\",\n ]\n else:\n files = [\n \"20200419_hrr_raw_search.csv\",\n \"20200419_msa_raw_search.csv\",\n \"20200419_state_raw_search.csv\",\n \"20200419_dma_raw_search.csv\",\n ]\n\n for fname in files:\n test_df = pd.read_csv(join(\"receiving_test\", fname))\n print(test_df)\n new_df = pd.read_csv(join(\"receiving\", fname))\n print(new_df)\n\n assert_frame_equal(test_df, new_df)\n\n def test_match_old_smoothed_output(self):\n \"\"\"Tests that smooth output files don't change over time.\"\"\"\n run_module(self.PARAMS)\n wip_signal = self.PARAMS[\"indicator\"][\"wip_signal\"]\n if wip_signal:\n files = [\n \"20200419_hrr_wip_smoothed_search.csv\",\n \"20200419_msa_wip_smoothed_search.csv\",\n \"20200419_state_wip_smoothed_search.csv\",\n \"20200419_dma_wip_smoothed_search.csv\",\n ]\n else:\n files = [\n \"20200419_hrr_smoothed_search.csv\",\n \"20200419_msa_smoothed_search.csv\",\n \"20200419_state_smoothed_search.csv\",\n \"20200419_dma_smoothed_search.csv\",\n ]\n for fname in files:\n test_df = pd.read_csv(join(\"receiving_test\", fname))\n new_df = pd.read_csv(join(\"receiving\", fname))\n\n assert_frame_equal(test_df, new_df)\n"
] |
[
[
"pandas.testing.assert_frame_equal"
]
] |
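The recorded API for the google_health regression tests is `pandas.testing.assert_frame_equal`. A minimal sketch of the comparison on two toy frames:

```python
import pandas as pd
from pandas.testing import assert_frame_equal

old_df = pd.DataFrame({"geo_id": ["ca", "ny"], "val": [1.0, 2.0]})
new_df = pd.DataFrame({"geo_id": ["ca", "ny"], "val": [1.0, 2.0]})

# Raises AssertionError if the shape, dtypes, index, or values differ.
assert_frame_equal(old_df, new_df)
```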
zwxu064/RANP
|
[
"92135583e0ced21fa5634823b289c5aea366de21",
"92135583e0ced21fa5634823b289c5aea366de21"
] |
[
"pruning/pytorch_snip/prune_utils.py",
"third_party/thop/my_test.py"
] |
[
"import torch\nimport copy\n\n\ndef convert_dim_conv2fully(grads):\n grads = copy.deepcopy(grads) # grads is a reference, changing its size in function will change it externally\n\n n_layers = len(grads) // 2\n\n for idx in range(n_layers - 1):\n weight_idx = 2 * idx\n next_weight_idx = 2 * (idx + 1)\n current_layer = grads[weight_idx]\n next_layer = grads[next_weight_idx]\n out_c_current = current_layer.size()[0]\n next_layer_size_len = len(next_layer.size())\n\n if next_layer_size_len == 4:\n out_c_next, in_c_next, h_next, w_next = next_layer.size()\n elif next_layer_size_len == 2:\n out_c_next, in_c_next = next_layer.size()\n h_next, w_next = 1, 1\n else:\n assert False\n\n # This usually happens from a convoluational layer to a fully-connected layer\n # because for some network, the output of a convolutional layer will be flatten, then to a fully-connected layer,\n # such as lenet5 and lenet5_caffe\n if out_c_current != in_c_next:\n assert (h_next == 1) and (w_next == 1)\n grads[next_weight_idx] = next_layer.view(out_c_next, out_c_current, (in_c_next // out_c_current) * h_next, w_next)\n\n return grads\n\n\ndef resume_dim_conv2fully(mask, grads):\n mask = copy.deepcopy(mask) # grads is a reference, changing its size in function will change it externally\n\n assert len(mask) == len(grads)\n n_layers = len(grads) // 2\n\n for idx in range(n_layers):\n weight_idx = 2 * idx\n mask_current = mask[weight_idx]\n grad_current = grads[weight_idx]\n if mask_current.size() != grad_current.size():\n assert mask_current.flatten().size() == grad_current.flatten().size()\n mask[weight_idx] = mask_current.view(grad_current.size())\n\n return mask\n\n\ndef check_same(input_a, input_b):\n if (input_a is None) or (input_b is None):\n return False\n\n is_same = True\n\n if isinstance(input_a, list):\n assert len(input_a) == len(input_b)\n num = len(input_a)\n\n for idx in range(num):\n if not torch.equal(input_a[idx], input_b[idx]):\n is_same = False\n break\n else:\n is_same = False if (not torch.equal(input_a, input_b)) else True\n\n return is_same\n\n\ndef cal_channel_prune_grad(grads, channel_sparsity, mode='max', norm='max'):\n n_layers = len(grads) // 2\n channel_accum_grad_list = []\n\n for idx in range(n_layers):\n weight_idx, bias_idx = 2 * idx, 2 * idx + 1\n grad_size = grads[weight_idx].size()\n out_c, in_c = grad_size[0], grad_size[1]\n\n # Bug: how to define the importance of a channel:\n # 'sum' not good, fully-connected layers would be removed dramatically as its kernel size is just one, would have 0-retained layer\n # 'mean', not good, convolutional layers would be removed dramatically as its kernel size is much larger than fully-connected layers\n # (whose kernel size is 1), the importance of a channel will be decreased by average.\n # 'max', good, highest grad decides how important this channel is\n if mode == 'sum':\n channel_accum = grads[weight_idx].view(out_c, in_c, -1).sum(2)\n channel_accum = channel_accum + grads[bias_idx].view(out_c, 1).repeat(1, in_c) if (grads[bias_idx] is not None) else channel_accum\n elif mode == 'mean':\n if grads[bias_idx] is not None:\n grads_a_layer = grads[weight_idx].view(out_c, in_c, -1)\n n_elements = grads_a_layer.size()[-1]\n channel_accum = grads_a_layer.sum(2)\n channel_accum = (channel_accum + grads[bias_idx].view(out_c, 1)) / (n_elements + 1)\n else:\n channel_accum = grads[weight_idx].view(out_c, in_c, -1).mean(2)\n elif mode == 'max':\n grads_a_layer = grads[weight_idx].view(out_c, in_c, -1)\n channel_accum, _ = grads_a_layer.max(2)\n else:\n 
assert False\n\n channel_accum_grad_list.append(channel_accum)\n\n # Calculate threshold\n channel_amu_grad_flatten = torch.cat([channel_accum_grad_list[idx].flatten() for idx in range(n_layers)], dim=0)\n n_channels = channel_amu_grad_flatten.size()[0]\n threshold, _ = torch.topk(channel_amu_grad_flatten, int(n_channels * (1 - channel_sparsity)), sorted=True)\n threshold = threshold[-1]\n\n if norm == 'max':\n norm_factor = channel_amu_grad_flatten.max()\n elif norm == 'sum':\n norm_factor = channel_amu_grad_flatten.sum()\n else:\n norm_factor = 1\n\n for idx in range(n_layers):\n channel_accum_grad_list[idx] /= norm_factor\n\n threshold /= norm_factor\n\n return channel_accum_grad_list, threshold",
"import sys\nimport copy\nimport torch\nimport torch.nn as nn\nimport random\nimport numpy as np\nfrom thop.profile import profile\nfrom torchvision.models import resnet50\n\n\nif __name__ == '__main__':\n seed = 2019\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n model = nn.Sequential(\n nn.Conv2d(3, 5, 2, stride=1, bias=True),\n nn.Conv2d(5, 1, 3, stride=1, bias=True),\n nn.Linear(1, 5, bias=True))\n\n model = resnet50()\n input = torch.randn(1, 3, 224, 224)\n flops, params, memory = profile(model, inputs=(input,), verbose=False)\n print('ResNet50 flops:{}, params:{}, memory:{}'.format(flops, params, memory))\n"
] |
[
[
"torch.equal"
],
[
"torch.nn.Linear",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.randn"
]
] |
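The torch/numpy APIs recorded for this row are the RNG seeding calls, `nn.Conv2d` / `nn.Linear`, `torch.randn`, and `torch.equal`. A small sketch combining them; the layer sizes are toy values, and the CUDA seeding calls are silently ignored when no GPU is present:

```python
import random

import numpy as np
import torch
import torch.nn as nn

# Seed every RNG the profiling script above touches.
seed = 2019
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

# Toy sizes: Conv2d(3->5, kernel 2) on an 8x8 input yields a 5x7x7 feature map.
model = nn.Sequential(nn.Conv2d(3, 5, 2, stride=1, bias=True),
                      nn.Flatten(),
                      nn.Linear(5 * 7 * 7, 4, bias=True))
x = torch.randn(1, 3, 8, 8)

# torch.equal is True only when shape and every value match exactly.
print(torch.equal(model(x), model(x)))
```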
gdmcbain/scipy
|
[
"846e9cd501bba8c5b23f218f37c88ce962c38bef",
"846e9cd501bba8c5b23f218f37c88ce962c38bef",
"846e9cd501bba8c5b23f218f37c88ce962c38bef"
] |
[
"scipy/linalg/_expm_frechet.py",
"scipy/optimize/_differentialevolution.py",
"doc/source/tutorial/examples/normdiscr_plot2.py"
] |
[
"\"\"\"Frechet derivative of the matrix exponential.\"\"\"\nimport numpy as np\nimport scipy.linalg\n\n__all__ = ['expm_frechet', 'expm_cond']\n\n\ndef expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):\n \"\"\"\n Frechet derivative of the matrix exponential of A in the direction E.\n\n Parameters\n ----------\n A : (N, N) array_like\n Matrix of which to take the matrix exponential.\n E : (N, N) array_like\n Matrix direction in which to take the Frechet derivative.\n method : str, optional\n Choice of algorithm. Should be one of\n\n - `SPS` (default)\n - `blockEnlarge`\n\n compute_expm : bool, optional\n Whether to compute also `expm_A` in addition to `expm_frechet_AE`.\n Default is True.\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n expm_A : ndarray\n Matrix exponential of A.\n expm_frechet_AE : ndarray\n Frechet derivative of the matrix exponential of A in the direction E.\n\n For ``compute_expm = False``, only `expm_frechet_AE` is returned.\n\n See also\n --------\n expm : Compute the exponential of a matrix.\n\n Notes\n -----\n This section describes the available implementations that can be selected\n by the `method` parameter. The default method is *SPS*.\n\n Method *blockEnlarge* is a naive algorithm.\n\n Method *SPS* is Scaling-Pade-Squaring [1]_.\n It is a sophisticated implementation which should take\n only about 3/8 as much time as the naive implementation.\n The asymptotics are the same.\n\n .. versionadded:: 0.13.0\n\n References\n ----------\n .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)\n Computing the Frechet Derivative of the Matrix Exponential,\n with an application to Condition Number Estimation.\n SIAM Journal On Matrix Analysis and Applications.,\n 30 (4). pp. 1639-1657. 
ISSN 1095-7162\n\n Examples\n --------\n >>> import scipy.linalg\n >>> rng = np.random.default_rng()\n >>> A = rng.standard_normal((3, 3))\n >>> E = rng.standard_normal((3, 3))\n >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)\n >>> expm_A.shape, expm_frechet_AE.shape\n ((3, 3), (3, 3))\n\n >>> import scipy.linalg\n >>> rng = np.random.default_rng()\n >>> A = rng.standard_normal((3, 3))\n >>> E = rng.standard_normal((3, 3))\n >>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)\n >>> M = np.zeros((6, 6))\n >>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A\n >>> expm_M = scipy.linalg.expm(M)\n >>> np.allclose(expm_A, expm_M[:3, :3])\n True\n >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])\n True\n\n \"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n E = np.asarray_chkfinite(E)\n else:\n A = np.asarray(A)\n E = np.asarray(E)\n if A.ndim != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected A to be a square matrix')\n if E.ndim != 2 or E.shape[0] != E.shape[1]:\n raise ValueError('expected E to be a square matrix')\n if A.shape != E.shape:\n raise ValueError('expected A and E to be the same shape')\n if method is None:\n method = 'SPS'\n if method == 'SPS':\n expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E)\n elif method == 'blockEnlarge':\n expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E)\n else:\n raise ValueError('Unknown implementation %s' % method)\n if compute_expm:\n return expm_A, expm_frechet_AE\n else:\n return expm_frechet_AE\n\n\ndef expm_frechet_block_enlarge(A, E):\n \"\"\"\n This is a helper function, mostly for testing and profiling.\n Return expm(A), frechet(A, E)\n \"\"\"\n n = A.shape[0]\n M = np.vstack([\n np.hstack([A, E]),\n np.hstack([np.zeros_like(A), A])])\n expm_M = scipy.linalg.expm(M)\n return expm_M[:n, :n], expm_M[:n, n:]\n\n\n\"\"\"\nMaximal values ell_m of ||2**-s A|| such that the backward error bound\ndoes not exceed 2**-53.\n\"\"\"\nell_table_61 = (\n None,\n # 1\n 2.11e-8,\n 3.56e-4,\n 1.08e-2,\n 6.49e-2,\n 2.00e-1,\n 4.37e-1,\n 7.83e-1,\n 1.23e0,\n 1.78e0,\n 2.42e0,\n # 11\n 3.13e0,\n 3.90e0,\n 4.74e0,\n 5.63e0,\n 6.56e0,\n 7.52e0,\n 8.53e0,\n 9.56e0,\n 1.06e1,\n 1.17e1,\n )\n\n\n# The b vectors and U and V are copypasted\n# from scipy.sparse.linalg.matfuncs.py.\n# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)\n\ndef _diff_pade3(A, E, ident):\n b = (120., 60., 12., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n U = A.dot(b[3]*A2 + b[1]*ident)\n V = b[2]*A2 + b[0]*ident\n Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)\n Lv = b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade5(A, E, ident):\n b = (30240., 15120., 3360., 420., 30., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[5]*M4 + b[3]*M2) +\n E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade7(A, E, ident):\n b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +\n E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[6]*M6 + b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef _diff_pade9(A, E, 
ident):\n b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,\n 2162160., 110880., 3960., 90., 1.)\n A2 = A.dot(A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n A8 = np.dot(A4, A4)\n M8 = np.dot(A4, M4) + np.dot(M4, A4)\n U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)\n V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +\n E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))\n Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2\n return U, V, Lu, Lv\n\n\ndef expm_frechet_algo_64(A, E):\n n = A.shape[0]\n s = None\n ident = np.identity(n)\n A_norm_1 = scipy.linalg.norm(A, 1)\n m_pade_pairs = (\n (3, _diff_pade3),\n (5, _diff_pade5),\n (7, _diff_pade7),\n (9, _diff_pade9))\n for m, pade in m_pade_pairs:\n if A_norm_1 <= ell_table_61[m]:\n U, V, Lu, Lv = pade(A, E, ident)\n s = 0\n break\n if s is None:\n # scaling\n s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))\n A = A * 2.0**-s\n E = E * 2.0**-s\n # pade order 13\n A2 = np.dot(A, A)\n M2 = np.dot(A, E) + np.dot(E, A)\n A4 = np.dot(A2, A2)\n M4 = np.dot(A2, M2) + np.dot(M2, A2)\n A6 = np.dot(A2, A4)\n M6 = np.dot(A4, M2) + np.dot(M4, A2)\n b = (64764752532480000., 32382376266240000., 7771770303897600.,\n 1187353796428800., 129060195264000., 10559470521600.,\n 670442572800., 33522128640., 1323241920., 40840800., 960960.,\n 16380., 182., 1.)\n W1 = b[13]*A6 + b[11]*A4 + b[9]*A2\n W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident\n Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2\n Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident\n W = np.dot(A6, W1) + W2\n U = np.dot(A, W)\n V = np.dot(A6, Z1) + Z2\n Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2\n Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2\n Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2\n Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2\n Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2\n Lu = np.dot(A, Lw) + np.dot(E, W)\n Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2\n # factor once and solve twice\n lu_piv = scipy.linalg.lu_factor(-U + V)\n R = scipy.linalg.lu_solve(lu_piv, U + V)\n L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))\n # squaring\n for k in range(s):\n L = np.dot(R, L) + np.dot(L, R)\n R = np.dot(R, R)\n return R, L\n\n\ndef vec(M):\n \"\"\"\n Stack columns of M to construct a single vector.\n\n This is somewhat standard notation in linear algebra.\n\n Parameters\n ----------\n M : 2-D array_like\n Input matrix\n\n Returns\n -------\n v : 1-D ndarray\n Output vector\n\n \"\"\"\n return M.T.ravel()\n\n\ndef expm_frechet_kronform(A, method=None, check_finite=True):\n \"\"\"\n Construct the Kronecker form of the Frechet derivative of expm.\n\n Parameters\n ----------\n A : array_like with shape (N, N)\n Matrix to be expm'd.\n method : str, optional\n Extra keyword to be passed to expm_frechet.\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n K : 2-D ndarray with shape (N*N, N*N)\n Kronecker form of the Frechet derivative of the matrix exponential.\n\n Notes\n -----\n This function is used to help compute the condition number\n of the matrix exponential.\n\n See also\n --------\n expm : Compute a matrix exponential.\n expm_frechet : Compute the Frechet derivative of the matrix exponential.\n expm_cond : 
Compute the relative condition number of the matrix exponential\n in the Frobenius norm.\n\n \"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n else:\n A = np.asarray(A)\n if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected a square matrix')\n\n n = A.shape[0]\n ident = np.identity(n)\n cols = []\n for i in range(n):\n for j in range(n):\n E = np.outer(ident[i], ident[j])\n F = expm_frechet(A, E,\n method=method, compute_expm=False, check_finite=False)\n cols.append(vec(F))\n return np.vstack(cols).T\n\n\ndef expm_cond(A, check_finite=True):\n \"\"\"\n Relative condition number of the matrix exponential in the Frobenius norm.\n\n Parameters\n ----------\n A : 2-D array_like\n Square input matrix with shape (N, N).\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n kappa : float\n The relative condition number of the matrix exponential\n in the Frobenius norm\n\n Notes\n -----\n A faster estimate for the condition number in the 1-norm\n has been published but is not yet implemented in SciPy.\n\n .. versionadded:: 0.14.0\n\n See also\n --------\n expm : Compute the exponential of a matrix.\n expm_frechet : Compute the Frechet derivative of the matrix exponential.\n\n Examples\n --------\n >>> from scipy.linalg import expm_cond\n >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])\n >>> k = expm_cond(A)\n >>> k\n 1.7787805864469866\n\n \"\"\"\n if check_finite:\n A = np.asarray_chkfinite(A)\n else:\n A = np.asarray(A)\n if len(A.shape) != 2 or A.shape[0] != A.shape[1]:\n raise ValueError('expected a square matrix')\n\n X = scipy.linalg.expm(A)\n K = expm_frechet_kronform(A, check_finite=False)\n\n # The following norm choices are deliberate.\n # The norms of A and X are Frobenius norms,\n # and the norm of K is the induced 2-norm.\n A_norm = scipy.linalg.norm(A, 'fro')\n X_norm = scipy.linalg.norm(X, 'fro')\n K_norm = scipy.linalg.norm(K, 2)\n\n kappa = (K_norm * A_norm) / X_norm\n return kappa\n",
"\"\"\"\ndifferential_evolution: The differential evolution global optimization algorithm\nAdded by Andrew Nelson 2014\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.optimize import OptimizeResult, minimize\nfrom scipy.optimize.optimize import _status_message\nfrom scipy._lib._util import check_random_state, MapWrapper\n\nfrom scipy.optimize._constraints import (Bounds, new_bounds_to_old,\n NonlinearConstraint, LinearConstraint)\nfrom scipy.sparse import issparse\n\n__all__ = ['differential_evolution']\n\n\n_MACHEPS = np.finfo(np.float64).eps\n\n\ndef differential_evolution(func, bounds, args=(), strategy='best1bin',\n maxiter=1000, popsize=15, tol=0.01,\n mutation=(0.5, 1), recombination=0.7, seed=None,\n callback=None, disp=False, polish=True,\n init='latinhypercube', atol=0, updating='immediate',\n workers=1, constraints=(), x0=None):\n \"\"\"Finds the global minimum of a multivariate function.\n\n Differential Evolution is stochastic in nature (does not use gradient\n methods) to find the minimum, and can search large areas of candidate\n space, but often requires larger numbers of function evaluations than\n conventional gradient-based techniques.\n\n The algorithm is due to Storn and Price [1]_.\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence or `Bounds`\n Bounds for variables. There are two ways to specify the bounds:\n 1. Instance of `Bounds` class.\n 2. ``(min, max)`` pairs for each element in ``x``, defining the finite\n lower and upper bounds for the optimizing argument of `func`. It is\n required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used\n to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. Should be one of:\n\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'currenttobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'currenttobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n\n The default is 'best1bin'.\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. The population has\n ``popsize * len(x)`` individuals. This keyword is overridden if an\n initial population is supplied via the `init` keyword. When using\n ``init='sobol'`` the population size is calculated as the next power\n of 2 after ``popsize * len(x)``.\n tol : float, optional\n Relative tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. 
The mutation constant for that generation is taken from\n ``U[min, max)``. Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance then\n that instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Prints the evaluated `func` at every iteration.\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`\n method is used to polish the best population member at the end, which\n can improve the minimization slightly. If a constrained problem is\n being studied then the `trust-constr` method is used instead.\n init : str or array-like, optional\n Specify which type of population initialization is performed. Should be\n one of:\n\n - 'latinhypercube'\n - 'sobol'\n - 'halton'\n - 'random'\n - array specifying the initial population. The array should have\n shape ``(M, len(x))``, where M is the total population size and\n len(x) is the number of parameters.\n `init` is clipped to `bounds` before use.\n\n The default is 'latinhypercube'. Latin Hypercube sampling tries to\n maximize coverage of the available parameter space.\n\n 'sobol' and 'halton' are superior alternatives and maximize even more\n the parameter space. 'sobol' will enforce an initial population\n size which is calculated as the next power of 2 after\n ``popsize * len(x)``. 'halton' has no requirements but is a bit less\n efficient. See `scipy.stats.qmc` for more details.\n\n 'random' initializes the population randomly - this has the drawback\n that clustering can occur, preventing the whole of parameter space\n being covered. Use of an array to specify a population could be used,\n for example, to create a tight bunch of initial guesses in an location\n where the solution is known to exist, thereby reducing time for\n convergence.\n atol : float, optional\n Absolute tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n updating : {'immediate', 'deferred'}, optional\n If ``'immediate'``, the best solution vector is continuously updated\n within a single generation [4]_. This can lead to faster convergence as\n trial vectors can take advantage of continuous improvements in the best\n solution.\n With ``'deferred'``, the best solution vector is updated once per\n generation. 
Only ``'deferred'`` is compatible with parallelization, and\n the `workers` keyword can over-ride this option.\n\n .. versionadded:: 1.2.0\n\n workers : int or map-like callable, optional\n If `workers` is an int the population is subdivided into `workers`\n sections and evaluated in parallel\n (uses `multiprocessing.Pool <multiprocessing>`).\n Supply -1 to use all available CPU cores.\n Alternatively supply a map-like callable, such as\n `multiprocessing.Pool.map` for evaluating the population in parallel.\n This evaluation is carried out as ``workers(func, iterable)``.\n This option will override the `updating` keyword to\n ``updating='deferred'`` if ``workers != 1``.\n Requires that `func` be pickleable.\n\n .. versionadded:: 1.2.0\n\n constraints : {NonLinearConstraint, LinearConstraint, Bounds}\n Constraints on the solver, over and above those applied by the `bounds`\n kwd. Uses the approach by Lampinen [5]_.\n\n .. versionadded:: 1.4.0\n\n x0 : None or array-like, optional\n Provides an initial guess to the minimization. Once the population has\n been initialized this vector replaces the first (best) member. This\n replacement is done even if `init` is given an initial population.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a `OptimizeResult` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing, then\n OptimizeResult also contains the ``jac`` attribute.\n If the eventual solution does not satisfy the applied constraints\n ``success`` will be `False`.\n\n Notes\n -----\n Differential evolution is a stochastic population based method that is\n useful for global optimization problems. At each pass through the population\n the algorithm mutates each candidate solution by mixing with other candidate\n solutions to create a trial candidate. There are several strategies [2]_ for\n creating trial candidates, which suit some problems more than others. The\n 'best1bin' strategy is a good starting point for many systems. In this\n strategy two members of the population are randomly chosen. Their difference\n is used to mutate the best member (the 'best' in 'best1bin'), :math:`b_0`,\n so far:\n\n .. math::\n\n b' = b_0 + mutation * (population[rand0] - population[rand1])\n\n A trial vector is then constructed. Starting with a randomly chosen ith\n parameter the trial is sequentially filled (in modulo) with parameters from\n ``b'`` or the original candidate. The choice of whether to use ``b'`` or the\n original candidate is made with a binomial distribution (the 'bin' in\n 'best1bin') - a random number in [0, 1) is generated. If this number is\n less than the `recombination` constant then the parameter is loaded from\n ``b'``, otherwise it is loaded from the original candidate. The final\n parameter is always loaded from ``b'``. Once the trial candidate is built\n its fitness is assessed. If the trial is better than the original candidate\n then it takes its place. If it is also better than the best overall\n candidate it also replaces that.\n To improve your chances of finding a global minimum use higher `popsize`\n values, with higher `mutation` and (dithering), but lower `recombination`\n values. 
This has the effect of widening the search radius, but slowing\n convergence.\n By default the best solution vector is updated continuously within a single\n iteration (``updating='immediate'``). This is a modification [4]_ of the\n original differential evolution algorithm which can lead to faster\n convergence as trial vectors can immediately benefit from improved\n solutions. To use the original Storn and Price behaviour, updating the best\n solution once per iteration, set ``updating='deferred'``.\n\n .. versionadded:: 0.15.0\n\n Examples\n --------\n Let us consider the problem of minimizing the Rosenbrock function. This\n function is implemented in `rosen` in `scipy.optimize`.\n\n >>> from scipy.optimize import rosen, differential_evolution\n >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]\n >>> result = differential_evolution(rosen, bounds)\n >>> result.x, result.fun\n (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)\n\n Now repeat, but with parallelization.\n\n >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]\n >>> result = differential_evolution(rosen, bounds, updating='deferred',\n ... workers=2)\n >>> result.x, result.fun\n (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)\n\n Let's try and do a constrained minimization\n\n >>> from scipy.optimize import NonlinearConstraint, Bounds\n >>> def constr_f(x):\n ... return np.array(x[0] + x[1])\n >>>\n >>> # the sum of x[0] and x[1] must be less than 1.9\n >>> nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)\n >>> # specify limits using a `Bounds` object.\n >>> bounds = Bounds([0., 0.], [2., 2.])\n >>> result = differential_evolution(rosen, bounds, constraints=(nlc),\n ... seed=1)\n >>> result.x, result.fun\n (array([0.96633867, 0.93363577]), 0.0011361355854792312)\n\n Next find the minimum of the Ackley function\n (https://en.wikipedia.org/wiki/Test_functions_for_optimization).\n\n >>> from scipy.optimize import differential_evolution\n >>> import numpy as np\n >>> def ackley(x):\n ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))\n ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))\n ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e\n >>> bounds = [(-5, 5), (-5, 5)]\n >>> result = differential_evolution(ackley, bounds)\n >>> result.x, result.fun\n (array([ 0., 0.]), 4.4408920985006262e-16)\n\n References\n ----------\n .. [1] Storn, R and Price, K, Differential Evolution - a Simple and\n Efficient Heuristic for Global Optimization over Continuous Spaces,\n Journal of Global Optimization, 1997, 11, 341 - 359.\n .. [2] http://www1.icsi.berkeley.edu/~storn/code.html\n .. [3] http://en.wikipedia.org/wiki/Differential_evolution\n .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., -\n Characterization of structures from X-ray scattering data using\n genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,\n 2827-2848\n .. [5] Lampinen, J., A constraint handling approach for the differential\n evolution algorithm. Proceedings of the 2002 Congress on\n Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. 
IEEE,\n 2002.\n \"\"\"\n\n # using a context manager means that any created Pool objects are\n # cleared up.\n with DifferentialEvolutionSolver(func, bounds, args=args,\n strategy=strategy,\n maxiter=maxiter,\n popsize=popsize, tol=tol,\n mutation=mutation,\n recombination=recombination,\n seed=seed, polish=polish,\n callback=callback,\n disp=disp, init=init, atol=atol,\n updating=updating,\n workers=workers,\n constraints=constraints,\n x0=x0) as solver:\n ret = solver.solve()\n\n return ret\n\n\nclass DifferentialEvolutionSolver:\n\n \"\"\"This class implements the differential evolution solver\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence or `Bounds`\n Bounds for variables. There are two ways to specify the bounds:\n 1. Instance of `Bounds` class.\n 2. ``(min, max)`` pairs for each element in ``x``, defining the finite\n lower and upper bounds for the optimizing argument of `func`. It is\n required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used\n to determine the number of parameters in ``x``.\n args : tuple, optional\n Any additional fixed parameters needed to\n completely specify the objective function.\n strategy : str, optional\n The differential evolution strategy to use. Should be one of:\n\n - 'best1bin'\n - 'best1exp'\n - 'rand1exp'\n - 'randtobest1exp'\n - 'currenttobest1exp'\n - 'best2exp'\n - 'rand2exp'\n - 'randtobest1bin'\n - 'currenttobest1bin'\n - 'best2bin'\n - 'rand2bin'\n - 'rand1bin'\n\n The default is 'best1bin'\n\n maxiter : int, optional\n The maximum number of generations over which the entire population is\n evolved. The maximum number of function evaluations (with no polishing)\n is: ``(maxiter + 1) * popsize * len(x)``\n popsize : int, optional\n A multiplier for setting the total population size. The population has\n ``popsize * len(x)`` individuals. This keyword is overridden if an\n initial population is supplied via the `init` keyword. When using\n ``init='sobol'`` the population size is calculated as the next power\n of 2 after ``popsize * len(x)``.\n tol : float, optional\n Relative tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n mutation : float or tuple(float, float), optional\n The mutation constant. In the literature this is also known as\n differential weight, being denoted by F.\n If specified as a float it should be in the range [0, 2].\n If specified as a tuple ``(min, max)`` dithering is employed. Dithering\n randomly changes the mutation constant on a generation by generation\n basis. The mutation constant for that generation is taken from\n U[min, max). Dithering can help speed convergence significantly.\n Increasing the mutation constant increases the search radius, but will\n slow down convergence.\n recombination : float, optional\n The recombination constant, should be in the range [0, 1]. In the\n literature this is also known as the crossover probability, being\n denoted by CR. 
Increasing this value allows a larger number of mutants\n to progress into the next generation, but at the risk of population\n stability.\n seed : {None, int, `numpy.random.Generator`,\n `numpy.random.RandomState`}, optional\n\n If `seed` is None (or `np.random`), the `numpy.random.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``Generator`` or ``RandomState`` instance then\n that instance is used.\n Specify `seed` for repeatable minimizations.\n disp : bool, optional\n Prints the evaluated `func` at every iteration.\n callback : callable, `callback(xk, convergence=val)`, optional\n A function to follow the progress of the minimization. ``xk`` is\n the current value of ``x0``. ``val`` represents the fractional\n value of the population convergence. When ``val`` is greater than one\n the function halts. If callback returns `True`, then the minimization\n is halted (any polishing is still carried out).\n polish : bool, optional\n If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`\n method is used to polish the best population member at the end, which\n can improve the minimization slightly. If a constrained problem is\n being studied then the `trust-constr` method is used instead.\n maxfun : int, optional\n Set the maximum number of function evaluations. However, it probably\n makes more sense to set `maxiter` instead.\n init : str or array-like, optional\n Specify which type of population initialization is performed. Should be\n one of:\n\n - 'latinhypercube'\n - 'sobol'\n - 'halton'\n - 'random'\n - array specifying the initial population. The array should have\n shape ``(M, len(x))``, where M is the total population size and\n len(x) is the number of parameters.\n `init` is clipped to `bounds` before use.\n\n The default is 'latinhypercube'. Latin Hypercube sampling tries to\n maximize coverage of the available parameter space.\n\n 'sobol' and 'halton' are superior alternatives and maximize even more\n the parameter space. 'sobol' will enforce an initial population\n size which is calculated as the next power of 2 after\n ``popsize * len(x)``. 'halton' has no requirements but is a bit less\n efficient. See `scipy.stats.qmc` for more details.\n\n 'random' initializes the population randomly - this has the drawback\n that clustering can occur, preventing the whole of parameter space\n being covered. Use of an array to specify a population could be used,\n for example, to create a tight bunch of initial guesses in an location\n where the solution is known to exist, thereby reducing time for\n convergence.\n atol : float, optional\n Absolute tolerance for convergence, the solving stops when\n ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,\n where and `atol` and `tol` are the absolute and relative tolerance\n respectively.\n updating : {'immediate', 'deferred'}, optional\n If `immediate` the best solution vector is continuously updated within\n a single generation. This can lead to faster convergence as trial\n vectors can take advantage of continuous improvements in the best\n solution.\n With `deferred` the best solution vector is updated once per\n generation. 
Only `deferred` is compatible with parallelization, and the\n `workers` keyword can over-ride this option.\n workers : int or map-like callable, optional\n If `workers` is an int the population is subdivided into `workers`\n sections and evaluated in parallel\n (uses `multiprocessing.Pool <multiprocessing>`).\n Supply `-1` to use all cores available to the Process.\n Alternatively supply a map-like callable, such as\n `multiprocessing.Pool.map` for evaluating the population in parallel.\n This evaluation is carried out as ``workers(func, iterable)``.\n This option will override the `updating` keyword to\n `updating='deferred'` if `workers != 1`.\n Requires that `func` be pickleable.\n constraints : {NonLinearConstraint, LinearConstraint, Bounds}\n Constraints on the solver, over and above those applied by the `bounds`\n kwd. Uses the approach by Lampinen.\n x0 : None or array-like, optional\n Provides an initial guess to the minimization. Once the population has\n been initialized this vector replaces the first (best) member. This\n replacement is done even if `init` is given an initial population.\n \"\"\"\n\n # Dispatch of mutation strategy method (binomial or exponential).\n _binomial = {'best1bin': '_best1',\n 'randtobest1bin': '_randtobest1',\n 'currenttobest1bin': '_currenttobest1',\n 'best2bin': '_best2',\n 'rand2bin': '_rand2',\n 'rand1bin': '_rand1'}\n _exponential = {'best1exp': '_best1',\n 'rand1exp': '_rand1',\n 'randtobest1exp': '_randtobest1',\n 'currenttobest1exp': '_currenttobest1',\n 'best2exp': '_best2',\n 'rand2exp': '_rand2'}\n\n __init_error_msg = (\"The population initialization method must be one of \"\n \"'latinhypercube' or 'random', or an array of shape \"\n \"(M, N) where N is the number of parameters and M>5\")\n\n def __init__(self, func, bounds, args=(),\n strategy='best1bin', maxiter=1000, popsize=15,\n tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,\n maxfun=np.inf, callback=None, disp=False, polish=True,\n init='latinhypercube', atol=0, updating='immediate',\n workers=1, constraints=(), x0=None):\n\n if strategy in self._binomial:\n self.mutation_func = getattr(self, self._binomial[strategy])\n elif strategy in self._exponential:\n self.mutation_func = getattr(self, self._exponential[strategy])\n else:\n raise ValueError(\"Please select a valid mutation strategy\")\n self.strategy = strategy\n\n self.callback = callback\n self.polish = polish\n\n # set the updating / parallelisation options\n if updating in ['immediate', 'deferred']:\n self._updating = updating\n\n # want to use parallelisation, but updating is immediate\n if workers != 1 and updating == 'immediate':\n warnings.warn(\"differential_evolution: the 'workers' keyword has\"\n \" overridden updating='immediate' to\"\n \" updating='deferred'\", UserWarning)\n self._updating = 'deferred'\n\n # an object with a map method.\n self._mapwrapper = MapWrapper(workers)\n\n # relative and absolute tolerances for convergence\n self.tol, self.atol = tol, atol\n\n # Mutation constant should be in [0, 2). 
If specified as a sequence\n # then dithering is performed.\n self.scale = mutation\n if (not np.all(np.isfinite(mutation)) or\n np.any(np.array(mutation) >= 2) or\n np.any(np.array(mutation) < 0)):\n raise ValueError('The mutation constant must be a float in '\n 'U[0, 2), or specified as a tuple(min, max)'\n ' where min < max and min, max are in U[0, 2).')\n\n self.dither = None\n if hasattr(mutation, '__iter__') and len(mutation) > 1:\n self.dither = [mutation[0], mutation[1]]\n self.dither.sort()\n\n self.cross_over_probability = recombination\n\n # we create a wrapped function to allow the use of map (and Pool.map\n # in the future)\n self.func = _FunctionWrapper(func, args)\n self.args = args\n\n # convert tuple of lower and upper bounds to limits\n # [(low_0, high_0), ..., (low_n, high_n]\n # -> [[low_0, ..., low_n], [high_0, ..., high_n]]\n if isinstance(bounds, Bounds):\n self.limits = np.array(new_bounds_to_old(bounds.lb,\n bounds.ub,\n len(bounds.lb)),\n dtype=float).T\n else:\n self.limits = np.array(bounds, dtype='float').T\n\n if (np.size(self.limits, 0) != 2 or not\n np.all(np.isfinite(self.limits))):\n raise ValueError('bounds should be a sequence containing '\n 'real valued (min, max) pairs for each value'\n ' in x')\n\n if maxiter is None: # the default used to be None\n maxiter = 1000\n self.maxiter = maxiter\n if maxfun is None: # the default used to be None\n maxfun = np.inf\n self.maxfun = maxfun\n\n # population is scaled to between [0, 1].\n # We have to scale between parameter <-> population\n # save these arguments for _scale_parameter and\n # _unscale_parameter. This is an optimization\n self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])\n self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])\n\n self.parameter_count = np.size(self.limits, 1)\n\n self.random_number_generator = check_random_state(seed)\n\n # default population initialization is a latin hypercube design, but\n # there are other population initializations possible.\n # the minimum is 5 because 'best2bin' requires a population that's at\n # least 5 long\n self.num_population_members = max(5, popsize * self.parameter_count)\n self.population_shape = (self.num_population_members,\n self.parameter_count)\n\n self._nfev = 0\n # check first str otherwise will fail to compare str with array\n if isinstance(init, str):\n if init == 'latinhypercube':\n self.init_population_lhs()\n elif init == 'sobol':\n # must be Ns = 2**m for Sobol'\n n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))\n self.num_population_members = n_s\n self.population_shape = (self.num_population_members,\n self.parameter_count)\n self.init_population_qmc(qmc_engine='sobol')\n elif init == 'halton':\n self.init_population_qmc(qmc_engine='halton')\n elif init == 'random':\n self.init_population_random()\n else:\n raise ValueError(self.__init_error_msg)\n else:\n self.init_population_array(init)\n\n if x0 is not None:\n # scale to within unit interval and\n # ensure parameters are within bounds.\n x0_scaled = self._unscale_parameters(np.asarray(x0))\n if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():\n raise ValueError(\n \"Some entries in x0 lay outside the specified bounds\"\n )\n self.population[0] = x0_scaled\n\n # infrastructure for constraints\n self.constraints = constraints\n self._wrapped_constraints = []\n\n if hasattr(constraints, '__len__'):\n # sequence of constraints, this will also deal with default\n # keyword parameter\n for c in constraints:\n self._wrapped_constraints.append(\n 
_ConstraintWrapper(c, self.x)\n )\n else:\n self._wrapped_constraints = [\n _ConstraintWrapper(constraints, self.x)\n ]\n\n self.constraint_violation = np.zeros((self.num_population_members, 1))\n self.feasible = np.ones(self.num_population_members, bool)\n\n self.disp = disp\n\n def init_population_lhs(self):\n \"\"\"\n Initializes the population with Latin Hypercube Sampling.\n Latin Hypercube Sampling ensures that each parameter is uniformly\n sampled over its range.\n \"\"\"\n rng = self.random_number_generator\n\n # Each parameter range needs to be sampled uniformly. The scaled\n # parameter range ([0, 1)) needs to be split into\n # `self.num_population_members` segments, each of which has the following\n # size:\n segsize = 1.0 / self.num_population_members\n\n # Within each segment we sample from a uniform random distribution.\n # We need to do this sampling for each parameter.\n samples = (segsize * rng.uniform(size=self.population_shape)\n\n # Offset each segment to cover the entire parameter range [0, 1)\n + np.linspace(0., 1., self.num_population_members,\n endpoint=False)[:, np.newaxis])\n\n # Create an array for population of candidate solutions.\n self.population = np.zeros_like(samples)\n\n # Initialize population of candidate solutions by permutation of the\n # random samples.\n for j in range(self.parameter_count):\n order = rng.permutation(range(self.num_population_members))\n self.population[:, j] = samples[order, j]\n\n # reset population energies\n self.population_energies = np.full(self.num_population_members,\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_qmc(self, qmc_engine):\n \"\"\"Initializes the population with a QMC method.\n\n QMC methods ensures that each parameter is uniformly\n sampled over its range.\n\n Parameters\n ----------\n qmc_engine : str\n The QMC method to use for initialization. Can be one of\n ``latinhypercube``, ``sobol`` or ``halton``.\n\n \"\"\"\n from scipy.stats import qmc\n\n rng = self.random_number_generator\n\n # Create an array for population of candidate solutions.\n if qmc_engine == 'latinhypercube':\n sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)\n elif qmc_engine == 'sobol':\n sampler = qmc.Sobol(d=self.parameter_count, seed=rng)\n elif qmc_engine == 'halton':\n sampler = qmc.Halton(d=self.parameter_count, seed=rng)\n else:\n raise ValueError(self.__init_error_msg)\n\n self.population = sampler.random(n=self.num_population_members)\n\n # reset population energies\n self.population_energies = np.full(self.num_population_members,\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_random(self):\n \"\"\"\n Initializes the population at random. This type of initialization\n can possess clustering, Latin Hypercube sampling is generally better.\n \"\"\"\n rng = self.random_number_generator\n self.population = rng.uniform(size=self.population_shape)\n\n # reset population energies\n self.population_energies = np.full(self.num_population_members,\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n def init_population_array(self, init):\n \"\"\"\n Initializes the population with a user specified population.\n\n Parameters\n ----------\n init : np.ndarray\n Array specifying subset of the initial population. 
The array should\n have shape (M, len(x)), where len(x) is the number of parameters.\n The population is clipped to the lower and upper bounds.\n \"\"\"\n # make sure you're using a float array\n popn = np.asfarray(init)\n\n if (np.size(popn, 0) < 5 or\n popn.shape[1] != self.parameter_count or\n len(popn.shape) != 2):\n raise ValueError(\"The population supplied needs to have shape\"\n \" (M, len(x)), where M > 4.\")\n\n # scale values and clip to bounds, assigning to population\n self.population = np.clip(self._unscale_parameters(popn), 0, 1)\n\n self.num_population_members = np.size(self.population, 0)\n\n self.population_shape = (self.num_population_members,\n self.parameter_count)\n\n # reset population energies\n self.population_energies = np.full(self.num_population_members,\n np.inf)\n\n # reset number of function evaluations counter\n self._nfev = 0\n\n @property\n def x(self):\n \"\"\"\n The best solution from the solver\n \"\"\"\n return self._scale_parameters(self.population[0])\n\n @property\n def convergence(self):\n \"\"\"\n The standard deviation of the population energies divided by their\n mean.\n \"\"\"\n if np.any(np.isinf(self.population_energies)):\n return np.inf\n return (np.std(self.population_energies) /\n np.abs(np.mean(self.population_energies) + _MACHEPS))\n\n def converged(self):\n \"\"\"\n Return True if the solver has converged.\n \"\"\"\n if np.any(np.isinf(self.population_energies)):\n return False\n\n return (np.std(self.population_energies) <=\n self.atol +\n self.tol * np.abs(np.mean(self.population_energies)))\n\n def solve(self):\n \"\"\"\n Runs the DifferentialEvolutionSolver.\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, ``success`` a\n Boolean flag indicating if the optimizer exited successfully and\n ``message`` which describes the cause of the termination. See\n `OptimizeResult` for a description of other attributes. If `polish`\n was employed, and a lower minimum was obtained by the polishing,\n then OptimizeResult also contains the ``jac`` attribute.\n \"\"\"\n nit, warning_flag = 0, False\n status_message = _status_message['success']\n\n # The population may have just been initialized (all entries are\n # np.inf). 
If it has you have to calculate the initial energies.\n # Although this is also done in the evolve generator it's possible\n # that someone can set maxiter=0, at which point we still want the\n # initial energies to be calculated (the following loop isn't run).\n if np.all(np.isinf(self.population_energies)):\n self.feasible, self.constraint_violation = (\n self._calculate_population_feasibilities(self.population))\n\n # only work out population energies for feasible solutions\n self.population_energies[self.feasible] = (\n self._calculate_population_energies(\n self.population[self.feasible]))\n\n self._promote_lowest_energy()\n\n # do the optimization.\n for nit in range(1, self.maxiter + 1):\n # evolve the population by a generation\n try:\n next(self)\n except StopIteration:\n warning_flag = True\n if self._nfev > self.maxfun:\n status_message = _status_message['maxfev']\n elif self._nfev == self.maxfun:\n status_message = ('Maximum number of function evaluations'\n ' has been reached.')\n break\n\n if self.disp:\n print(\"differential_evolution step %d: f(x)= %g\"\n % (nit,\n self.population_energies[0]))\n\n if self.callback:\n c = self.tol / (self.convergence + _MACHEPS)\n warning_flag = bool(self.callback(self.x, convergence=c))\n if warning_flag:\n status_message = ('callback function requested stop early'\n ' by returning True')\n\n # should the solver terminate?\n if warning_flag or self.converged():\n break\n\n else:\n status_message = _status_message['maxiter']\n warning_flag = True\n\n DE_result = OptimizeResult(\n x=self.x,\n fun=self.population_energies[0],\n nfev=self._nfev,\n nit=nit,\n message=status_message,\n success=(warning_flag is not True))\n\n if self.polish:\n polish_method = 'L-BFGS-B'\n\n if self._wrapped_constraints:\n polish_method = 'trust-constr'\n\n constr_violation = self._constraint_violation_fn(DE_result.x)\n if np.any(constr_violation > 0.):\n warnings.warn(\"differential evolution didn't find a\"\n \" solution satisfying the constraints,\"\n \" attempting to polish from the least\"\n \" infeasible solution\", UserWarning)\n\n result = minimize(self.func,\n np.copy(DE_result.x),\n method=polish_method,\n bounds=self.limits.T,\n constraints=self.constraints)\n\n self._nfev += result.nfev\n DE_result.nfev = self._nfev\n\n # Polishing solution is only accepted if there is an improvement in\n # cost function, the polishing was successful and the solution lies\n # within the bounds.\n if (result.fun < DE_result.fun and\n result.success and\n np.all(result.x <= self.limits[1]) and\n np.all(self.limits[0] <= result.x)):\n DE_result.fun = result.fun\n DE_result.x = result.x\n DE_result.jac = result.jac\n # to keep internal state consistent\n self.population_energies[0] = result.fun\n self.population[0] = self._unscale_parameters(result.x)\n\n if self._wrapped_constraints:\n DE_result.constr = [c.violation(DE_result.x) for\n c in self._wrapped_constraints]\n DE_result.constr_violation = np.max(\n np.concatenate(DE_result.constr))\n DE_result.maxcv = DE_result.constr_violation\n if DE_result.maxcv > 0:\n # if the result is infeasible then success must be False\n DE_result.success = False\n DE_result.message = (\"The solution does not satisfy the\"\n \" constraints, MAXCV = \" % DE_result.maxcv)\n\n return DE_result\n\n def _calculate_population_energies(self, population):\n \"\"\"\n Calculate the energies of a population.\n\n Parameters\n ----------\n population : ndarray\n An array of parameter vectors normalised to [0, 1] using lower\n and upper limits. 
Has shape ``(np.size(population, 0), len(x))``.\n\n Returns\n -------\n energies : ndarray\n An array of energies corresponding to each population member. If\n maxfun will be exceeded during this call, then the number of\n function evaluations will be reduced and energies will be\n right-padded with np.inf. Has shape ``(np.size(population, 0),)``\n \"\"\"\n num_members = np.size(population, 0)\n nfevs = min(num_members,\n self.maxfun - num_members)\n\n energies = np.full(num_members, np.inf)\n\n parameters_pop = self._scale_parameters(population)\n try:\n calc_energies = list(self._mapwrapper(self.func,\n parameters_pop[0:nfevs]))\n energies[0:nfevs] = np.squeeze(calc_energies)\n except (TypeError, ValueError) as e:\n # wrong number of arguments for _mapwrapper\n # or wrong length returned from the mapper\n raise RuntimeError(\n \"The map-like callable must be of the form f(func, iterable), \"\n \"returning a sequence of numbers the same length as 'iterable'\"\n ) from e\n\n self._nfev += nfevs\n\n return energies\n\n def _promote_lowest_energy(self):\n # swaps 'best solution' into first population entry\n\n idx = np.arange(self.num_population_members)\n feasible_solutions = idx[self.feasible]\n if feasible_solutions.size:\n # find the best feasible solution\n idx_t = np.argmin(self.population_energies[feasible_solutions])\n l = feasible_solutions[idx_t]\n else:\n # no solution was feasible, use 'best' infeasible solution, which\n # will violate constraints the least\n l = np.argmin(np.sum(self.constraint_violation, axis=1))\n\n self.population_energies[[0, l]] = self.population_energies[[l, 0]]\n self.population[[0, l], :] = self.population[[l, 0], :]\n self.feasible[[0, l]] = self.feasible[[l, 0]]\n self.constraint_violation[[0, l], :] = (\n self.constraint_violation[[l, 0], :])\n\n def _constraint_violation_fn(self, x):\n \"\"\"\n Calculates total constraint violation for all the constraints, for a given\n solution.\n\n Parameters\n ----------\n x : ndarray\n Solution vector\n\n Returns\n -------\n cv : ndarray\n Total violation of constraints. Has shape ``(M,)``, where M is the\n number of constraints (if each constraint function only returns one\n value)\n \"\"\"\n return np.concatenate([c.violation(x) for c in self._wrapped_constraints])\n\n def _calculate_population_feasibilities(self, population):\n \"\"\"\n Calculate the feasibilities of a population.\n\n Parameters\n ----------\n population : ndarray\n An array of parameter vectors normalised to [0, 1] using lower\n and upper limits. 
Has shape ``(np.size(population, 0), len(x))``.\n\n Returns\n -------\n feasible, constraint_violation : ndarray, ndarray\n Boolean array of feasibility for each population member, and an\n array of the constraint violation for each population member.\n constraint_violation has shape ``(np.size(population, 0), M)``,\n where M is the number of constraints.\n \"\"\"\n num_members = np.size(population, 0)\n if not self._wrapped_constraints:\n # shortcut for no constraints\n return np.ones(num_members, bool), np.zeros((num_members, 1))\n\n parameters_pop = self._scale_parameters(population)\n\n constraint_violation = np.array([self._constraint_violation_fn(x)\n for x in parameters_pop])\n feasible = ~(np.sum(constraint_violation, axis=1) > 0)\n\n return feasible, constraint_violation\n\n def __iter__(self):\n return self\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n return self._mapwrapper.__exit__(*args)\n\n def _accept_trial(self, energy_trial, feasible_trial, cv_trial,\n energy_orig, feasible_orig, cv_orig):\n \"\"\"\n Trial is accepted if:\n * it satisfies all constraints and provides a lower or equal objective\n function value, while both the compared solutions are feasible\n - or -\n * it is feasible while the original solution is infeasible,\n - or -\n * it is infeasible, but provides a lower or equal constraint violation\n for all constraint functions.\n\n This test corresponds to section III of Lampinen [1]_.\n\n Parameters\n ----------\n energy_trial : float\n Energy of the trial solution\n feasible_trial : float\n Feasibility of trial solution\n cv_trial : array-like\n Excess constraint violation for the trial solution\n energy_orig : float\n Energy of the original solution\n feasible_orig : float\n Feasibility of original solution\n cv_orig : array-like\n Excess constraint violation for the original solution\n\n Returns\n -------\n accepted : bool\n\n \"\"\"\n if feasible_orig and feasible_trial:\n return energy_trial <= energy_orig\n elif feasible_trial and not feasible_orig:\n return True\n elif not feasible_trial and (cv_trial <= cv_orig).all():\n # cv_trial < cv_orig would imply that both trial and orig are not\n # feasible\n return True\n\n return False\n\n def __next__(self):\n \"\"\"\n Evolve the population by a single generation\n\n Returns\n -------\n x : ndarray\n The best solution from the solver.\n fun : float\n Value of objective function obtained from the best solution.\n \"\"\"\n # the population may have just been initialized (all entries are\n # np.inf). 
If it has you have to calculate the initial energies\n if np.all(np.isinf(self.population_energies)):\n self.feasible, self.constraint_violation = (\n self._calculate_population_feasibilities(self.population))\n\n # only need to work out population energies for those that are\n # feasible\n self.population_energies[self.feasible] = (\n self._calculate_population_energies(\n self.population[self.feasible]))\n\n self._promote_lowest_energy()\n\n if self.dither is not None:\n self.scale = self.random_number_generator.uniform(self.dither[0],\n self.dither[1])\n\n if self._updating == 'immediate':\n # update best solution immediately\n for candidate in range(self.num_population_members):\n if self._nfev > self.maxfun:\n raise StopIteration\n\n # create a trial solution\n trial = self._mutate(candidate)\n\n # ensuring that it's in the range [0, 1)\n self._ensure_constraint(trial)\n\n # scale from [0, 1) to the actual parameter value\n parameters = self._scale_parameters(trial)\n\n # determine the energy of the objective function\n if self._wrapped_constraints:\n cv = self._constraint_violation_fn(parameters)\n feasible = False\n energy = np.inf\n if not np.sum(cv) > 0:\n # solution is feasible\n feasible = True\n energy = self.func(parameters)\n self._nfev += 1\n else:\n feasible = True\n cv = np.atleast_2d([0.])\n energy = self.func(parameters)\n self._nfev += 1\n\n # compare trial and population member\n if self._accept_trial(energy, feasible, cv,\n self.population_energies[candidate],\n self.feasible[candidate],\n self.constraint_violation[candidate]):\n self.population[candidate] = trial\n self.population_energies[candidate] = energy\n self.feasible[candidate] = feasible\n self.constraint_violation[candidate] = cv\n\n # if the trial candidate is also better than the best\n # solution then promote it.\n if self._accept_trial(energy, feasible, cv,\n self.population_energies[0],\n self.feasible[0],\n self.constraint_violation[0]):\n self._promote_lowest_energy()\n\n elif self._updating == 'deferred':\n # update best solution once per generation\n if self._nfev >= self.maxfun:\n raise StopIteration\n\n # 'deferred' approach, vectorised form.\n # create trial solutions\n trial_pop = np.array(\n [self._mutate(i) for i in range(self.num_population_members)])\n\n # enforce bounds\n self._ensure_constraint(trial_pop)\n\n # determine the energies of the objective function, but only for\n # feasible trials\n feasible, cv = self._calculate_population_feasibilities(trial_pop)\n trial_energies = np.full(self.num_population_members, np.inf)\n\n # only calculate for feasible entries\n trial_energies[feasible] = self._calculate_population_energies(\n trial_pop[feasible])\n\n # which solutions are 'improved'?\n loc = [self._accept_trial(*val) for val in\n zip(trial_energies, feasible, cv, self.population_energies,\n self.feasible, self.constraint_violation)]\n loc = np.array(loc)\n self.population = np.where(loc[:, np.newaxis],\n trial_pop,\n self.population)\n self.population_energies = np.where(loc,\n trial_energies,\n self.population_energies)\n self.feasible = np.where(loc,\n feasible,\n self.feasible)\n self.constraint_violation = np.where(loc[:, np.newaxis],\n cv,\n self.constraint_violation)\n\n # make sure the best solution is updated if updating='deferred'.\n # put the lowest energy into the best solution position.\n self._promote_lowest_energy()\n\n return self.x, self.population_energies[0]\n\n def _scale_parameters(self, trial):\n \"\"\"Scale from a number between 0 and 1 to parameters.\"\"\"\n 
return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2\n\n def _unscale_parameters(self, parameters):\n \"\"\"Scale from parameters to a number between 0 and 1.\"\"\"\n return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5\n\n def _ensure_constraint(self, trial):\n \"\"\"Make sure the parameters lie between the limits.\"\"\"\n mask = np.where((trial > 1) | (trial < 0))\n trial[mask] = self.random_number_generator.uniform(size=mask[0].shape)\n\n def _mutate(self, candidate):\n \"\"\"Create a trial vector based on a mutation strategy.\"\"\"\n trial = np.copy(self.population[candidate])\n\n rng = self.random_number_generator\n\n fill_point = rng.choice(self.parameter_count)\n\n if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:\n bprime = self.mutation_func(candidate,\n self._select_samples(candidate, 5))\n else:\n bprime = self.mutation_func(self._select_samples(candidate, 5))\n\n if self.strategy in self._binomial:\n crossovers = rng.uniform(size=self.parameter_count)\n crossovers = crossovers < self.cross_over_probability\n # the last one is always from the bprime vector for binomial\n # If you fill in modulo with a loop you have to set the last one to\n # true. If you don't use a loop then you can have any random entry\n # be True.\n crossovers[fill_point] = True\n trial = np.where(crossovers, bprime, trial)\n return trial\n\n elif self.strategy in self._exponential:\n i = 0\n crossovers = rng.uniform(size=self.parameter_count)\n crossovers = crossovers < self.cross_over_probability\n while (i < self.parameter_count and crossovers[i]):\n trial[fill_point] = bprime[fill_point]\n fill_point = (fill_point + 1) % self.parameter_count\n i += 1\n\n return trial\n\n def _best1(self, samples):\n \"\"\"best1bin, best1exp\"\"\"\n r0, r1 = samples[:2]\n return (self.population[0] + self.scale *\n (self.population[r0] - self.population[r1]))\n\n def _rand1(self, samples):\n \"\"\"rand1bin, rand1exp\"\"\"\n r0, r1, r2 = samples[:3]\n return (self.population[r0] + self.scale *\n (self.population[r1] - self.population[r2]))\n\n def _randtobest1(self, samples):\n \"\"\"randtobest1bin, randtobest1exp\"\"\"\n r0, r1, r2 = samples[:3]\n bprime = np.copy(self.population[r0])\n bprime += self.scale * (self.population[0] - bprime)\n bprime += self.scale * (self.population[r1] -\n self.population[r2])\n return bprime\n\n def _currenttobest1(self, candidate, samples):\n \"\"\"currenttobest1bin, currenttobest1exp\"\"\"\n r0, r1 = samples[:2]\n bprime = (self.population[candidate] + self.scale *\n (self.population[0] - self.population[candidate] +\n self.population[r0] - self.population[r1]))\n return bprime\n\n def _best2(self, samples):\n \"\"\"best2bin, best2exp\"\"\"\n r0, r1, r2, r3 = samples[:4]\n bprime = (self.population[0] + self.scale *\n (self.population[r0] + self.population[r1] -\n self.population[r2] - self.population[r3]))\n\n return bprime\n\n def _rand2(self, samples):\n \"\"\"rand2bin, rand2exp\"\"\"\n r0, r1, r2, r3, r4 = samples\n bprime = (self.population[r0] + self.scale *\n (self.population[r1] + self.population[r2] -\n self.population[r3] - self.population[r4]))\n\n return bprime\n\n def _select_samples(self, candidate, number_samples):\n \"\"\"\n obtain random integers from range(self.num_population_members),\n without replacement. 
You can't have the original candidate either.\n \"\"\"\n idxs = list(range(self.num_population_members))\n idxs.remove(candidate)\n self.random_number_generator.shuffle(idxs)\n idxs = idxs[:number_samples]\n return idxs\n\n\nclass _FunctionWrapper:\n \"\"\"\n Object to wrap user cost function, allowing picklability\n \"\"\"\n def __init__(self, f, args):\n self.f = f\n self.args = [] if args is None else args\n\n def __call__(self, x):\n return self.f(x, *self.args)\n\n\nclass _ConstraintWrapper:\n \"\"\"Object to wrap/evaluate user defined constraints.\n\n Very similar in practice to `PreparedConstraint`, except that no evaluation\n of jac/hess is performed (explicit or implicit).\n\n If created successfully, it will contain the attributes listed below.\n\n Parameters\n ----------\n constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}\n Constraint to check and prepare.\n x0 : array_like\n Initial vector of independent variables.\n\n Attributes\n ----------\n fun : callable\n Function defining the constraint wrapped by one of the convenience\n classes.\n bounds : 2-tuple\n Contains lower and upper bounds for the constraints --- lb and ub.\n These are converted to ndarray and have a size equal to the number of\n the constraints.\n \"\"\"\n def __init__(self, constraint, x0):\n self.constraint = constraint\n\n if isinstance(constraint, NonlinearConstraint):\n def fun(x):\n return np.atleast_1d(constraint.fun(x))\n elif isinstance(constraint, LinearConstraint):\n def fun(x):\n if issparse(constraint.A):\n A = constraint.A\n else:\n A = np.atleast_2d(constraint.A)\n return A.dot(x)\n elif isinstance(constraint, Bounds):\n def fun(x):\n return x\n else:\n raise ValueError(\"`constraint` of an unknown type is passed.\")\n\n self.fun = fun\n\n lb = np.asarray(constraint.lb, dtype=float)\n ub = np.asarray(constraint.ub, dtype=float)\n\n f0 = fun(x0)\n m = f0.size\n\n if lb.ndim == 0:\n lb = np.resize(lb, m)\n if ub.ndim == 0:\n ub = np.resize(ub, m)\n\n self.bounds = (lb, ub)\n\n def __call__(self, x):\n return np.atleast_1d(self.fun(x))\n\n def violation(self, x):\n \"\"\"How much the constraint is exceeded by.\n\n Parameters\n ----------\n x : array-like\n Vector of independent variables\n\n Returns\n -------\n excess : array-like\n How much the constraint is exceeded by, for each of the\n constraints specified by `_ConstraintWrapper.fun`.\n \"\"\"\n ev = self.fun(np.asarray(x))\n\n excess_lb = np.maximum(self.bounds[0] - ev, 0)\n excess_ub = np.maximum(ev - self.bounds[1], 0)\n\n return excess_lb + excess_ub\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nnpoints = 20 # number of integer support points of the distribution minus 1\nnpointsh = npoints // 2\nnpointsf = float(npoints)\nnbound = 4 # bounds for the truncated normal\nnormbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal\ngrid = np.arange(-npointsh, npointsh+2,1) # integer grid\ngridlimitsnorm = (grid - 0.5) / npointsh * nbound # bin limits for the truncnorm\ngridlimits = grid - 0.5\ngrid = grid[:-1]\nprobs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))\ngridint = grid\n\nrng = np.random.default_rng()\nnormdiscrete = stats.rv_discrete(\n values=(gridint, np.round(probs, decimals=7)),\n name='normdiscrete')\n\nn_sample = 500\nrvs = normdiscrete.rvs(size=n_sample, random_state=rng)\nf, l = np.histogram(rvs,bins=gridlimits)\nsfreq = np.vstack([gridint,f,probs*n_sample]).T\nfs = sfreq[:,1] / float(n_sample)\nft = sfreq[:,2] / float(n_sample)\nfs = sfreq[:,1].cumsum() / float(n_sample)\nft = sfreq[:,2].cumsum() / float(n_sample)\nnd_std = np.sqrt(normdiscrete.stats(moments='v'))\n\nind = gridint # the x locations for the groups\nwidth = 0.35 # the width of the bars\n\nplt.figure()\nplt.subplot(111)\nrects1 = plt.bar(ind, ft, width, color='b')\nrects2 = plt.bar(ind+width, fs, width, color='r')\nnormline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5, scale=nd_std),\n color='b')\n\nplt.ylabel('cdf')\nplt.title('Cumulative Frequency and CDF of normdiscrete')\nplt.xticks(ind+width, ind)\nplt.legend((rects1[0], rects2[0]), ('true', 'sample'))\n\nplt.show()\n"
] |
[
[
"numpy.zeros_like",
"numpy.dot",
"numpy.asarray",
"numpy.identity",
"numpy.asarray_chkfinite",
"numpy.outer",
"numpy.hstack",
"numpy.log2",
"numpy.vstack"
],
[
"numpy.argmin",
"numpy.copy",
"scipy.stats.qmc.LatinHypercube",
"numpy.mean",
"numpy.finfo",
"numpy.where",
"numpy.resize",
"scipy.stats.qmc.Halton",
"scipy._lib._util.check_random_state",
"numpy.size",
"numpy.concatenate",
"numpy.full",
"numpy.zeros_like",
"numpy.fabs",
"numpy.arange",
"numpy.isfinite",
"numpy.atleast_2d",
"scipy.sparse.issparse",
"numpy.array",
"numpy.zeros",
"scipy.stats.qmc.Sobol",
"scipy.optimize.OptimizeResult",
"numpy.std",
"scipy._lib._util.MapWrapper",
"numpy.squeeze",
"numpy.log2",
"numpy.asfarray",
"numpy.isinf",
"numpy.asarray",
"numpy.sum",
"numpy.ones",
"numpy.any",
"numpy.all",
"numpy.linspace",
"numpy.maximum"
],
[
"numpy.histogram",
"numpy.round",
"matplotlib.pyplot.title",
"numpy.random.default_rng",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.vstack",
"matplotlib.pyplot.ylabel",
"scipy.stats.truncnorm.cdf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"scipy.stats.norm.cdf",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplot"
]
] |
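The scipy.optimize source stored above explains the 'best1bin' trial construction in its Notes (mutate the best member with the difference of two random members, then apply binomial crossover). The following is a minimal NumPy-only sketch of that single step under toy data; the population, energies and variable names here are illustrative and are not scipy internals.

import numpy as np

rng = np.random.default_rng(0)

# Toy population of 6 candidate vectors with 4 parameters each, scaled to the
# unit interval the way DifferentialEvolutionSolver keeps them internally.
population = rng.uniform(size=(6, 4))
energies = rng.uniform(size=6)                 # pretend objective values
best = population[np.argmin(energies)]         # b_0 in the Notes

mutation = 0.8        # F, the differential weight
recombination = 0.7   # CR, the crossover probability
candidate = 3         # index of the member being mutated

# pick two distinct members different from the candidate (rand0, rand1)
choices = [i for i in range(len(population)) if i != candidate]
r0, r1 = rng.choice(choices, size=2, replace=False)

# b' = b_0 + F * (population[rand0] - population[rand1])
bprime = best + mutation * (population[r0] - population[r1])

# binomial ('bin') crossover: each parameter comes from b' with probability CR,
# and one randomly chosen entry (the fill point) is forced to come from b'.
crossovers = rng.uniform(size=population.shape[1]) < recombination
crossovers[rng.integers(population.shape[1])] = True
trial = np.where(crossovers, bprime, population[candidate])

# the real solver additionally resamples any trial entries outside [0, 1)
print(trial)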
aldanor/blox
|
[
"429d9c1066e32d8cb119c2d4532ec161c003d5e0"
] |
[
"blox/blosc.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport six\nimport blosc\nimport numpy as np\n\nfrom blox.utils import read_json, write_json, flatten_dtype, restore_dtype\n\n\ndef write_blosc(stream, data, compression='lz4', level=5, shuffle=True):\n if isinstance(compression, six.string_types) and compression.startswith('blosc:'):\n compression = compression[6:]\n data = np.asanyarray(data)\n if data.dtype == np.dtype('O'):\n raise ValueError('unable to serialize: invalid dtype')\n if not data.flags.contiguous:\n raise ValueError('expected contiguous array')\n payload = blosc.compress_ptr(\n data.__array_interface__['data'][0],\n data.size,\n data.dtype.itemsize,\n cname=compression,\n clevel=level,\n shuffle=shuffle\n )\n meta = {\n 'size': data.size * data.dtype.itemsize,\n 'length': len(payload),\n 'comp': (compression, level, int(shuffle)),\n 'shape': data.shape,\n 'dtype': flatten_dtype(data.dtype)\n }\n meta_length = write_json(stream, meta)\n stream.write(payload)\n return len(payload) + meta_length\n\n\ndef read_blosc(stream, out=None):\n meta = read_json(stream)\n shape = tuple(meta['shape'])\n dtype = restore_dtype(meta['dtype'])\n if out is None:\n out = np.empty(shape, dtype)\n elif not isinstance(out, np.ndarray):\n raise TypeError('expected ndarray, got {}'.format(type(out).__name__))\n elif out.shape != shape:\n raise ValueError('incompatible shape: expected {}, got {}'.format(shape, out.shape))\n elif out.dtype != dtype:\n raise ValueError('incompatible dtype: expected {}, got {}'.format(dtype, out.dtype))\n elif not out.flags.contiguous:\n raise ValueError('expected contiguous array')\n blosc.decompress_ptr(\n stream.read(meta['length']),\n out.__array_interface__['data'][0]\n )\n if out.dtype.type is np.record:\n out = out.view(np.recarray)\n return out\n"
] |
[
[
"numpy.empty",
"numpy.asanyarray",
"numpy.dtype"
]
] |
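A small round-trip sketch for the write_blosc/read_blosc pair above, assuming the blox package (and the python-blosc library it wraps) is importable; the module path blox.blosc is taken from the file_path field of this row.

import io
import numpy as np
from blox.blosc import write_blosc, read_blosc

data = np.arange(12, dtype=np.float64).reshape(3, 4)   # contiguous, non-object dtype

buf = io.BytesIO()
nbytes = write_blosc(buf, data, compression='lz4', level=5, shuffle=True)

buf.seek(0)
restored = read_blosc(buf)

assert restored.shape == data.shape and restored.dtype == data.dtype
assert np.array_equal(restored, data)
print('wrote', nbytes, 'bytes (JSON header + blosc payload)')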
lkwagner/pyqmc
|
[
"72999e8dfdba11e50219e45d5c8b19a879a38370"
] |
[
"pyqmc/mc.py"
] |
[
"# This must be done BEFORE importing numpy or anything else.\n# Therefore it must be in your main script.\nimport os\n\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nimport numpy as np\nimport h5py\n\n\ndef initial_guess(mol, nconfig, r=1.0):\n \"\"\" Generate an initial guess by distributing electrons near atoms\n proportional to their charge.\n\n assign electrons to atoms based on atom charges\n assign the minimum number first, and assign the leftover ones randomly\n this algorithm chooses atoms *with replacement* to assign leftover electrons\n\n Args: \n\n mol: A PySCF-like molecule object. Should have atom_charges(), atom_coords(), and nelec\n\n nconfig: How many configurations to generate.\n\n r: How far from the atoms to distribute the electrons\n\n Returns: \n\n A numpy array with shape (nconfig,nelectrons,3) with the electrons randomly distributed near \n the atoms.\n \n \"\"\"\n from pyqmc.coord import OpenConfigs, PeriodicConfigs\n\n epos = np.zeros((nconfig, np.sum(mol.nelec), 3))\n wts = mol.atom_charges()\n wts = wts / np.sum(wts)\n\n for s in [0, 1]:\n neach = np.array(\n np.floor(mol.nelec[s] * wts), dtype=int\n ) # integer number of elec on each atom\n nleft = (\n mol.nelec[s] * wts - neach\n ) # fraction of electron unassigned on each atom\n nassigned = np.sum(neach) # number of electrons assigned\n totleft = int(mol.nelec[s] - nassigned) # number of electrons not yet assigned\n ind0 = s * mol.nelec[0]\n epos[:, ind0 : ind0 + nassigned, :] = np.repeat(\n mol.atom_coords(), neach, axis=0\n ) # assign core electrons\n if totleft > 0:\n bins = np.cumsum(nleft) / totleft\n inds = np.argpartition(\n np.random.random((nconfig, len(wts))), totleft, axis=1\n )[:, :totleft]\n epos[:, ind0 + nassigned : ind0 + mol.nelec[s], :] = mol.atom_coords()[\n inds\n ] # assign remaining electrons\n\n epos += r * np.random.randn(*epos.shape) # random shifts from atom positions\n if hasattr(mol, \"a\"):\n epos = PeriodicConfigs(epos, mol.lattice_vectors())\n else:\n epos = OpenConfigs(epos)\n return epos\n\n\ndef limdrift(g, cutoff=1):\n \"\"\"\n Limit a vector to have a maximum magnitude of cutoff while maintaining direction\n\n Args:\n g: a [nconf,ndim] vector\n \n cutoff: the maximum magnitude\n\n Returns: \n The vector with the cut off applied.\n \"\"\"\n tot = np.linalg.norm(g, axis=1)\n mask = tot > cutoff\n g[mask, :] = cutoff * g[mask, :] / tot[mask, np.newaxis]\n return g\n\n\ndef vmc_file(hdf_file, data, attr, configs):\n import pyqmc.hdftools as hdftools\n\n if hdf_file is not None:\n with h5py.File(hdf_file, \"a\") as hdf:\n if \"configs\" not in hdf.keys():\n hdftools.setup_hdf(hdf, data, attr)\n configs.initialize_hdf(hdf)\n hdftools.append_hdf(hdf, data)\n configs.to_hdf(hdf)\n\n\ndef vmc_worker(wf, configs, tstep, nsteps, accumulators):\n \"\"\"\n Run VMC for nsteps.\n\n Return a dictionary of averages from each accumulator. 
\n \"\"\"\n nconf, nelec, _ = configs.configs.shape\n block_avg = {}\n wf.recompute(configs)\n\n for _ in range(nsteps):\n acc = 0.0\n for e in range(nelec):\n # Propose move\n grad = limdrift(np.real(wf.gradient(e, configs.electron(e)).T))\n gauss = np.random.normal(scale=np.sqrt(tstep), size=(nconf, 3))\n newcoorde = configs.configs[:, e, :] + gauss + grad * tstep\n newcoorde = configs.make_irreducible(e, newcoorde)\n\n # Compute reverse move\n new_grad = limdrift(np.real(wf.gradient(e, newcoorde).T))\n forward = np.sum(gauss ** 2, axis=1)\n backward = np.sum((gauss + tstep * (grad + new_grad)) ** 2, axis=1)\n\n # Acceptance\n t_prob = np.exp(1 / (2 * tstep) * (forward - backward))\n ratio = np.multiply(wf.testvalue(e, newcoorde) ** 2, t_prob)\n accept = ratio > np.random.rand(nconf)\n\n # Update wave function\n configs.move(e, newcoorde, accept)\n wf.updateinternals(e, newcoorde, mask=accept)\n acc += np.mean(accept) / nelec\n\n # Rolling average on step\n for k, accumulator in accumulators.items():\n dat = accumulator.avg(configs, wf)\n for m, res in dat.items():\n if k + m not in block_avg:\n block_avg[k + m] = res / nsteps\n else:\n block_avg[k + m] += res / nsteps\n block_avg[\"acceptance\"] = acc\n return block_avg, configs\n\n\ndef vmc_parallel(\n wf, configs, tstep, nsteps_per_block, accumulators, client, npartitions\n):\n config = configs.split(npartitions)\n runs = [\n client.submit(vmc_worker, wf, conf, tstep, nsteps_per_block, accumulators)\n for conf in config\n ]\n allresults = list(zip(*[r.result() for r in runs]))\n configs.join(allresults[1])\n confweight = np.array([len(c.configs) for c in config], dtype=float)\n confweight /= np.mean(confweight) * npartitions\n block_avg = {}\n for k in allresults[0][0].keys():\n block_avg[k] = np.sum(\n [res[k] * w for res, w in zip(allresults[0], confweight)], axis=0\n )\n return block_avg, configs\n\n\ndef vmc(\n wf,\n configs,\n nblocks=10,\n nsteps_per_block=10,\n nsteps=None,\n tstep=0.5,\n accumulators=None,\n verbose=False,\n stepoffset=0,\n hdf_file=None,\n client=None,\n npartitions=None,\n):\n \"\"\"Run a Monte Carlo sample of a given wave function.\n\n Args:\n wf: A Wave function-like class. recompute(), gradient(), and updateinternals() are used, as well as \n anything (such as laplacian() ) used by accumulators\n \n configs: Initial electron coordinates\n\n nblocks: Number of VMC blocks to run \n\n nsteps_per_block: Number of steps to run per block\n\n nsteps: (Deprecated) Number of steps to run, maps to nblocks = 1, nsteps_per_block = nsteps\n\n tstep: Time step for move proposals. Only affects efficiency.\n\n accumulators: A dictionary of functor objects that take in (configs,wf) and return a dictionary of quantities to be averaged. np.mean(quantity,axis=0) should give the average over configurations. If None, then the coordinates will only be propagated with acceptance information.\n \n verbose: Print out step information \n\n stepoffset: If continuing a run, what to start the step numbering at.\n \n hdf_file: Hdf_file to store vmc output.\n\n client: an object with submit() functions that return futures\n\n nworkers: the number of workers to submit at a time\n\n Returns: (df,configs)\n df: A list of dictionaries nstep long that contains all results from the accumulators. 
These are averaged across all walkers.\n\n configs: The final coordinates from this calculation.\n \n \"\"\"\n if nsteps is not None:\n nblocks = nsteps\n nsteps_per_block = 1\n\n if accumulators is None:\n accumulators = {}\n if verbose:\n print(\"WARNING: running VMC with no accumulators\")\n\n # Restart\n if hdf_file is not None:\n with h5py.File(hdf_file, \"a\") as hdf:\n if \"configs\" in hdf.keys():\n stepoffset = hdf[\"block\"][-1] + 1\n configs.load_hdf(hdf)\n if verbose:\n print(\"Restarting calculation from step\", stepoffset)\n\n df = []\n\n for block in range(nblocks):\n if verbose:\n print(f\"-\", end=\"\", flush=True)\n if client is None:\n block_avg, configs = vmc_worker(\n wf, configs, tstep, nsteps_per_block, accumulators\n )\n else:\n block_avg, configs = vmc_parallel(\n wf, configs, tstep, nsteps_per_block, accumulators, client, npartitions\n )\n # Append blocks\n block_avg[\"block\"] = stepoffset + block\n block_avg[\"nconfig\"] = nsteps_per_block * configs.configs.shape[0]\n vmc_file(hdf_file, block_avg, dict(tstep=tstep), configs)\n df.append(block_avg)\n if verbose:\n print(\"vmc done\")\n\n df_return = {}\n for k in df[0].keys():\n df_return[k] = np.asarray([d[k] for d in df])\n return df_return, configs\n"
] |
[
[
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.asarray",
"numpy.sum",
"numpy.random.randn",
"numpy.exp",
"numpy.mean",
"numpy.sqrt",
"numpy.cumsum",
"numpy.floor"
]
] |
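The pyqmc.mc code above caps the drift vector with limdrift before building each move proposal. The sketch below restates that cutoff rule in plain NumPy (so no PySCF molecule or wave function is required); the gradient values are synthetic stand-ins and a copy is taken instead of mutating in place.

import numpy as np

def limdrift(g, cutoff=1):
    # same rule as pyqmc.mc.limdrift: rescale rows whose norm exceeds `cutoff`
    g = g.copy()
    tot = np.linalg.norm(g, axis=1)
    mask = tot > cutoff
    g[mask, :] = cutoff * g[mask, :] / tot[mask, np.newaxis]
    return g

rng = np.random.default_rng(1)
nconf, tstep = 4, 0.5

grad = rng.normal(scale=3.0, size=(nconf, 3))   # stand-in for Re[grad ln Psi]
drift = limdrift(grad)                          # magnitudes now capped at 1
gauss = rng.normal(scale=np.sqrt(tstep), size=(nconf, 3))
proposed_step = gauss + drift * tstep           # the per-electron move in vmc_worker

print(np.linalg.norm(grad, axis=1))    # some rows exceed the cutoff ...
print(np.linalg.norm(drift, axis=1))   # ... and are rescaled after limdrift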
jplummer01/azureml-examples
|
[
"6a073d157f21060312941f71cfbcf25d0c541183"
] |
[
"sdk/jobs/automl-standalone-jobs/automl-image-instance-segmentation-task-fridge-items/jsonl_converter.py"
] |
[
"import argparse\nimport os\nimport json\nimport numpy as np\nimport PIL.Image as Image\nimport xml.etree.ElementTree as ET\n\nfrom simplification.cutil import simplify_coords\nfrom skimage import measure\n\n\ndef convert_mask_to_polygon(\n mask,\n max_polygon_points=100,\n score_threshold=0.5,\n max_refinement_iterations=25,\n edge_safety_padding=1,\n):\n \"\"\"Convert a numpy mask to a polygon outline in normalized coordinates.\n\n :param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], in size ([1, height, width])\n :type: mask: <class 'numpy.array'>\n :param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon\n :type: max_polygon_points: Int\n :param score_threshold: Score cutoff for considering a pixel as in object.\n :type: score_threshold: Float\n :param max_refinement_iterations: Maximum number of times to refine the polygon\n trying to reduce the number of pixels to meet max polygon points.\n :type: max_refinement_iterations: Int\n :param edge_safety_padding: Number of pixels to pad the mask with\n :type edge_safety_padding: Int\n :return: normalized polygon coordinates\n :rtype: list of list\n \"\"\"\n # Convert to numpy bitmask\n mask = mask[0]\n mask_array = np.array((mask > score_threshold), dtype=np.uint8)\n image_shape = mask_array.shape\n\n # Pad the mask to avoid errors at the edge of the mask\n embedded_mask = np.zeros(\n (\n image_shape[0] + 2 * edge_safety_padding,\n image_shape[1] + 2 * edge_safety_padding,\n ),\n dtype=np.uint8,\n )\n embedded_mask[\n edge_safety_padding : image_shape[0] + edge_safety_padding,\n edge_safety_padding : image_shape[1] + edge_safety_padding,\n ] = mask_array\n\n # Find Image Contours\n contours = measure.find_contours(embedded_mask, 0.5)\n simplified_contours = []\n\n for contour in contours:\n\n # Iteratively reduce polygon points, if necessary\n if max_polygon_points is not None:\n simplify_factor = 0\n while (\n len(contour) > max_polygon_points\n and simplify_factor < max_refinement_iterations\n ):\n contour = simplify_coords(contour, simplify_factor)\n simplify_factor += 1\n\n # Convert to [x, y, x, y, ....] 
coordinates and correct for padding\n unwrapped_contour = [0] * (2 * len(contour))\n unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding\n unwrapped_contour[1::2] = np.ceil(contour[:, 0]) - edge_safety_padding\n\n simplified_contours.append(unwrapped_contour)\n\n return _normalize_contour(simplified_contours, image_shape)\n\n\ndef _normalize_contour(contours, image_shape):\n\n height, width = image_shape[0], image_shape[1]\n\n for contour in contours:\n contour[::2] = [x * 1.0 / width for x in contour[::2]]\n contour[1::2] = [y * 1.0 / height for y in contour[1::2]]\n\n return contours\n\n\ndef binarise_mask(mask_fname):\n\n mask = Image.open(mask_fname)\n mask = np.array(mask)\n # instances are encoded as different colors\n obj_ids = np.unique(mask)\n # first id is the background, so remove it\n obj_ids = obj_ids[1:]\n\n # split the color-encoded mask into a set of binary masks\n binary_masks = mask == obj_ids[:, None, None]\n return binary_masks\n\n\ndef parsing_mask(mask_fname):\n\n # For this particular dataset, initially each mask was merged (based on binary mask of each object)\n # in the order of the bounding boxes described in the corresponding PASCAL VOC annotation file.\n # Therefore, we have to extract each binary mask which is in the order of objects in the annotation file.\n # https://github.com/microsoft/computervision-recipes/blob/master/utils_cv/detection/dataset.py\n binary_masks = binarise_mask(mask_fname)\n polygons = []\n for bi_mask in binary_masks:\n\n if len(bi_mask.shape) == 2:\n bi_mask = bi_mask[np.newaxis, :]\n polygon = convert_mask_to_polygon(bi_mask)\n polygons.append(polygon)\n\n return polygons\n\n\ndef convert_mask_in_VOC_to_jsonl(base_dir, remote_path):\n\n src_images = base_dir\n\n # We'll copy each JSONL file within its related MLTable folder\n training_mltable_path = \"./data/training-mltable-folder/\"\n validation_mltable_path = \"./data/validation-mltable-folder/\"\n\n train_validation_ratio = 5\n\n # Path to the training and validation files\n train_annotations_file = os.path.join(\n training_mltable_path, \"train_annotations.jsonl\"\n )\n validation_annotations_file = os.path.join(\n validation_mltable_path, \"validation_annotations.jsonl\"\n )\n\n # Path to the annotations\n annotations_folder = os.path.join(src_images, \"annotations\")\n mask_folder = os.path.join(src_images, \"segmentation-masks\")\n\n # sample json line dictionary\n json_line_sample = {\n \"image_url\": remote_path,\n \"image_details\": {\"format\": None, \"width\": None, \"height\": None},\n \"label\": [],\n }\n\n # Read each annotation and convert it to jsonl line\n with open(train_annotations_file, \"w\") as train_f:\n with open(validation_annotations_file, \"w\") as validation_f:\n for i, filename in enumerate(os.listdir(annotations_folder)):\n if filename.endswith(\".xml\"):\n print(\"Parsing \" + os.path.join(src_images, filename))\n\n root = ET.parse(\n os.path.join(annotations_folder, filename)\n ).getroot()\n\n width = int(root.find(\"size/width\").text)\n height = int(root.find(\"size/height\").text)\n # convert mask into polygon\n mask_fname = os.path.join(mask_folder, filename[:-4] + \".png\")\n polygons = parsing_mask(mask_fname)\n\n labels = []\n for index, object in enumerate(root.findall(\"object\")):\n name = object.find(\"name\").text\n isCrowd = int(object.find(\"difficult\").text)\n labels.append(\n {\n \"label\": name,\n \"bbox\": \"null\",\n \"isCrowd\": isCrowd,\n \"polygon\": polygons[index],\n }\n )\n\n # build the jsonl file\n 
image_filename = root.find(\"filename\").text\n _, file_extension = os.path.splitext(image_filename)\n json_line = dict(json_line_sample)\n json_line[\"image_url\"] = (\n json_line[\"image_url\"] + \"images/\" + image_filename\n )\n json_line[\"image_details\"][\"format\"] = file_extension[1:]\n json_line[\"image_details\"][\"width\"] = width\n json_line[\"image_details\"][\"height\"] = height\n json_line[\"label\"] = labels\n\n if i % train_validation_ratio == 0:\n # validation annotation\n validation_f.write(json.dumps(json_line) + \"\\n\")\n else:\n # train annotation\n train_f.write(json.dumps(json_line) + \"\\n\")\n else:\n print(\"Skipping unknown file: {}\".format(filename))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(allow_abbrev=False)\n parser.add_argument(\n \"--data_path\",\n type=str,\n help=\"the directory contains images, annotations, and masks\",\n )\n\n args, remaining_args = parser.parse_known_args()\n data_path = args.data_path\n\n convert_mask_in_VOC_to_jsonl(data_path)\n"
] |
[
[
"numpy.array",
"numpy.ceil",
"numpy.zeros",
"numpy.unique"
]
] |
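binarise_mask in the converter above splits a color-encoded instance mask into one boolean mask per object with a single broadcast comparison. A NumPy-only sketch of that trick on a made-up 4x4 mask:

import numpy as np

# Toy color-encoded instance mask: 0 is background, 1 and 2 are two objects,
# mirroring the PNG masks binarise_mask() expects.
mask = np.array([
    [0, 0, 1, 1],
    [0, 0, 1, 1],
    [2, 2, 0, 0],
    [2, 2, 0, 0],
])

obj_ids = np.unique(mask)[1:]                    # drop the background id
binary_masks = mask == obj_ids[:, None, None]    # broadcast to (n_objects, H, W)

print(binary_masks.shape)                        # (2, 4, 4)
for obj_id, bi_mask in zip(obj_ids, binary_masks):
    print('object', obj_id, '->', int(bi_mask.sum()), 'pixels')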
JingZhang918/master_thesis
|
[
"f0d3b874a5ac1c55db028f87a7442a18f2d418eb"
] |
[
"DRL_Automated_Trading/visualization.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport yfinance as yf\nfrom data_process import get_revised_yearly_return, get_revised_monthly_return\nimport plotly.graph_objects as go\nimport config\n\ndef get_returns(prices):\n return prices[-1]/prices[0]-1\n\ndef plot_yearly_diff_comparison(df_drl, df_etf, save_path):\n #return in years\n yearly_asset_return_drl, yearly_reward_return_drl = get_revised_yearly_return(df_drl)\n yearly_return_etf = df_etf.groupby([df_etf.index.year]).Close.apply(get_returns)-config.ETF_EXPENSE\n\n # define histogram colors\n colors_asset_drl = np.where(yearly_asset_return_drl.values > 0, '#06d6a0', '#ef476f')\n colors_reward_drl = np.where(yearly_reward_return_drl.values > 0, '#06d6a0', '#ef476f')\n colors_etf = np.where(yearly_return_etf.values > 0, '#0a9396', '#e63946')\n\n # plot asset diff\n fig = go.Figure()\n fig.add_trace(\n go.Bar(\n x=yearly_asset_return_drl.index,\n y=yearly_asset_return_drl.values * 100,\n name=\"DRL\",\n marker_color=colors_asset_drl,\n text=yearly_asset_return_drl.values * 100,\n texttemplate='%{text:.2f}',\n textposition='outside'\n ),\n )\n fig.add_trace(\n go.Bar(\n x=yearly_return_etf.index,\n y=yearly_return_etf.values * 100,\n name='ETF',\n marker_color=colors_etf,\n text=yearly_return_etf.values * 100,\n texttemplate='%{text:.2f}',\n textposition='outside'\n ),\n )\n\n # make the figure prettier\n layout = go.Layout(\n title=\"Yearly asset return comparison with ETF (%)\",\n plot_bgcolor='#ecf8f8',\n font_family='Monospace',\n font_color='#073b4c',\n font_size=10,\n xaxis=dict( rangeslider=dict(visible=False)),\n autosize=True,\n )\n\n fig.update_layout(layout)\n fig.write_image(save_path+\"yearly_asset_comparison.png\")\n \n # plot reward diff\n fig = go.Figure()\n fig.add_trace(\n go.Bar(\n x=yearly_reward_return_drl.index,\n y=yearly_reward_return_drl.values * 100,\n name=\"DRL\",\n marker_color=colors_reward_drl,\n text=yearly_reward_return_drl.values * 100,\n texttemplate='%{text:.2f}',\n textposition='outside'\n ),\n )\n fig.add_trace(\n go.Bar(\n x=yearly_return_etf.index,\n y=yearly_return_etf.values * 100,\n name='ETF',\n marker_color=colors_etf,\n text=yearly_return_etf.values * 100,\n texttemplate='%{text:.2f}',\n textposition='outside'\n ),\n )\n\n # make the figure prettier\n layout = go.Layout(\n title=\"Yearly reward return comparison with ETF (%)\",\n plot_bgcolor='#ecf8f8',\n font_family='Monospace',\n font_color='#073b4c',\n font_size=10,\n xaxis=dict( rangeslider=dict(visible=False)),\n autosize=True,\n )\n\n fig.update_layout(layout)\n fig.write_image(save_path+\"yearly_reward_comparison.png\")\n \n \n \n\ndef plot_monthly_heatmap(df_drl, df_etf, save_path):\n \n years = 6\n \n monthly_asset_return_drl, monthly_reward_return_drl = get_revised_monthly_return(df_drl)\n monthly_asset_return_drl = np.resize([np.nan]*10 + list(monthly_asset_return_drl.values) + [np.nan]*2, (years, 12))\n monthly_reward_return_drl = np.resize([np.nan]*10 + list(monthly_reward_return_drl.values) + [np.nan]*2, (years, 12))\n\n monthly_return_etf = df_etf.groupby([df_etf.index.year, df_etf.index.month]).Close.apply(get_returns)-config.ETF_EXPENSE\n monthly_return_etf = np.resize([np.nan] * 10 + list(monthly_return_etf.values) + [np.nan] * 2, (years, 12))\n\n months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n years = [i for i in np.arange(2016, 2021 + 1, 1)]\n colorscale = [[0, '#ef476f'], [0.5, 'white'], [1, '#06d6a0']]\n\n # drl asset return heatmap\n monthly_returns = 
monthly_asset_return_drl\n annotations = go.Annotations()\n for n, row in enumerate(monthly_returns):\n for m, val in enumerate(row):\n annotations.append(go.Annotation(text=str(round(monthly_returns[n][m] * 100, 2)), x=months[m], y=years[n],\n xref='x1', yref='y1', showarrow=False))\n trace = go.Heatmap(x=months, y=years, z=monthly_returns, colorscale=colorscale, showscale=True)\n fig = go.Figure(data=go.Data([trace]))\n fig['layout'].update(\n title=\"DRL monthly asset returns in a heatmap (%)\",\n annotations=annotations,\n xaxis=go.XAxis(ticks='', side='top'),\n yaxis=go.YAxis(ticks='', ticksuffix=' '), # ticksuffix is a workaround to add a bit of padding\n autosize=True\n )\n fig.write_image(save_path+\"heatmap_DRL_asset.png\")\n\n \n # drl reward return heatmap\n monthly_returns = monthly_reward_return_drl\n annotations = go.Annotations()\n for n, row in enumerate(monthly_returns):\n for m, val in enumerate(row):\n annotations.append(go.Annotation(text=str(round(monthly_returns[n][m] * 100, 2)), x=months[m], y=years[n],\n xref='x1', yref='y1', showarrow=False))\n trace = go.Heatmap(x=months, y=years, z=monthly_returns, colorscale=colorscale, showscale=True)\n fig = go.Figure(data=go.Data([trace]))\n fig['layout'].update(\n title=\"DRL monthly reward returns in a heatmap (%)\",\n annotations=annotations,\n xaxis=go.XAxis(ticks='', side='top'),\n yaxis=go.YAxis(ticks='', ticksuffix=' '), # ticksuffix is a workaround to add a bit of padding\n autosize=True\n )\n fig.write_image(save_path+\"heatmap_DRL_reward.png\")\n \n # ETF\n monthly_returns = monthly_return_etf\n annotations = go.Annotations()\n for n, row in enumerate(monthly_returns):\n for m, val in enumerate(row):\n annotations.append(go.Annotation(text=str(round(monthly_returns[n][m] * 100, 2)), x=months[m], y=years[n],\n xref='x1', yref='y1', showarrow=False))\n trace = go.Heatmap(x=months, y=years, z=monthly_returns, colorscale=colorscale, showscale=True)\n fig = go.Figure(data=go.Data([trace]))\n fig['layout'].update(\n title=\"ETF monthly returns in a heatmap (%)\",\n annotations=annotations,\n xaxis=go.XAxis(ticks='', side='top'),\n yaxis=go.YAxis(ticks='', ticksuffix=' '), # ticksuffix is a workaround to add a bit of padding\n autosize=True\n )\n fig.write_image(save_path+\"heatmap_etf.png\")\n\n # asset diff\n monthly_returns = monthly_asset_return_drl - monthly_return_etf\n annotations = go.Annotations()\n for n, row in enumerate(monthly_returns):\n for m, val in enumerate(row):\n annotations.append(go.Annotation(text=str(round(monthly_returns[n][m] * 100, 2)), x=months[m], y=years[n],\n xref='x1', yref='y1', showarrow=False))\n trace = go.Heatmap(x=months, y=years, z=monthly_returns, colorscale=colorscale, showscale=True)\n fig = go.Figure(data=go.Data([trace]))\n fig['layout'].update(\n title=\"Monthly asset return diff with ETF in a heatmap (%)\",\n annotations=annotations,\n xaxis=go.XAxis(ticks='', side='top'),\n yaxis=go.YAxis(ticks='', ticksuffix=' '), # ticksuffix is a workaround to add a bit of padding\n autosize=True\n )\n fig.write_image(save_path+\"heatmap_diff_asset.png\")\n\n\n # reward diff\n monthly_returns = monthly_reward_return_drl - monthly_return_etf\n annotations = go.Annotations()\n for n, row in enumerate(monthly_returns):\n for m, val in enumerate(row):\n annotations.append(go.Annotation(text=str(round(monthly_returns[n][m] * 100, 2)), x=months[m], y=years[n],\n xref='x1', yref='y1', showarrow=False))\n trace = go.Heatmap(x=months, y=years, z=monthly_returns, colorscale=colorscale, 
showscale=True)\n fig = go.Figure(data=go.Data([trace]))\n fig['layout'].update(\n title=\"Monthly reward return diff with ETF in a heatmap (%)\",\n annotations=annotations,\n xaxis=go.XAxis(ticks='', side='top'),\n yaxis=go.YAxis(ticks='', ticksuffix=' '), # ticksuffix is a workaround to add a bit of padding\n autosize=True\n )\n fig.write_image(save_path+\"heatmap_diff_reward.png\")\n\n\n\ndef plot_trading_behavior(df, ticker, start, end, save_path):\n \n candelstick = yf.Ticker(ticker).history(start=start,end=end)[[\"Open\",\"High\",\"Low\",\"Close\"]]\n candelstick.columns = [c.lower() for c in candelstick.columns]\n \n trading = df[(df[\"ticker\"]==ticker)&(df[\"date\"]>=start)&(df[\"date\"]<=end)]\n \n fig = go.Figure()\n fig.add_trace(go.Candlestick(\n x=candelstick.index,\n open=candelstick.open,\n high=candelstick.high,\n low=candelstick.low,\n close=candelstick.close,\n increasing_line_color='#ef476f',\n decreasing_line_color='#06d6a0',\n showlegend=False\n ),\n )\n buying_signal = trading[(trading.signal == 1)&(trading.transaction_cost>0)]\n selling_signal = trading[(trading.signal == -1)&(trading.transaction_cost>0)]\n\n fig.add_trace(\n go.Scatter(\n x=buying_signal.date,\n y=buying_signal.transaction_price,\n marker = dict(\n color='#073b4c',\n size=10,\n line=dict(\n color='#118ab2',\n width=2\n ),\n symbol='triangle-up'\n\n ),\n mode = \"markers\",\n name = \"Buy\",\n showlegend=True,\n ), \n )\n\n fig.add_trace(\n go.Scatter(\n x=selling_signal.date,\n y=selling_signal.transaction_price,\n marker = dict(\n color='#fb5607',\n size=10,\n line=dict(\n color='#ffbe0b',\n width=2\n ),\n symbol='triangle-down'\n\n ),\n mode = \"markers\",\n name = \"Sell\",\n showlegend=True,\n ), \n )\n layout = go.Layout(\n plot_bgcolor='#ecf8f8',\n font_family='Monospace',\n font_color='#073b4c',\n font_size=10,\n xaxis=dict(\n rangeslider=dict(visible=False)\n ),\n autosize=True\n )\n\n fig.update_xaxes(\n rangebreaks=[\n dict(bounds=['sat', 'mon'])\n ]\n )\n fig.update_layout(layout)\n fig.write_image(save_path+ticker+\"_\"+start+\"_\"+end+\"_trading_signal.png\")\n "
] |
[
[
"numpy.where",
"numpy.arange"
]
] |
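A minimal sketch of the numpy.where color-selection pattern used in the visualization.py record above: returns are mapped to one hex color when positive and another when negative, and the resulting array is passed to a plotly bar trace as marker colors. The return values, year range, and output filename below are illustrative assumptions, not taken from the record.

import numpy as np
import plotly.graph_objects as go

yearly_returns = np.array([0.12, -0.05, 0.30, -0.18])        # hypothetical yearly returns
colors = np.where(yearly_returns > 0, '#06d6a0', '#ef476f')   # green when positive, red when negative

fig = go.Figure(go.Bar(x=np.arange(2018, 2022),               # hypothetical year labels
                       y=yearly_returns * 100,
                       marker_color=list(colors)))
# fig.write_image("yearly_returns.png")                       # needs the optional kaleido package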
zengrz/cgan-isola-2017
|
[
"3f1e45a4ecc26773927b1b8d37f204f2954e8c4e"
] |
[
"src/CustomDataFlow.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: bsds500.py\n\nimport os\nimport glob\nimport numpy as np\nimport cv2\nfrom random import randint\nfrom functools import reduce\n\nfrom tensorpack.utils.fs import download, get_dataset_path\nfrom tensorpack.dataflow.base import RNGDataFlow\n\nIMG_W, IMG_H = 512, 512\nIMG_SUFFIX = '_clean.png'\nGT_SUFFIX = '_texturemap.bin'\n\nclass CustomDataFlow(RNGDataFlow):\n def __init__(self, data_dir, name, shuffle=True):\n \"\"\"\n Args:\n name (str): 'train', 'test', 'val'\n data_dir (str): a directory containing the \"data\" folders, which has folders with the names.\n \"\"\"\n self.data_root = os.path.join(data_dir, 'data')\n print(self.data_root)\n assert os.path.isdir(self.data_root)\n\n self.shuffle = shuffle\n assert name in ['train', 'test', 'val']\n self._load(name)\n\n def _load(self, name):\n image_glob = os.path.join(self.data_root, 'images', name, '*' + IMG_SUFFIX)\n image_files = glob.glob(image_glob)\n gt_dir = os.path.join(self.data_root, 'groundTruth', name)\n self.data = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32') # NHW\n self.label = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32') # NHW\n\n for idx, f in enumerate(image_files):\n im = cv2.imread(f, cv2.IMREAD_GRAYSCALE)\n assert im is not None\n orig_im_shape = im.shape\n if im.shape != (IMG_H, IMG_W):\n assert im.shape[0] >= IMG_H and im.shape[1] >= IMG_W, \"{} < {}\".format(im.shape, (IMG_H, IMG_W))\n hi = randint(0, im.shape[0] - IMG_H)\n hf = hi + IMG_H\n wi = randint(0, im.shape[1] - IMG_W)\n wf = wi + IMG_W\n im = im[hi:hf, wi:wf]\n im = im.astype('float32')\n\n imgid = os.path.basename(f).split('.')[0]\n gt_file = os.path.join(gt_dir, imgid + '' + GT_SUFFIX)\n gt = np.fromfile(gt_file, dtype='uint32')\n print(max(max(gt)))\n assert gt is not None\n gt = gt.astype('float32')\n assert gt.shape[0] == reduce(lambda x, y: x*y, orig_im_shape), \"Different number of elements: {} != {}\".format(gt.shape, orig_im_shape)\n gt = np.reshape(gt, orig_im_shape)\n if gt.shape != (IMG_H, IMG_W):\n gt = gt[hi:hf, wi:wf]\n\n self.data[idx] = im\n self.label[idx] = gt\n\n def size(self):\n return self.data.shape[0]\n\n def get_data(self):\n idxs = np.arange(self.data.shape[0])\n if self.shuffle:\n self.rng.shuffle(idxs)\n for k in idxs:\n yield [self.data[k], self.label[k]]\n"
] |
[
[
"numpy.arange",
"numpy.reshape",
"numpy.fromfile"
]
] |
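A minimal sketch of the aligned random-crop step in the CustomDataFlow record above: a random top-left corner is drawn so that a fixed IMG_H x IMG_W window fits inside the image, and the same window is applied to the ground-truth map so image and label stay registered. The array sizes here are hypothetical.

import numpy as np
from random import randint

IMG_H, IMG_W = 512, 512
im = np.zeros((600, 700), dtype='float32')                    # hypothetical oversized image
gt = np.arange(600 * 700, dtype='float32').reshape(600, 700)  # hypothetical ground-truth map

hi = randint(0, im.shape[0] - IMG_H)                          # random top edge
wi = randint(0, im.shape[1] - IMG_W)                          # random left edge
im_crop = im[hi:hi + IMG_H, wi:wi + IMG_W]
gt_crop = gt[hi:hi + IMG_H, wi:wi + IMG_W]
assert im_crop.shape == gt_crop.shape == (IMG_H, IMG_W)       # image and label windows stay aligned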
lcskrishna/my-pytorch-experiments
|
[
"b846760bbf8dfa930fa914edcee8f1a71a43fc98"
] |
[
"tensor-numpy-comparision/pytorch_tensor_test.py"
] |
[
"import torch\nimport time\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in, device= device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\nw1 = torch.randn(D_in, H, device=device, dtype = dtype)\nw2 = torch.randn(H, D_out, device=device, dtype = dtype)\n\nstart_time = time.time()\nlearning_rate = 1e-06\n\nfor i in range(500):\n h = x.mm(w1)\n h_relu = h.clamp(min=0)\n y_pred = h_relu.mm(w2)\n\n loss = (y_pred - y).pow(2).sum().item()\n print (i, loss)\n\n ## calculate the gradients.\n grad_y_pred = 2.0 * (y - y_pred)\n grad_w2 = h_relu.t().mm(grad_y_pred)\n grad_h_relu = grad_y_pred.mm(w2.t())\n grad_h = grad_h_relu.clone()\n grad_h[h < 0] = 0\n grad_w1 = x.t().mm(grad_h)\n\n w1 -= learning_rate * grad_w1\n w2 -= learning_rate * grad_w2\n\nelapsed_time = time.time() - start_time\nprint (\"INFO: elapsed time for tensor operations in pytorch - cpu is : {}\".format(elapsed_time))\n"
] |
[
[
"torch.device",
"torch.randn"
]
] |
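A sketch of the backward pass in the pytorch_tensor_test.py record above, with the gradient of sum((y_pred - y)**2) written out explicitly. Note that d(loss)/d(y_pred) = 2 * (y_pred - y); the record's expression 2.0 * (y - y_pred) has the opposite sign, which combined with w -= learning_rate * grad would increase the loss rather than descend it. Shapes follow the record; a single iteration is shown.

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x, y = torch.randn(N, D_in), torch.randn(N, D_out)
w1, w2 = torch.randn(D_in, H), torch.randn(H, D_out)
learning_rate = 1e-6

# forward pass
h = x.mm(w1)
h_relu = h.clamp(min=0)
y_pred = h_relu.mm(w2)
loss = (y_pred - y).pow(2).sum().item()

# backward pass: gradient of the squared error w.r.t. y_pred is 2 * (y_pred - y)
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.t().mm(grad_y_pred)
grad_h_relu = grad_y_pred.mm(w2.t())
grad_h = grad_h_relu.clone()
grad_h[h < 0] = 0                      # ReLU gate: zero gradient where the pre-activation was negative
grad_w1 = x.t().mm(grad_h)

# plain SGD step
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2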
weihaosky/CycleSiam
|
[
"9d11f6cb236a6699185774e49ebafe8d2f867ebe"
] |
[
"models/siammask_sharp.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom utils.anchors import Anchors\n\n\nclass SiamMask(nn.Module):\n def __init__(self, anchors=None, o_sz=127, g_sz=127):\n super(SiamMask, self).__init__()\n self.anchors = anchors # anchor_cfg\n self.anchor_num = len(self.anchors[\"ratios\"]) * len(self.anchors[\"scales\"])\n self.anchor = Anchors(anchors)\n self.features = None\n self.rpn_model = None\n self.mask_model = None\n self.o_sz = o_sz\n self.g_sz = g_sz\n self.upSample = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])\n\n self.all_anchors = None\n\n def set_all_anchors(self, image_center, size):\n # cx,cy,w,h\n if not self.anchor.generate_all_anchors(image_center, size):\n return\n all_anchors = self.anchor.all_anchors[1] # cx, cy, w, h\n self.all_anchors = torch.from_numpy(all_anchors).float().cuda()\n self.all_anchors = [self.all_anchors[i] for i in range(4)]\n\n def feature_extractor(self, x):\n return self.features(x)\n\n def rpn(self, template, search):\n pred_cls, pred_loc = self.rpn_model(template, search)\n return pred_cls, pred_loc\n\n def mask(self, template, search):\n pred_mask = self.mask_model(template, search)\n return pred_mask\n\n def _add_rpn_loss(self, label_cls, label_loc, lable_loc_weight, label_mask, label_mask_weight,\n rpn_pred_cls, rpn_pred_loc, rpn_pred_mask):\n rpn_loss_cls = select_cross_entropy_loss(rpn_pred_cls, label_cls)\n\n rpn_loss_loc = weight_l1_loss(rpn_pred_loc, label_loc, lable_loc_weight)\n\n rpn_loss_mask, iou_m, iou_5, iou_7 = select_mask_logistic_loss(rpn_pred_mask, label_mask, label_mask_weight)\n\n return rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, iou_m, iou_5, iou_7\n\n def run(self, template, search, softmax=False):\n \"\"\"\n run network\n \"\"\"\n template_feature = self.feature_extractor(template)\n feature, search_feature = self.features.forward_all(search)\n rpn_pred_cls, rpn_pred_loc = self.rpn(template_feature, search_feature)\n corr_feature = self.mask_model.mask.forward_corr(template_feature, search_feature) # (b, 256, w, h)\n rpn_pred_mask = self.refine_model(feature, corr_feature)\n\n if softmax:\n rpn_pred_cls = self.softmax(rpn_pred_cls)\n return rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature\n\n def softmax(self, cls):\n b, a2, h, w = cls.size()\n cls = cls.view(b, 2, a2//2, h, w)\n cls = cls.permute(0, 2, 3, 4, 1).contiguous()\n cls = F.log_softmax(cls, dim=4)\n return cls\n\n def forward(self, input, softmax):\n \"\"\"\n :param input: dict of input with keys of:\n 'template': [b, 3, h1, w1], input template image.\n 'search': [b, 3, h2, w2], input search image.\n 'label_cls':[b, max_num_gts, 5] or None(self.training==False),\n each gt contains x1,y1,x2,y2,class.\n :return: dict of loss, predict, accuracy\n \"\"\"\n template = input['template']\n search = input['search']\n if self.training:\n label_cls = input['label_cls']\n label_loc = input['label_loc']\n lable_loc_weight = input['label_loc_weight']\n label_mask = input['label_mask']\n label_mask_weight = input['label_mask_weight']\n\n rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature = \\\n self.run(template, search, softmax=softmax)\n\n outputs = dict()\n\n outputs['predict'] = [rpn_pred_loc, rpn_pred_cls, rpn_pred_mask, template_feature, search_feature]\n\n if self.training:\n rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, iou_acc_mean, iou_acc_5, iou_acc_7 = \\\n self._add_rpn_loss(label_cls, label_loc, lable_loc_weight, label_mask, label_mask_weight,\n 
rpn_pred_cls, rpn_pred_loc, rpn_pred_mask)\n outputs['losses'] = [rpn_loss_cls, rpn_loss_loc, rpn_loss_mask]\n outputs['accuracy'] = [iou_acc_mean, iou_acc_5, iou_acc_7]\n\n return outputs\n\n def template(self, z):\n self.zf = self.feature_extractor(z)\n cls_kernel, loc_kernel = self.rpn_model.template(self.zf)\n return cls_kernel, loc_kernel\n\n def track(self, x, cls_kernel=None, loc_kernel=None, softmax=False):\n xf = self.feature_extractor(x)\n rpn_pred_cls, rpn_pred_loc = self.rpn_model.track(xf, cls_kernel, loc_kernel)\n if softmax:\n rpn_pred_cls = self.softmax(rpn_pred_cls)\n return rpn_pred_cls, rpn_pred_loc\n\n\ndef get_cls_loss(pred, label, select):\n if select.nelement() == 0: return pred.sum()*0.\n pred = torch.index_select(pred, 0, select)\n label = torch.index_select(label, 0, select)\n\n return F.nll_loss(pred, label)\n\n\ndef select_cross_entropy_loss(pred, label):\n pred = pred.view(-1, 2)\n label = label.view(-1)\n pos = Variable(label.data.eq(1).nonzero().squeeze()).cuda()\n neg = Variable(label.data.eq(0).nonzero().squeeze()).cuda()\n\n loss_pos = get_cls_loss(pred, label, pos)\n loss_neg = get_cls_loss(pred, label, neg)\n return loss_pos * 0.5 + loss_neg * 0.5\n\n\ndef weight_l1_loss(pred_loc, label_loc, loss_weight):\n \"\"\"\n :param pred_loc: [b, 4k, h, w]\n :param label_loc: [b, 4k, h, w]\n :param loss_weight: [b, k, h, w]\n :return: loc loss value\n \"\"\"\n b, _, sh, sw = pred_loc.size()\n pred_loc = pred_loc.view(b, 4, -1, sh, sw)\n diff = (pred_loc - label_loc).abs()\n diff = diff.sum(dim=1).view(b, -1, sh, sw)\n loss = diff * loss_weight\n return loss.sum().div(b)\n\n\ndef select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):\n weight = weight.view(-1)\n pos = Variable(weight.data.eq(1).nonzero().squeeze())\n if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0\n\n if len(p_m.shape) == 4:\n p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)\n p_m = torch.index_select(p_m, 0, pos)\n p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)\n p_m = p_m.view(-1, g_sz * g_sz)\n else:\n p_m = torch.index_select(p_m, 0, pos)\n\n mask_uf = F.unfold(mask, (g_sz, g_sz), padding=0, stride=8)\n mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)\n\n mask_uf = torch.index_select(mask_uf, 0, pos)\n loss = F.soft_margin_loss(p_m, mask_uf)\n iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)\n return loss, iou_m, iou_5, iou_7\n\n\ndef iou_measure(pred, label):\n pred = pred.ge(0)\n mask_sum = pred.eq(1).add(label.eq(1))\n intxn = torch.sum(mask_sum == 2, dim=1).float()\n union = torch.sum(mask_sum > 0, dim=1).float()\n iou = intxn/union\n return torch.mean(iou), (torch.sum(iou > 0.5).float()/iou.shape[0]), (torch.sum(iou > 0.7).float()/iou.shape[0])\n \n\nif __name__ == \"__main__\":\n p_m = torch.randn(4, 63*63, 25, 25)\n cls = torch.randn(4, 1, 25, 25) > 0.9\n mask = torch.randn(4, 1, 255, 255) * 2 - 1\n\n loss = select_mask_logistic_loss(p_m, mask, cls)\n print(loss)\n"
] |
[
[
"torch.nn.functional.nll_loss",
"torch.nn.functional.unfold",
"torch.nn.UpsamplingBilinear2d",
"torch.transpose",
"torch.nn.functional.log_softmax",
"torch.from_numpy",
"torch.mean",
"torch.nn.functional.soft_margin_loss",
"torch.index_select",
"torch.randn",
"torch.sum"
]
] |
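A minimal sketch, with toy tensors, of the iou_measure logic in the siammask_sharp.py record above: threshold the predicted mask at zero, count pixels where prediction and label are both foreground (intersection) versus where at least one is (union), and average the per-row IoU. Explicit integer casts are used here so the both-foreground count is 2 regardless of PyTorch version; the tensor values are assumptions.

import torch

pred = torch.tensor([[0.5, -0.2, 1.0, -1.0],
                     [0.1,  0.3, -0.5, -0.7]])     # hypothetical mask logits, one row per sample
label = torch.tensor([[1., 0., 1., 1.],
                      [1., 1., 0., 0.]])           # hypothetical binary ground-truth masks

pred_fg = pred.ge(0).int()                         # predicted foreground pixels
mask_sum = pred_fg + label.eq(1).int()             # 2 where both agree on foreground, 1 where only one does
intxn = torch.sum(mask_sum == 2, dim=1).float()
union = torch.sum(mask_sum > 0, dim=1).float()
iou = intxn / union
print(torch.mean(iou), torch.sum(iou > 0.5).float() / iou.shape[0])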
wilsonify/tensorflow-examples
|
[
"2271c666b33c7a74047c7196783ab04e9aee8362"
] |
[
"src/tensorflow-examples/tensorflow_examples/converted_notebooks/14_deep_computer_vision_with_cnns.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# **Chapter 14 – Deep Computer Vision Using Convolutional Neural Networks**\n\n# _This notebook contains all the sample code in chapter 14._\n\n# <table align=\"left\">\n# <td>\n# <a target=\"_blank\" href=\"https://colab.research.google.com/github/ageron/handson-ml2/blob/master/14_deep_computer_vision_with_cnns.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n# </td>\n# </table>\n\n# # Setup\n\n# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.\n\n# In[1]:\n\n\n# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\ntry:\n # %tensorflow_version only exists in Colab.\n get_ipython().run_line_magic('tensorflow_version', '2.x')\n IS_COLAB = True\nexcept Exception:\n IS_COLAB = False\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\nif not tf.test.is_gpu_available():\n print(\"No GPU was detected. CNNs can be very slow without a GPU.\")\n if IS_COLAB:\n print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# To plot pretty figures\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"cnn\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\n# A couple utility functions to plot grayscale and RGB images:\n\n# In[2]:\n\n\ndef plot_image(image):\n plt.imshow(image, cmap=\"gray\", interpolation=\"nearest\")\n plt.axis(\"off\")\n\ndef plot_color_image(image):\n plt.imshow(image, interpolation=\"nearest\")\n plt.axis(\"off\")\n\n\n# # What is a Convolution?\n\n# In[3]:\n\n\nimport numpy as np\nfrom sklearn.datasets import load_sample_image\n\n# Load sample images\nchina = load_sample_image(\"china.jpg\") / 255\nflower = load_sample_image(\"flower.jpg\") / 255\nimages = np.array([china, flower])\nbatch_size, height, width, channels = images.shape\n\n# Create 2 filters\nfilters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)\nfilters[:, 3, :, 0] = 1 # vertical line\nfilters[3, :, :, 1] = 1 # horizontal line\n\noutputs = tf.nn.conv2d(images, filters, strides=1, padding=\"SAME\")\n\nplt.imshow(outputs[0, :, :, 1], cmap=\"gray\") # plot 1st image's 2nd feature map\nplt.axis(\"off\") # Not shown in the book\nplt.show()\n\n\n# In[4]:\n\n\nfor image_index in (0, 1):\n for feature_map_index in (0, 1):\n plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)\n plot_image(outputs[image_index, :, :, 
feature_map_index])\n\nplt.show()\n\n\n# In[5]:\n\n\ndef crop(images):\n return images[150:220, 130:250]\n\n\n# In[6]:\n\n\nplot_image(crop(images[0, :, :, 0]))\nsave_fig(\"china_original\", tight_layout=False)\nplt.show()\n\nfor feature_map_index, filename in enumerate([\"china_vertical\", \"china_horizontal\"]):\n plot_image(crop(outputs[0, :, :, feature_map_index]))\n save_fig(filename, tight_layout=False)\n plt.show()\n\n\n# In[7]:\n\n\nplot_image(filters[:, :, 0, 0])\nplt.show()\nplot_image(filters[:, :, 0, 1])\nplt.show()\n\n\n# ## Convolutional Layer\n\n# Using `keras.layers.Conv2D()`:\n\n# In[8]:\n\n\nconv = keras.layers.Conv2D(filters=32, kernel_size=3, strides=1,\n padding=\"SAME\", activation=\"relu\")\n\n\n# In[9]:\n\n\nplot_image(crop(outputs[0, :, :, 0]))\nplt.show()\n\n\n# ## VALID vs SAME padding\n\n# In[10]:\n\n\ndef feature_map_size(input_size, kernel_size, strides=1, padding=\"SAME\"):\n if padding == \"SAME\":\n return (input_size - 1) // strides + 1\n else:\n return (input_size - kernel_size) // strides + 1\n\n\n# In[11]:\n\n\ndef pad_before_and_padded_size(input_size, kernel_size, strides=1):\n fmap_size = feature_map_size(input_size, kernel_size, strides)\n padded_size = max((fmap_size - 1) * strides + kernel_size, input_size)\n pad_before = (padded_size - input_size) // 2\n return pad_before, padded_size\n\n\n# In[12]:\n\n\ndef manual_same_padding(images, kernel_size, strides=1):\n if kernel_size == 1:\n return images.astype(np.float32)\n batch_size, height, width, channels = images.shape\n top_pad, padded_height = pad_before_and_padded_size(height, kernel_size, strides)\n left_pad, padded_width = pad_before_and_padded_size(width, kernel_size, strides)\n padded_shape = [batch_size, padded_height, padded_width, channels]\n padded_images = np.zeros(padded_shape, dtype=np.float32)\n padded_images[:, top_pad:height+top_pad, left_pad:width+left_pad, :] = images\n return padded_images\n\n\n# Using `\"SAME\"` padding is equivalent to padding manually using `manual_same_padding()` then using `\"VALID\"` padding (confusingly, `\"VALID\"` padding means no padding at all):\n\n# In[13]:\n\n\nkernel_size = 7\nstrides = 2\n\nconv_valid = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"VALID\")\nconv_same = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"SAME\")\n\nvalid_output = conv_valid(manual_same_padding(images, kernel_size, strides))\n\n# Need to call build() so conv_same's weights get created\nconv_same.build(tf.TensorShape(images.shape))\n\n# Copy the weights from conv_valid to conv_same\nconv_same.set_weights(conv_valid.get_weights())\n\nsame_output = conv_same(images.astype(np.float32))\n\nassert np.allclose(valid_output.numpy(), same_output.numpy())\n\n\n# # Pooling layer\n\n# ## Max pooling\n\n# In[14]:\n\n\nmax_pool = keras.layers.MaxPool2D(pool_size=2)\n\n\n# In[15]:\n\n\ncropped_images = np.array([crop(image) for image in images])\noutput = max_pool(cropped_images)\n\n\n# In[16]:\n\n\nfig = plt.figure(figsize=(12, 8))\ngs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])\n\nax1 = fig.add_subplot(gs[0, 0])\nax1.set_title(\"Input\", fontsize=14)\nax1.imshow(cropped_images[0]) # plot the 1st image\nax1.axis(\"off\")\nax2 = fig.add_subplot(gs[0, 1])\nax2.set_title(\"Output\", fontsize=14)\nax2.imshow(output[0]) # plot the output for the 1st image\nax2.axis(\"off\")\nsave_fig(\"china_max_pooling\")\nplt.show()\n\n\n# ## Depth-wise pooling\n\n# In[17]:\n\n\nclass 
DepthMaxPool(keras.layers.Layer):\n def __init__(self, pool_size, strides=None, padding=\"VALID\", **kwargs):\n super().__init__(**kwargs)\n if strides is None:\n strides = pool_size\n self.pool_size = pool_size\n self.strides = strides\n self.padding = padding\n def call(self, inputs):\n return tf.nn.max_pool(inputs,\n ksize=(1, 1, 1, self.pool_size),\n strides=(1, 1, 1, self.pool_size),\n padding=self.padding)\n\n\n# In[18]:\n\n\ndepth_pool = DepthMaxPool(3)\nwith tf.device(\"/cpu:0\"): # there is no GPU-kernel yet\n depth_output = depth_pool(cropped_images)\ndepth_output.shape\n\n\n# Or just use a `Lambda` layer:\n\n# In[19]:\n\n\ndepth_pool = keras.layers.Lambda(lambda X: tf.nn.max_pool(\n X, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3), padding=\"VALID\"))\nwith tf.device(\"/cpu:0\"): # there is no GPU-kernel yet\n depth_output = depth_pool(cropped_images)\ndepth_output.shape\n\n\n# In[20]:\n\n\nplt.figure(figsize=(12, 8))\nplt.subplot(1, 2, 1)\nplt.title(\"Input\", fontsize=14)\nplot_color_image(cropped_images[0]) # plot the 1st image\nplt.subplot(1, 2, 2)\nplt.title(\"Output\", fontsize=14)\nplot_image(depth_output[0, ..., 0]) # plot the output for the 1st image\nplt.axis(\"off\")\nplt.show()\n\n\n# ## Average pooling\n\n# In[21]:\n\n\navg_pool = keras.layers.AvgPool2D(pool_size=2)\n\n\n# In[22]:\n\n\noutput_avg = avg_pool(cropped_images)\n\n\n# In[23]:\n\n\nfig = plt.figure(figsize=(12, 8))\ngs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])\n\nax1 = fig.add_subplot(gs[0, 0])\nax1.set_title(\"Input\", fontsize=14)\nax1.imshow(cropped_images[0]) # plot the 1st image\nax1.axis(\"off\")\nax2 = fig.add_subplot(gs[0, 1])\nax2.set_title(\"Output\", fontsize=14)\nax2.imshow(output_avg[0]) # plot the output for the 1st image\nax2.axis(\"off\")\nplt.show()\n\n\n# ## Global Average Pooling\n\n# In[24]:\n\n\nglobal_avg_pool = keras.layers.GlobalAvgPool2D()\nglobal_avg_pool(cropped_images)\n\n\n# In[25]:\n\n\noutput_global_avg2 = keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))\noutput_global_avg2(cropped_images)\n\n\n# # Tackling Fashion MNIST With a CNN\n\n# In[26]:\n\n\n(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\nX_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\ny_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]\n\nX_mean = X_train.mean(axis=0, keepdims=True)\nX_std = X_train.std(axis=0, keepdims=True) + 1e-7\nX_train = (X_train - X_mean) / X_std\nX_valid = (X_valid - X_mean) / X_std\nX_test = (X_test - X_mean) / X_std\n\nX_train = X_train[..., np.newaxis]\nX_valid = X_valid[..., np.newaxis]\nX_test = X_test[..., np.newaxis]\n\n\n# In[27]:\n\n\nfrom functools import partial\n\nDefaultConv2D = partial(keras.layers.Conv2D,\n kernel_size=3, activation='relu', padding=\"SAME\")\n\nmodel = keras.models.Sequential([\n DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=128),\n DefaultConv2D(filters=128),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=256),\n DefaultConv2D(filters=256),\n keras.layers.MaxPooling2D(pool_size=2),\n keras.layers.Flatten(),\n keras.layers.Dense(units=128, activation='relu'),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(units=64, activation='relu'),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(units=10, activation='softmax'),\n])\n\n\n# In[28]:\n\n\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\nhistory = 
model.fit(X_train, y_train, epochs=10, validation_data=[X_valid, y_valid])\nscore = model.evaluate(X_test, y_test)\nX_new = X_test[:10] # pretend we have new images\ny_pred = model.predict(X_new)\n\n\n# ## ResNet-34\n\n# In[29]:\n\n\nDefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, strides=1,\n padding=\"SAME\", use_bias=False)\n\nclass ResidualUnit(keras.layers.Layer):\n def __init__(self, filters, strides=1, activation=\"relu\", **kwargs):\n super().__init__(**kwargs)\n self.activation = keras.activations.get(activation)\n self.main_layers = [\n DefaultConv2D(filters, strides=strides),\n keras.layers.BatchNormalization(),\n self.activation,\n DefaultConv2D(filters),\n keras.layers.BatchNormalization()]\n self.skip_layers = []\n if strides > 1:\n self.skip_layers = [\n DefaultConv2D(filters, kernel_size=1, strides=strides),\n keras.layers.BatchNormalization()]\n\n def call(self, inputs):\n Z = inputs\n for layer in self.main_layers:\n Z = layer(Z)\n skip_Z = inputs\n for layer in self.skip_layers:\n skip_Z = layer(skip_Z)\n return self.activation(Z + skip_Z)\n\n\n# In[30]:\n\n\nmodel = keras.models.Sequential()\nmodel.add(DefaultConv2D(64, kernel_size=7, strides=2,\n input_shape=[224, 224, 3]))\nmodel.add(keras.layers.BatchNormalization())\nmodel.add(keras.layers.Activation(\"relu\"))\nmodel.add(keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\"))\nprev_filters = 64\nfor filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n strides = 1 if filters == prev_filters else 2\n model.add(ResidualUnit(filters, strides=strides))\n prev_filters = filters\nmodel.add(keras.layers.GlobalAvgPool2D())\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\n\n# In[31]:\n\n\nmodel.summary()\n\n\n# ## Using a Pretrained Model\n\n# In[32]:\n\n\nmodel = keras.applications.resnet50.ResNet50(weights=\"imagenet\")\n\n\n# In[33]:\n\n\nimages_resized = tf.image.resize(images, [224, 224])\nplot_color_image(images_resized[0])\nplt.show()\n\n\n# In[34]:\n\n\nimages_resized = tf.image.resize_with_pad(images, 224, 224, antialias=True)\nplot_color_image(images_resized[0])\n\n\n# In[35]:\n\n\nimages_resized = tf.image.resize_with_crop_or_pad(images, 224, 224)\nplot_color_image(images_resized[0])\nplt.show()\n\n\n# In[36]:\n\n\nchina_box = [0, 0.03, 1, 0.68]\nflower_box = [0.19, 0.26, 0.86, 0.7]\nimages_resized = tf.image.crop_and_resize(images, [china_box, flower_box], [0, 1], [224, 224])\nplot_color_image(images_resized[0])\nplt.show()\nplot_color_image(images_resized[1])\nplt.show()\n\n\n# In[37]:\n\n\ninputs = keras.applications.resnet50.preprocess_input(images_resized * 255)\nY_proba = model.predict(inputs)\n\n\n# In[38]:\n\n\nY_proba.shape\n\n\n# In[39]:\n\n\ntop_K = keras.applications.resnet50.decode_predictions(Y_proba, top=3)\nfor image_index in range(len(images)):\n print(\"Image #{}\".format(image_index))\n for class_id, name, y_proba in top_K[image_index]:\n print(\" {} - {:12s} {:.2f}%\".format(class_id, name, y_proba * 100))\n print()\n\n\n# ## Pretrained Models for Transfer Learning\n\n# In[40]:\n\n\nimport tensorflow_datasets as tfds\n\ndataset, info = tfds.load(\"tf_flowers\", as_supervised=True, with_info=True)\n\n\n# In[41]:\n\n\ninfo.splits\n\n\n# In[42]:\n\n\ninfo.splits[\"train\"]\n\n\n# In[43]:\n\n\nclass_names = info.features[\"label\"].names\nclass_names\n\n\n# In[44]:\n\n\nn_classes = info.features[\"label\"].num_classes\n\n\n# In[45]:\n\n\ndataset_size = info.splits[\"train\"].num_examples\ndataset_size\n\n\n# 
In[46]:\n\n\ntest_split, valid_split, train_split = tfds.Split.TRAIN.subsplit([10, 15, 75])\n\ntest_set_raw = tfds.load(\"tf_flowers\", split=test_split, as_supervised=True)\nvalid_set_raw = tfds.load(\"tf_flowers\", split=valid_split, as_supervised=True)\ntrain_set_raw = tfds.load(\"tf_flowers\", split=train_split, as_supervised=True)\n\n\n# In[47]:\n\n\nplt.figure(figsize=(12, 10))\nindex = 0\nfor image, label in train_set_raw.take(9):\n index += 1\n plt.subplot(3, 3, index)\n plt.imshow(image)\n plt.title(\"Class: {}\".format(class_names[label]))\n plt.axis(\"off\")\n\nplt.show()\n\n\n# Basic preprocessing:\n\n# In[48]:\n\n\ndef preprocess(image, label):\n resized_image = tf.image.resize(image, [224, 224])\n final_image = keras.applications.xception.preprocess_input(resized_image)\n return final_image, label\n\n\n# Slightly fancier preprocessing (but you could add much more data augmentation):\n\n# In[49]:\n\n\ndef central_crop(image):\n shape = tf.shape(image)\n min_dim = tf.reduce_min([shape[0], shape[1]])\n top_crop = (shape[0] - min_dim) // 4\n bottom_crop = shape[0] - top_crop\n left_crop = (shape[1] - min_dim) // 4\n right_crop = shape[1] - left_crop\n return image[top_crop:bottom_crop, left_crop:right_crop]\n\ndef random_crop(image):\n shape = tf.shape(image)\n min_dim = tf.reduce_min([shape[0], shape[1]]) * 90 // 100\n return tf.image.random_crop(image, [min_dim, min_dim, 3])\n\ndef preprocess(image, label, randomize=False):\n if randomize:\n cropped_image = random_crop(image)\n cropped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n cropped_image = central_crop(image)\n resized_image = tf.image.resize(cropped_image, [224, 224])\n final_image = keras.applications.xception.preprocess_input(resized_image)\n return final_image, label\n\nbatch_size = 32\ntrain_set = train_set_raw.shuffle(1000).repeat()\ntrain_set = train_set.map(partial(preprocess, randomize=True)).batch(batch_size).prefetch(1)\nvalid_set = valid_set_raw.map(preprocess).batch(batch_size).prefetch(1)\ntest_set = test_set_raw.map(preprocess).batch(batch_size).prefetch(1)\n\n\n# In[50]:\n\n\nplt.figure(figsize=(12, 12))\nfor X_batch, y_batch in train_set.take(1):\n for index in range(9):\n plt.subplot(3, 3, index + 1)\n plt.imshow(X_batch[index] / 2 + 0.5)\n plt.title(\"Class: {}\".format(class_names[y_batch[index]]))\n plt.axis(\"off\")\n\nplt.show()\n\n\n# In[51]:\n\n\nplt.figure(figsize=(12, 12))\nfor X_batch, y_batch in test_set.take(1):\n for index in range(9):\n plt.subplot(3, 3, index + 1)\n plt.imshow(X_batch[index] / 2 + 0.5)\n plt.title(\"Class: {}\".format(class_names[y_batch[index]]))\n plt.axis(\"off\")\n\nplt.show()\n\n\n# In[52]:\n\n\nbase_model = keras.applications.xception.Xception(weights=\"imagenet\",\n include_top=False)\navg = keras.layers.GlobalAveragePooling2D()(base_model.output)\noutput = keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\nmodel = keras.models.Model(inputs=base_model.input, outputs=output)\n\n\n# In[53]:\n\n\nfor index, layer in enumerate(base_model.layers):\n print(index, layer.name)\n\n\n# In[54]:\n\n\nfor layer in base_model.layers:\n layer.trainable = False\n\noptimizer = keras.optimizers.SGD(lr=0.2, momentum=0.9, decay=0.01)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n metrics=[\"accuracy\"])\nhistory = model.fit(train_set,\n steps_per_epoch=int(0.75 * dataset_size / batch_size),\n validation_data=valid_set,\n validation_steps=int(0.15 * dataset_size / batch_size),\n epochs=5)\n\n\n# In[55]:\n\n\nfor layer in 
base_model.layers:\n layer.trainable = True\n\noptimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,\n nesterov=True, decay=0.001)\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n metrics=[\"accuracy\"])\nhistory = model.fit(train_set,\n steps_per_epoch=int(0.75 * dataset_size / batch_size),\n validation_data=valid_set,\n validation_steps=int(0.15 * dataset_size / batch_size),\n epochs=40)\n\n\n# # Classification and Localization\n\n# In[56]:\n\n\nbase_model = keras.applications.xception.Xception(weights=\"imagenet\",\n include_top=False)\navg = keras.layers.GlobalAveragePooling2D()(base_model.output)\nclass_output = keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\nloc_output = keras.layers.Dense(4)(avg)\nmodel = keras.models.Model(inputs=base_model.input,\n outputs=[class_output, loc_output])\nmodel.compile(loss=[\"sparse_categorical_crossentropy\", \"mse\"],\n loss_weights=[0.8, 0.2], # depends on what you care most about\n optimizer=optimizer, metrics=[\"accuracy\"])\n\n\n# In[57]:\n\n\ndef add_random_bounding_boxes(images, labels):\n fake_bboxes = tf.random.uniform([tf.shape(images)[0], 4])\n return images, (labels, fake_bboxes)\n\nfake_train_set = train_set.take(5).repeat(2).map(add_random_bounding_boxes)\n\n\n# In[58]:\n\n\nmodel.fit(fake_train_set, steps_per_epoch=5, epochs=2)\n\n\n# ### Mean Average Precision (mAP)\n\n# In[59]:\n\n\ndef maximum_precisions(precisions):\n return np.flip(np.maximum.accumulate(np.flip(precisions)))\n\n\n# In[60]:\n\n\nrecalls = np.linspace(0, 1, 11)\n\nprecisions = [0.91, 0.94, 0.96, 0.94, 0.95, 0.92, 0.80, 0.60, 0.45, 0.20, 0.10]\nmax_precisions = maximum_precisions(precisions)\nmAP = max_precisions.mean()\nplt.plot(recalls, precisions, \"ro--\", label=\"Precision\")\nplt.plot(recalls, max_precisions, \"bo-\", label=\"Max Precision\")\nplt.xlabel(\"Recall\")\nplt.ylabel(\"Precision\")\nplt.plot([0, 1], [mAP, mAP], \"g:\", linewidth=3, label=\"mAP\")\nplt.grid(True)\nplt.axis([0, 1, 0, 1])\nplt.legend(loc=\"lower center\", fontsize=14)\nplt.show()\n\n\n# Transpose convolutions:\n\n# In[61]:\n\n\ntf.random.set_seed(42)\nX = images_resized.numpy()\n\nconv_transpose = keras.layers.Conv2DTranspose(filters=5, kernel_size=3, strides=2, padding=\"VALID\")\noutput = conv_transpose(X)\noutput.shape\n\n\n# In[62]:\n\n\ndef normalize(X):\n return (X - tf.reduce_min(X)) / (tf.reduce_max(X) - tf.reduce_min(X))\n\nfig = plt.figure(figsize=(12, 8))\ngs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[1, 2])\n\nax1 = fig.add_subplot(gs[0, 0])\nax1.set_title(\"Input\", fontsize=14)\nax1.imshow(X[0]) # plot the 1st image\nax1.axis(\"off\")\nax2 = fig.add_subplot(gs[0, 1])\nax2.set_title(\"Output\", fontsize=14)\nax2.imshow(normalize(output[0, ..., :3]), interpolation=\"bicubic\") # plot the output for the 1st image\nax2.axis(\"off\")\nplt.show()\n\n\n# In[63]:\n\n\ndef upscale_images(images, stride, kernel_size):\n batch_size, height, width, channels = images.shape\n upscaled = np.zeros((batch_size,\n (height - 1) * stride + 2 * kernel_size - 1,\n (width - 1) * stride + 2 * kernel_size - 1,\n channels))\n upscaled[:,\n kernel_size - 1:(height - 1) * stride + kernel_size:stride,\n kernel_size - 1:(width - 1) * stride + kernel_size:stride,\n :] = images\n return upscaled\n\n\n# In[64]:\n\n\nupscaled = upscale_images(X, stride=2, kernel_size=3)\nweights, biases = conv_transpose.weights\nreversed_filters = np.flip(weights.numpy(), axis=[0, 1])\nreversed_filters = np.transpose(reversed_filters, [0, 1, 3, 
2])\nmanual_output = tf.nn.conv2d(upscaled, reversed_filters, strides=1, padding=\"VALID\")\n\n\n# In[65]:\n\n\ndef normalize(X):\n return (X - tf.reduce_min(X)) / (tf.reduce_max(X) - tf.reduce_min(X))\n\nfig = plt.figure(figsize=(12, 8))\ngs = mpl.gridspec.GridSpec(nrows=1, ncols=3, width_ratios=[1, 2, 2])\n\nax1 = fig.add_subplot(gs[0, 0])\nax1.set_title(\"Input\", fontsize=14)\nax1.imshow(X[0]) # plot the 1st image\nax1.axis(\"off\")\nax2 = fig.add_subplot(gs[0, 1])\nax2.set_title(\"Upscaled\", fontsize=14)\nax2.imshow(upscaled[0], interpolation=\"bicubic\")\nax2.axis(\"off\")\nax3 = fig.add_subplot(gs[0, 2])\nax3.set_title(\"Output\", fontsize=14)\nax3.imshow(normalize(manual_output[0, ..., :3]), interpolation=\"bicubic\") # plot the output for the 1st image\nax3.axis(\"off\")\nplt.show()\n\n\n# In[66]:\n\n\nnp.allclose(output, manual_output.numpy(), atol=1e-7)\n\n\n# # Exercises\n\n# ## 1. to 8.\n\n# See appendix A.\n\n# ## 9. High Accuracy CNN for MNIST\n# Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST.\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ## 10. Use transfer learning for large image classification\n# \n# ### 10.1)\n# Create a training set containing at least 100 images per class. For example, you could classify your own pictures based on the location (beach, mountain, city, etc.), or alternatively you can just use an existing dataset (e.g., from TensorFlow Datasets).\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ### 10.2)\n# Split it into a training set, a validation set and a test set.\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ### 10.3)\n# Build the input pipeline, including the appropriate preprocessing operations, and optionally add data augmentation.\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ### 10.4)\n# Fine-tune a pretrained model on this dataset.\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ## 11.\n# Exercise: Go through TensorFlow's [DeepDream tutorial](https://goo.gl/4b2s6g). It is a fun way to familiarize yourself with various ways of visualizing the patterns learned by a CNN, and to generate art using Deep Learning.\n# \n\n# Simply download the notebook and follow its instructions. For extra fun, you can produce a series of images, by repeatedly zooming in and running the DeepDream algorithm: using a tool such as [ffmpeg](https://ffmpeg.org/) you can then create a video from these images. For example, here is a [DeepDream video](https://www.youtube.com/watch?v=l6i_fDg30p0) I made... as you will see, it quickly turns into a nightmare. ;-) You can find hundreds of [similar videos](https://www.youtube.com/results?search_query=+deepdream) (often much more artistic) on the web.\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"tensorflow.image.resize_with_crop_or_pad",
"tensorflow.keras.optimizers.SGD",
"tensorflow.reduce_min",
"tensorflow.nn.conv2d",
"tensorflow.image.random_flip_left_right",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.shape",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.activations.get",
"tensorflow.random.set_seed",
"matplotlib.pyplot.savefig",
"tensorflow.image.crop_and_resize",
"tensorflow.TensorShape",
"tensorflow.keras.layers.Conv2D",
"numpy.transpose",
"tensorflow.keras.applications.xception.preprocess_input",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.layers.AvgPool2D",
"tensorflow.nn.max_pool",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"sklearn.datasets.load_sample_image",
"numpy.array",
"numpy.zeros",
"tensorflow.keras.layers.MaxPool2D",
"matplotlib.pyplot.title",
"tensorflow.keras.datasets.fashion_mnist.load_data",
"matplotlib.rc",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.applications.resnet50.decode_predictions",
"tensorflow.keras.layers.Conv2DTranspose",
"matplotlib.pyplot.show",
"matplotlib.gridspec.GridSpec",
"numpy.random.seed",
"tensorflow.keras.applications.resnet50.preprocess_input",
"matplotlib.pyplot.xlabel",
"tensorflow.image.resize_with_pad",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"numpy.linspace",
"matplotlib.pyplot.legend",
"tensorflow.test.is_gpu_available",
"tensorflow.keras.layers.Flatten",
"tensorflow.image.random_crop",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.reduce_max",
"matplotlib.pyplot.ylabel",
"tensorflow.device",
"tensorflow.image.resize",
"tensorflow.reduce_mean",
"numpy.flip",
"matplotlib.pyplot.imshow"
]
] |
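A small sketch checking the SAME/VALID output-size formulas defined in the CNN-chapter record above against tf.nn.conv2d itself. The input size (9), kernel size (3), and stride (2) are assumed values, chosen so the two padding modes give different output sizes.

import numpy as np
import tensorflow as tf

def feature_map_size(input_size, kernel_size, strides=1, padding="SAME"):
    # formulas as in the record: SAME ignores the kernel size, VALID does not
    if padding == "SAME":
        return (input_size - 1) // strides + 1
    return (input_size - kernel_size) // strides + 1

images = np.zeros((1, 9, 9, 1), dtype=np.float32)    # one 9x9 single-channel image
filters = np.ones((3, 3, 1, 1), dtype=np.float32)    # one 3x3 filter

same_out = tf.nn.conv2d(images, filters, strides=2, padding="SAME")
valid_out = tf.nn.conv2d(images, filters, strides=2, padding="VALID")
assert same_out.shape[1] == feature_map_size(9, 3, strides=2, padding="SAME")    # 5
assert valid_out.shape[1] == feature_map_size(9, 3, strides=2, padding="VALID")  # 4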
Priesemann-Group/historydependence
|
[
"e1adc5eea8cb05cc686bfda0b979244b34d63bb4",
"e1adc5eea8cb05cc686bfda0b979244b34d63bb4"
] |
[
"plots/supplementaries/S13Fig_measures_vs_rate.py",
"plots/supplementaries/S6Fig.py"
] |
[
"\"\"\"Functions\"\"\"\nimport os\nimport sys\nfrom sys import exit, stderr, argv, path, modules\nfrom os.path import isfile, isdir, realpath, dirname, exists\nimport numpy as np\nimport pandas as pd\n# plotting\nimport matplotlib\nimport seaborn.apionly as sns\nfrom matplotlib import rc\nimport matplotlib.lines as mlines\nimport pylab as plt\nimport pickle\n\nPLOTTING_DIR = dirname(realpath(__file__))\nCODE_DIR = '{}/../..'.format(PLOTTING_DIR)\npath.insert(1, '{}/src'.format(CODE_DIR))\nuse_settings_path = False\n\nif 'hde_glm' not in modules:\n import hde_glm as glm\n import hde_utils as utl\n import hde_plotutils as plots\n\ndef get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path):\n ANALYSIS_DIR, analysis_num_str, R_tot, T_D, T, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = regularization_method, use_settings_path = use_settings_path)\n R_tot = plots.get_R_tot(T, R, R_CI_lo)[0]\n dR = plots.get_dR(T,R,R_tot)\n tau_R = plots.get_T_avg(T, dR, T_0)\n return tau_R, R_tot\n\ndef get_stats(bin_size_ms, T_0_ms, neuron_index, recorded_system, CODE_DIR):\n ANALYSIS_DIR = '%s/analysis/%s/stats_tbin_%dms'%(CODE_DIR, recorded_system, bin_size_ms)\n with open('%s/stats_neuron%d_T_0_%dms.pkl'%(ANALYSIS_DIR, neuron_index, T_0_ms), 'rb') as f:\n return pickle.load(f)\n\ndef get_spike_entropy(p_spike):\n p_nospike = 1 - p_spike\n if p_nospike == 1.0:\n entropy = 0\n else:\n entropy = - p_spike * np.log2(p_spike) - p_nospike * np.log2(p_nospike)\n return entropy\n\n\"\"\"Global parameters\"\"\"\nsetup = 'fivebins'\nregularization_method = 'shuffling'\nrec_length = '40min'\ntotal_mutual_information = False\nbin_size_ms = 5 #ms, time bin for auto_correlation\n# short timescale which is neglected when computing the integrated timescale\n# Slightly below 0.01, because the embeddings chosen for analysis are computed for 0.00998, thus almost 0.01 but sligthtly below.\nT_0_ms = 10\nT_0 = 0.00997\n\nrc('text', usetex=True)\nmatplotlib.rcParams['font.size'] = '16.0'\nmatplotlib.rcParams['xtick.labelsize'] = '16'\nmatplotlib.rcParams['ytick.labelsize'] = '16'\nmatplotlib.rcParams['legend.fontsize'] = '16'\nmatplotlib.rcParams['axes.linewidth'] = 0.6\nmatplotlib.rcParams[\"errorbar.capsize\"] = 2.5\n\n# Colors\nmain_red = sns.color_palette(\"RdBu_r\", 15)[12]\nmain_blue = sns.color_palette(\"RdBu_r\", 15)[1]\nsoft_red = sns.color_palette(\"RdBu_r\", 15)[10]\nsoft_blue = sns.color_palette(\"RdBu_r\", 15)[4]\nviolet = sns.cubehelix_palette(8)[4]\ngreen = sns.cubehelix_palette(8, start=.5, rot=-.75)[3]\n\nfig, ((ax1,ax2)) = plt.subplots(1, 2, figsize=(7, 3.2))\n\n##### Unset Borders #####\n\n\nfor ax in (ax1,ax2):\n ax.spines['top'].set_bounds(0, 0)\n ax.spines['right'].set_bounds(0, 0)\n ax.set_xlim((0.3, 12))\n ax.set_xscale('log')\n ax.set_xlabel(r'firing rate (Hz)')\n\nif total_mutual_information == True:\n ax1.set_ylabel(\n r'total mutual information $I_{\\mathrm{tot}}$')\n ax1.set_yscale('log')\n ax1.set_ylim((0.001, 0.1))\n # ax1.set_yticks([0.0, 0.2, 0.4])\n # ax1.spines['left'].set_bounds(.0, .4)\nelse:\n ax1.set_ylabel(\n r'total history dependence $R_{\\mathrm{tot}}$')\n ax1.set_ylim((0.0, 0.45))\n ax1.set_yticks([0.0, 0.2, 0.4])\n ax1.spines['left'].set_bounds(.0, .4)\n\nax2.set_yscale('log')\nax2.set_ylim((5, 300))\n# ax.set_xticks(np.array([1, 10, 
50]))\nax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\nax2.set_ylabel(r'information timescale $\\tau_R$ (ms)')\n\n\n\"\"\"Load data\"\"\"\nrecorded_system = 'CA1'\nnumber_valid_neurons = 28\nR_tot_CA1 = []\nI_tot_CA1 = []\ntau_R_CA1 = []\nrate_CA1 = []\nCV_CA1 = []\nmedianISI_CA1 = []\nautocorrelation_time_CA1 = []\nfor neuron_index in range(number_valid_neurons):\n tau_R, R_tot = get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path)\n I_tot = plots.load_total_mutual_information(recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = regularization_method, use_settings_path = use_settings_path)\n stats = get_stats(bin_size_ms, T_0_ms, neuron_index, recorded_system, CODE_DIR)\n rate = stats['rate']\n p_spike = rate*0.005\n H_spiking = get_spike_entropy(p_spike)\n I_tot_rate = R_tot*H_spiking\n R_tot_CA1 += [R_tot]\n I_tot_CA1 += [I_tot_rate]\n tau_R_CA1 += [tau_R*1000]\n rate_CA1 += [stats['rate']]\n medianISI_CA1 += [stats['medianISI']]\n CV_CA1 += [stats['CV']]\n autocorrelation_time_CA1 += [stats['autocorrelation_time']]\n\n\nR_tot_CA1_median = np.median(R_tot_CA1)\nR_tot_CA1_median_loCI, R_tot_CA1_median_hiCI = plots.get_CI_median(R_tot_CA1)\nI_tot_CA1_median = np.median(I_tot_CA1)\nI_tot_CA1_median_loCI, I_tot_CA1_median_hiCI = plots.get_CI_median(I_tot_CA1)\ntau_R_CA1_median = np.median(tau_R_CA1)\ntau_R_CA1_median_loCI, tau_R_CA1_median_hiCI = plots.get_CI_median(tau_R_CA1)\nrate_CA1_median = np.median(rate_CA1)\nrate_CA1_median_loCI, rate_CA1_median_hiCI = plots.get_CI_median(rate_CA1)\nmedianISI_CA1_median = np.median(medianISI_CA1)\nmedianISI_CA1_median_loCI, medianISI_CA1_median_hiCI = plots.get_CI_median(medianISI_CA1)\nCV_CA1_median = np.median(CV_CA1)\nCV_CA1_median_loCI, CV_CA1_median_hiCI = plots.get_CI_median(CV_CA1)\nautocorrelation_time_CA1_median = np.median(autocorrelation_time_CA1)\nautocorrelation_time_CA1_median_loCI, autocorrelation_time_CA1_median_hiCI = plots.get_CI_median(autocorrelation_time_CA1)\n\nrecorded_system = 'retina'\nnumber_valid_neurons = 111\nR_tot_retina = []\nI_tot_retina = []\ntau_R_retina = []\nrate_retina = []\nCV_retina = []\nmedianISI_retina = []\nautocorrelation_time_retina = []\nfor neuron_index in range(number_valid_neurons):\n tau_R, R_tot = get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path)\n I_tot = plots.load_total_mutual_information(recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = regularization_method, use_settings_path = use_settings_path)\n stats = get_stats(bin_size_ms, T_0_ms, neuron_index, recorded_system, CODE_DIR)\n rate = stats['rate']\n p_spike = rate*0.005\n H_spiking = get_spike_entropy(p_spike)\n I_tot_rate = R_tot*H_spiking\n R_tot_retina += [R_tot]\n I_tot_retina += [I_tot_rate]\n tau_R_retina += [tau_R*1000]\n rate_retina += [stats['rate']]\n medianISI_retina += [stats['medianISI']]\n CV_retina += [stats['CV']]\n autocorrelation_time_retina += [stats['autocorrelation_time']]\n\nR_tot_retina_median = np.median(R_tot_retina)\nR_tot_retina_median_loCI, R_tot_retina_median_hiCI = plots.get_CI_median(R_tot_retina)\nI_tot_retina_median = np.median(I_tot_retina)\nI_tot_retina_median_loCI, I_tot_retina_median_hiCI = plots.get_CI_median(I_tot_retina)\ntau_R_retina_median = np.median(tau_R_retina)\ntau_R_retina_median_loCI, tau_R_retina_median_hiCI = 
plots.get_CI_median(tau_R_retina)\nrate_retina_median = np.median(rate_retina)\nrate_retina_median_loCI, rate_retina_median_hiCI = plots.get_CI_median(rate_retina)\nmedianISI_retina_median = np.median(medianISI_retina)\nmedianISI_retina_median_loCI, medianISI_retina_median_hiCI = plots.get_CI_median(medianISI_retina)\nCV_retina_median = np.median(CV_retina)\nCV_retina_median_loCI, CV_retina_median_hiCI = plots.get_CI_median(CV_retina)\nautocorrelation_time_retina_median = np.median(autocorrelation_time_retina)\nautocorrelation_time_retina_median_loCI, autocorrelation_time_retina_median_hiCI = plots.get_CI_median(autocorrelation_time_retina)\n\nrecorded_system = 'culture'\nnumber_valid_neurons = 48\nR_tot_culture = []\nI_tot_culture = []\ntau_R_culture = []\nrate_culture = []\nCV_culture = []\nmedianISI_culture = []\nautocorrelation_time_culture = []\nfor neuron_index in range(number_valid_neurons):\n tau_R, R_tot = get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path)\n I_tot = plots.load_total_mutual_information(recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = regularization_method, use_settings_path = use_settings_path)\n stats = get_stats(bin_size_ms, T_0_ms, neuron_index, recorded_system, CODE_DIR)\n rate = stats['rate']\n p_spike = rate*0.005\n H_spiking = get_spike_entropy(p_spike)\n I_tot_rate = R_tot*H_spiking\n R_tot_culture += [R_tot]\n I_tot_culture += [I_tot_rate]\n tau_R_culture += [tau_R*1000]\n rate_culture += [stats['rate']]\n medianISI_culture += [stats['medianISI']]\n CV_culture += [stats['CV']]\n autocorrelation_time_culture += [stats['autocorrelation_time']]\n\nR_tot_culture_median = np.median(R_tot_culture)\nR_tot_culture_median_loCI, R_tot_culture_median_hiCI = plots.get_CI_median(R_tot_culture)\nI_tot_culture_median = np.median(I_tot_culture)\nI_tot_culture_median_loCI, I_tot_culture_median_hiCI = plots.get_CI_median(I_tot_culture)\ntau_R_culture_median = np.median(tau_R_culture)\ntau_R_culture_median_loCI, tau_R_culture_median_hiCI = plots.get_CI_median(tau_R_culture)\nrate_culture_median = np.median(rate_culture)\nrate_culture_median_loCI, rate_culture_median_hiCI = plots.get_CI_median(rate_culture)\nmedianISI_culture_median = np.median(medianISI_culture)\nmedianISI_culture_median_loCI, medianISI_culture_median_hiCI = plots.get_CI_median(medianISI_culture)\nCV_culture_median = np.median(CV_culture)\nCV_culture_median_loCI, CV_culture_median_hiCI = plots.get_CI_median(CV_culture)\nautocorrelation_time_culture_median = np.median(autocorrelation_time_culture)\nautocorrelation_time_culture_median_loCI, autocorrelation_time_culture_median_hiCI = plots.get_CI_median(autocorrelation_time_culture)\n\n\nrecorded_system = 'V1'\nnumber_valid_neurons = 142\nR_tot_V1 = []\nI_tot_V1 = []\ntau_R_V1 = []\nrate_V1 = []\nCV_V1 = []\nmedianISI_V1 = []\nautocorrelation_time_V1 = []\nfor neuron_index in range(number_valid_neurons):\n tau_R, R_tot = get_tau_R_and_R_tot(T_0, setup, regularization_method, recorded_system, rec_length, neuron_index, CODE_DIR, use_settings_path)\n I_tot = plots.load_total_mutual_information(recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = regularization_method, use_settings_path = use_settings_path)\n stats = get_stats(bin_size_ms, T_0_ms, neuron_index, recorded_system, CODE_DIR)\n rate = stats['rate']\n p_spike = rate*0.005\n H_spiking = get_spike_entropy(p_spike)\n I_tot_rate = R_tot*H_spiking\n R_tot_V1 += 
[R_tot]\n I_tot_V1 += [I_tot_rate]\n tau_R_V1 += [tau_R*1000]\n rate_V1 += [stats['rate']]\n medianISI_V1 += [stats['medianISI']]\n CV_V1 += [stats['CV']]\n autocorrelation_time_V1 += [stats['autocorrelation_time']]\n\nR_tot_V1_median = np.median(R_tot_V1)\nR_tot_V1_median_loCI, R_tot_V1_median_hiCI = plots.get_CI_median(R_tot_V1)\nI_tot_V1_median = np.median(I_tot_V1)\nI_tot_V1_median_loCI, I_tot_V1_median_hiCI = plots.get_CI_median(I_tot_V1)\ntau_R_V1_median = np.median(tau_R_V1)\ntau_R_V1_median_loCI, tau_R_V1_median_hiCI = plots.get_CI_median(tau_R_V1)\nrate_V1_median = np.median(rate_V1)\nrate_V1_median_loCI, rate_V1_median_hiCI = plots.get_CI_median(rate_V1)\nmedianISI_V1_median = np.median(medianISI_V1)\nmedianISI_V1_median_loCI, medianISI_V1_median_hiCI = plots.get_CI_median(medianISI_V1)\nCV_V1_median = np.median(CV_V1)\nCV_V1_median_loCI, CV_V1_median_hiCI = plots.get_CI_median(CV_V1)\nautocorrelation_time_V1_median = np.median(autocorrelation_time_V1)\nautocorrelation_time_V1_median_loCI, autocorrelation_time_V1_median_hiCI = plots.get_CI_median(autocorrelation_time_V1)\n\n\n# I_tot\nif total_mutual_information == True:\n ax1.errorbar(x=[rate_culture_median], y=[I_tot_culture_median], yerr=[[I_tot_culture_median-I_tot_culture_median_loCI], [I_tot_culture_median_hiCI-I_tot_culture_median]], xerr=[[rate_culture_median-rate_culture_median_loCI], [rate_culture_median_hiCI-rate_culture_median]], color=main_red, marker='v', markersize=6)\n\n ax1.errorbar(x=[rate_retina_median], y=[I_tot_retina_median], yerr=[[I_tot_retina_median-I_tot_retina_median_loCI], [I_tot_retina_median_hiCI-I_tot_retina_median]], xerr=[[rate_retina_median-rate_retina_median_loCI], [rate_retina_median_hiCI-rate_retina_median]], color='orange', marker='o', markersize=6)\n\n ax1.errorbar(x=[rate_V1_median], y=[I_tot_V1_median], yerr=[[I_tot_V1_median-I_tot_V1_median_loCI], [I_tot_V1_median_hiCI-I_tot_V1_median]], xerr=[[rate_V1_median-rate_V1_median_loCI], [rate_V1_median_hiCI-rate_V1_median]], color=green, marker='s', markersize=6)\n ax1.errorbar(x=[rate_CA1_median], y=[I_tot_CA1_median], yerr=[[I_tot_CA1_median-I_tot_CA1_median_loCI], [I_tot_CA1_median_hiCI-I_tot_CA1_median]], xerr=[[rate_CA1_median-rate_CA1_median_loCI], [rate_CA1_median_hiCI-rate_CA1_median]], color=main_blue, marker='D', markersize=6)\nelse:\n R_tot\n ax1.errorbar(x=[rate_culture_median], y=[R_tot_culture_median], yerr=[[R_tot_culture_median-R_tot_culture_median_loCI], [R_tot_culture_median_hiCI-R_tot_culture_median]], xerr=[[rate_culture_median-rate_culture_median_loCI], [rate_culture_median_hiCI-rate_culture_median]], color=main_red, marker='v', markersize=6)\n\n ax1.errorbar(x=[rate_retina_median], y=[R_tot_retina_median], yerr=[[R_tot_retina_median-R_tot_retina_median_loCI], [R_tot_retina_median_hiCI-R_tot_retina_median]], xerr=[[rate_retina_median-rate_retina_median_loCI], [rate_retina_median_hiCI-rate_retina_median]], color='orange', marker='o', markersize=6)\n\n ax1.errorbar(x=[rate_V1_median], y=[R_tot_V1_median], yerr=[[R_tot_V1_median-R_tot_V1_median_loCI], [R_tot_V1_median_hiCI-R_tot_V1_median]], xerr=[[rate_V1_median-rate_V1_median_loCI], [rate_V1_median_hiCI-rate_V1_median]], color=green, marker='s', markersize=6)\n\n ax1.errorbar(x=[rate_CA1_median], y=[R_tot_CA1_median], yerr=[[R_tot_CA1_median-R_tot_CA1_median_loCI], [R_tot_CA1_median_hiCI-R_tot_CA1_median]], xerr=[[rate_CA1_median-rate_CA1_median_loCI], [rate_CA1_median_hiCI-rate_CA1_median]], color=main_blue, marker='D', markersize=6)\n\n# 
tau_R\nax2.errorbar(x=[rate_culture_median], y=[tau_R_culture_median], yerr=[[tau_R_culture_median-tau_R_culture_median_loCI], [tau_R_culture_median_hiCI-tau_R_culture_median]], xerr=[[rate_culture_median-rate_culture_median_loCI], [rate_culture_median_hiCI-rate_culture_median]], color=main_red, marker='v', markersize=6)\n\nax2.errorbar(x=[rate_retina_median], y=[tau_R_retina_median], yerr=[[tau_R_retina_median-tau_R_retina_median_loCI], [tau_R_retina_median_hiCI-tau_R_retina_median]], xerr=[[rate_retina_median-rate_retina_median_loCI], [rate_retina_median_hiCI-rate_retina_median]], color='orange', marker='o', markersize=6)\n\nax2.errorbar(x=[rate_V1_median], y=[tau_R_V1_median], yerr=[[tau_R_V1_median-tau_R_V1_median_loCI], [tau_R_V1_median_hiCI-tau_R_V1_median]], xerr=[[rate_V1_median-rate_V1_median_loCI], [rate_V1_median_hiCI-rate_V1_median]], color=green, marker='s', markersize=6)\n\nax2.errorbar(x=[rate_CA1_median], y=[tau_R_CA1_median], yerr=[[tau_R_CA1_median-tau_R_CA1_median_loCI], [tau_R_CA1_median_hiCI-tau_R_CA1_median]], xerr=[[rate_CA1_median-rate_CA1_median_loCI], [rate_CA1_median_hiCI-rate_CA1_median]], color=main_blue, marker='D', markersize=6)\n\nif total_mutual_information == True:\n # Itot\n ax1.scatter(rate_culture, I_tot_culture,\n s=3, color=main_red, marker=\"v\", alpha=0.5, zorder=2)\n ax1.scatter(rate_retina, I_tot_retina,\n s=3, color='orange', marker=\"o\", alpha=0.5, zorder=2)\n ax1.scatter(rate_V1, I_tot_V1,\n s=3, color=green, marker=\"s\", alpha=0.5, zorder=2)\n ax1.scatter(rate_CA1, I_tot_CA1,\n s=3, color=main_blue, marker=\"s\", alpha=0.5, zorder=2)\nelse:\n # Rtot\n ax1.scatter(rate_culture, R_tot_culture,\n s=3, color=main_red, marker=\"v\", alpha=0.5, zorder=2)\n ax1.scatter(rate_retina, R_tot_retina,\n s=3, color='orange', marker=\"o\", alpha=0.5, zorder=2)\n ax1.scatter(rate_V1, R_tot_V1,\n s=3, color=green, marker=\"s\", alpha=0.5, zorder=2)\n ax1.scatter(rate_CA1, R_tot_CA1,\n s=3, color=main_blue, marker=\"s\", alpha=0.5, zorder=2)\n\n# tau_R\nax2.scatter(rate_culture, tau_R_culture,\n s=3, color=main_red, marker=\"v\", alpha=0.5, zorder=2)\nax2.scatter(rate_retina, tau_R_retina,\n s=3, color='orange', marker=\"o\", alpha=0.5, zorder=2)\nax2.scatter(rate_V1, tau_R_V1,\n s=3, color=green, marker=\"s\", alpha=0.5, zorder=2)\nax2.scatter(rate_CA1, tau_R_CA1,\n s=3, color=main_blue, marker=\"s\", alpha=0.5, zorder=2)\n\n\n# ax.legend(loc=(1.0, 0.1), frameon=False)\nfig.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)\n\nif total_mutual_information == True:\n plt.savefig('%s/S13Fig_Itot_vs_rate.pdf'%(PLOTTING_DIR),\n format=\"pdf\", bbox_inches='tight')\nelse:\n plt.savefig('%s/S13Fig_measures_vs_rate.pdf'%(PLOTTING_DIR),\n format=\"pdf\", bbox_inches='tight')\n\nplt.show()\nplt.close()\n",
"\"\"\"Functions\"\"\"\nimport matplotlib\nfrom matplotlib import rc\nimport seaborn.apionly as sns\nimport pylab as plt\nimport numpy as np\nfrom sys import exit, stderr, argv, path, modules\nfrom os.path import isfile, isdir, realpath, dirname, exists\n# import plotutils\n\nPLOTTING_DIR = dirname(realpath(__file__))\nCODE_DIR = '{}/../..'.format(PLOTTING_DIR)\npath.insert(1, '{}/src'.format(CODE_DIR))\nuse_settings_path = False\n\nif 'hde_glm' not in modules:\n import hde_glm as glm\n import hde_utils as utl\n import hde_plotutils as plots\n\n\"\"\"Parameters and Settings\"\"\"\nrecorded_system = 'CA1'\nrec_length = '90min'\nDATA_DIR = '{}/data/CA1/'.format(CODE_DIR)\nvalidNeurons = np.load('{}validNeurons.npy'.format(DATA_DIR))\nT_0 = 0.00997\n\n\"\"\"Plotting\"\"\"\n# Font\nrc('text', usetex=True)\nmatplotlib.rcParams['font.size'] = '15.0'\nmatplotlib.rcParams['xtick.labelsize'] = '15'\nmatplotlib.rcParams['ytick.labelsize'] = '15'\nmatplotlib.rcParams['legend.fontsize'] = '15'\nmatplotlib.rcParams['axes.linewidth'] = 0.6\n\n# Colors\nmain_red = sns.color_palette(\"RdBu_r\", 15)[12]\nmain_blue = sns.color_palette(\"RdBu_r\", 15)[1]\nsoft_red = sns.color_palette(\"RdBu_r\", 15)[10]\nsoft_blue = sns.color_palette(\"RdBu_r\", 15)[4]\nviolet = sns.cubehelix_palette(8)[4]\ngreen = sns.cubehelix_palette(8, start=.5, rot=-.75)[3]\n\nfig, axes = plt.subplots(4, 7, figsize=(14., 7.5))\n\n# Sort neurons, put neurons with max_val > 0.2 and max_val <0.3 in a separate group\n\nsmallR = []\nmediumR = []\nhighR = []\nveryhighR = []\nsetup = 'full_shuffling'\nfor neuron_index, neuron in enumerate(validNeurons):\n ANALYSIS_DIR, analysis_num_str,R_tot, T_D, T, R, R_CI_lo, R_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = 'shuffling', use_settings_path = use_settings_path)\n max_val = np.amax(R)\n if max_val > 0.2:\n if max_val >0.3:\n if max_val > 0.405:\n veryhighR += [neuron_index]\n else:\n highR += [neuron_index]\n else:\n mediumR+= [neuron_index]\n else:\n smallR += [neuron_index]\n\nindex_small_to_medium = len(smallR)\nindex_medium_to_high = len(smallR)+len(mediumR)\nindex_high_to_veryhigh = len(smallR)+len(mediumR)+len(highR)\n\nfor k, neuron_index in enumerate(np.append(np.append(np.append(smallR, mediumR),highR),veryhighR)):\n\n ax = axes[int(k/7)][k%7]\n\n \"\"\"Load data full\"\"\"\n setup = 'full_bbc'\n ANALYSIS_DIR, analysis_num_str, R_tot_bbc, T_D_bbc, T, R_bbc, R_bbc_CI_lo, R_bbc_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = 'bbc', use_settings_path = use_settings_path)\n R_tot_bbc, T_D_index_bbc, max_valid_index_bbc = plots.get_R_tot(T, R_bbc, R_bbc_CI_lo)\n dR_bbc = plots.get_dR(T ,R_bbc ,R_tot_bbc)\n tau_R_bbc = plots.get_T_avg(T, dR_bbc, T_0)\n\n # Get R_tot_glm for T_D\n R_tot_glm = plots.load_analysis_results_glm(ANALYSIS_DIR, analysis_num_str)\n\n setup = 'full_shuffling'\n ANALYSIS_DIR, analysis_num_str,R_tot_shuffling, T_D_shuffling, T, R_shuffling, R_shuffling_CI_lo, R_shuffling_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = 'shuffling', use_settings_path = use_settings_path)\n R_tot_shuffling, T_D_index_shuffling, max_valid_index_shuffling = plots.get_R_tot(T, R_shuffling, R_shuffling_CI_lo)\n dR_shuffling = plots.get_dR(T ,R_shuffling ,R_tot_shuffling)\n tau_R_shuffling = plots.get_T_avg(T, dR_shuffling, T_0)\n\n \"\"\"Load data five bins\"\"\"\n setup 
= 'fivebins'\n ANALYSIS_DIR, analysis_num_str,R_tot_fivebins, T_D_fivebins, T, R_fivebins, R_fivebins_CI_lo, R_fivebins_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = 'shuffling', use_settings_path = use_settings_path)\n R_tot_fivebins, T_D_index_fivebins, max_valid_index_fivebins = plots.get_R_tot(T, R_fivebins, R_fivebins_CI_lo)\n dR_fivebins = plots.get_dR(T ,R_fivebins ,R_tot_fivebins)\n tau_R_fivebins = plots.get_T_avg(T, dR_fivebins, T_0)\n\n \"\"\"Load data onebins\"\"\"\n setup = 'onebin'\n ANALYSIS_DIR, analysis_num_str,R_tot_onebin, T_D_onebin, T, R_onebin, R_onebin_CI_lo, R_onebin_CI_hi = plots.load_analysis_results(\n recorded_system, rec_length, neuron_index, setup, CODE_DIR, regularization_method = 'shuffling', use_settings_path = use_settings_path)\n R_tot_onebin, T_D_index_onebin, max_valid_index_onebin = plots.get_R_tot(T, R_onebin, R_onebin_CI_lo)\n dR_onebin = plots.get_dR(T ,R_onebin ,R_tot_onebin)\n tau_R_onebin = plots.get_T_avg(T, dR_onebin, T_0)\n\n ax.set_xscale('log')\n x_min = 0.005\n x_max = 5.\n ax.set_xlim((0.005, 5.))\n ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\n ax.spines['bottom'].set_bounds(0.005, 5.)\n ax.set_xticks(np.array([0.01, 0.1, 1.0]))\n ax.set_xticklabels(\n [r'$10$', r'$100$', r'$1000$'], rotation='horizontal')\n\n ##### y-axis ####\n ##### Unset Borders #####\n ax.spines['top'].set_bounds(0, 0)\n ax.spines['right'].set_bounds(0, 0)\n max_val = np.amax(R_shuffling)\n if max_val > 0.2:\n if max_val > 0.3:\n if max_val > 0.405:\n yrange = 0.41\n ymin = 0.1\n ax.set_ylim((0.1, .51))\n ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5])\n ax.spines['left'].set_bounds(.1, 0.5)\n else:\n yrange = 0.3\n ymin = 0.1\n ax.set_ylim((0.1, .4))\n ax.set_yticks([0.1, 0.2, 0.3, 0.4])\n ax.spines['left'].set_bounds(.1, 0.4)\n else:\n yrange = 0.3\n ymin = 0.0\n ax.set_ylim((0.0, .3))\n ax.set_yticks([0.0, 0.1, 0.2, 0.3])\n ax.spines['left'].set_bounds(.0, 0.3)\n else:\n yrange = 0.2\n ymin = 0.0\n ax.set_ylim((0.0, .2))\n ax.set_yticks([0.0, 0.1, 0.2])\n ax.spines['left'].set_bounds(.0, 0.2)\n\n ##### Unset Borders #####\n ax.spines['top'].set_bounds(0, 0)\n ax.spines['right'].set_bounds(0, 0)\n\n \"\"\"BBC\"\"\"\n ax.plot(T, R_bbc, linewidth=1.2, color=main_red,\n label=r'BBC, $d_{\\mathrm{max}}=20$', zorder=10)\n ax.fill_between(T, R_bbc_CI_lo, R_bbc_CI_hi,\n facecolor=main_red, zorder= 10, alpha=0.3)\n\n # Rtot indicators\n x = (np.log10(T_D_bbc) - np.log10(x_min)) / \\\n (np.log10(x_max) - np.log10(x_min))\n ax.axhline(y=R_tot_bbc, xmax=x, color=main_red,\n linewidth=0.5, linestyle='--')\n ax.plot([x_min], [R_tot_bbc], marker='d',markersize = 5., color=main_red,\n zorder=8)\n ax.plot([T_D_bbc], [R_tot_bbc], marker='|', markersize = 6., color=main_red,\n zorder=14)\n ax.plot([T[max_valid_index_bbc-1]], [R_tot_bbc], marker='|', markersize = 6., color=main_red,\n zorder=14)\n ax.plot(T[T_D_index_bbc:max_valid_index_bbc], np.zeros(max_valid_index_bbc-T_D_index_bbc)+R_tot_bbc, color = main_red, linewidth=1.5, linestyle='--')\n # tau_R indicators\n ax.plot([tau_R_bbc], [ymin], marker='d', markersize = 5., color=main_red,\n zorder=8)\n ax.axvline(x=tau_R_bbc, ymax=(R_tot_bbc - ymin) / yrange, color=main_red,\n linewidth=0.5, linestyle='--')\n\n if k == 0:\n ax.text(0.007, R_tot_bbc + 0.04 *\n R_tot_bbc, r'$R_{\\mathrm{tot}}$')\n ax.text(tau_R_bbc + 0.7 * tau_R_bbc, ymin + .005, r'$\\tau_R$')\n\n\n \"\"\"Shuffling\"\"\"\n ax.plot(T, R_shuffling, linewidth=1.2, 
color=main_blue,\n label=r'Shuffling, $d_{\\mathrm{max}}=20$', zorder=3)\n ax.fill_between(T, R_shuffling_CI_lo, R_shuffling_CI_hi,\n facecolor=main_blue, zorder= 8, alpha=0.3)\n # Rtot indicators\n x = (np.log10(T_D_shuffling) - np.log10(x_min)) / \\\n (np.log10(x_max) - np.log10(x_min))\n ax.axhline(y=R_tot_shuffling, xmax=x, color=main_blue,\n linewidth=0.5, linestyle='--')\n ax.plot([x_min], [R_tot_shuffling], marker='d',markersize = 5., color=main_blue,\n zorder=8)\n ax.plot(T[T_D_index_shuffling:max_valid_index_shuffling], np.zeros(max_valid_index_shuffling-T_D_index_shuffling)+R_tot_shuffling, color = main_blue, linewidth=1.5, linestyle='--')\n ax.plot([T_D_shuffling], [R_tot_shuffling], marker='|', markersize = 6., color=main_blue,\n zorder=13)\n ax.plot([T[max_valid_index_shuffling-1]], [R_tot_shuffling], marker='|', markersize = 6., color=main_blue, zorder=14)\n # tau_R indicators\n ax.axvline(x=tau_R_shuffling, ymax=(R_tot_shuffling - ymin) / yrange, color=main_blue,\n linewidth=0.5, linestyle='--')\n ax.plot([tau_R_shuffling], [ymin], marker='d', markersize = 5., color=main_blue,\n zorder=8)\n\n \"\"\"Fivebins\"\"\"\n ax.plot(T, R_fivebins, linewidth=1.2, color=green,\n label=r'Shuffling, $d_{\\mathrm{max}}=5$', zorder=3)\n ax.fill_between(T, R_fivebins_CI_lo, R_fivebins_CI_hi,\n facecolor=green, zorder= 10, alpha=0.3)\n # Rtot indicators\n x = (np.log10(T_D_fivebins) - np.log10(x_min)) / \\\n (np.log10(x_max) - np.log10(x_min))\n ax.axhline(y=R_tot_fivebins, xmax=x, color=green,\n linewidth=0.5, linestyle='--')\n ax.plot([x_min], [R_tot_fivebins], marker='d',markersize = 5., color=green,\n zorder=8)\n ax.plot(T[T_D_index_fivebins:max_valid_index_fivebins], np.zeros(max_valid_index_fivebins-T_D_index_fivebins)+R_tot_fivebins, color = green, linewidth=1.5, linestyle='--')\n ax.plot([T_D_fivebins], [R_tot_fivebins], marker='|', markersize = 6., color=green,\n zorder=12)\n ax.plot([T[max_valid_index_fivebins-1]], [R_tot_fivebins], marker='|', markersize = 6., color=green, zorder=14)\n # tau_R indicators\n ax.plot([tau_R_fivebins], [ymin], marker='d', markersize = 5., color=green,\n zorder=8)\n ax.axvline(x=tau_R_fivebins, ymax=(R_tot_fivebins - ymin) / yrange, color=green,\n linewidth=0.5, linestyle='--')\n\n \"\"\"One bin\"\"\"\n ax.plot(T, R_onebin, linewidth=1.2, color='y',\n label=r'Shuffling, $d_{\\mathrm{max}}=1$', zorder=3)\n ax.fill_between(T, R_onebin_CI_lo, R_onebin_CI_hi,\n facecolor='y', zorder= 10, alpha=0.3)\n # Rtot indicators\n x = (np.log10(T_D_onebin) - np.log10(x_min)) / \\\n (np.log10(x_max) - np.log10(x_min))\n ax.axhline(y=R_tot_onebin, xmax=x, color='y',\n linewidth=0.5, linestyle='--')\n ax.plot([x_min], [R_tot_onebin], marker='d',markersize = 5., color='y',\n zorder=8)\n ax.plot([T_D_onebin], [R_tot_onebin], marker='|', markersize = 6., color='y',\n zorder=8)\n ax.plot([T[max_valid_index_onebin-1]], [R_tot_onebin], marker='|', markersize = 6., color='y', zorder=14)\n ax.plot(T[T_D_index_onebin:max_valid_index_onebin], np.zeros(max_valid_index_onebin-T_D_index_onebin)+R_tot_onebin, color = 'y', linewidth=1.5, linestyle='--')\n # tau_R indicators\n ax.plot([tau_R_onebin], [ymin], marker='d', markersize = 5., color='y',\n zorder=8)\n ax.axvline(x=tau_R_onebin, ymax=(R_tot_onebin - ymin) / yrange, color='y',\n linewidth=0.5, linestyle='--')\n\n \"\"\"GLM\"\"\"\n # Plot R_tot_glm\n ax.plot([T_D_bbc], [R_tot_glm], 's', color=violet, label=r'GLM, $d_{\\mathrm{max}}=50$')\n x = (np.log10(T_D_bbc) - np.log10(x_min)) / \\\n (np.log10(x_max) - np.log10(x_min))\n 
ax.plot([x_min], [R_tot_glm], marker='d',markersize = 5., color=violet,\n zorder=8)\n ax.axhline(y=R_tot_glm, xmax=x, color=violet,\n linewidth=0.5, linestyle='--')\n\n\n if not int(k/7) == 3:\n xlabels = [item.get_text() for item in ax.get_xticklabels()]\n empty_string_labels = ['']*len(xlabels)\n ax.set_xticklabels(empty_string_labels)\n if not k%7 == 0:\n if not k == index_small_to_medium:\n if not k == index_medium_to_high:\n if not k == index_high_to_veryhigh:\n ylabels = [item.get_text() for item in ax.get_yticklabels()]\n empty_string_labels = ['']*len(ylabels)\n ax.set_yticklabels(empty_string_labels)\n if k == 0:\n ax.legend(loc=(-0.1, 1.2), frameon=False)\n\nfig.text(0.5, - 0.01, r'past range $T$ (ms)', ha='center', va='center', fontsize = 17)\nfig.text(-0.01, 0.5, r'history dependence $R(T)$', ha='center', va='center', rotation='vertical', fontsize = 17)\nfig.tight_layout(pad=1.0, w_pad=-2, h_pad=1.0)\nplt.savefig('{}/S6Fig.pdf'.format(PLOTTING_DIR),\n format=\"pdf\", bbox_inches='tight')\n\nplt.show()\nplt.close()\n"
] |
[
[
"numpy.median",
"matplotlib.ticker.ScalarFormatter",
"numpy.log2",
"matplotlib.rc"
],
[
"numpy.array",
"numpy.zeros",
"matplotlib.rc",
"numpy.amax",
"numpy.append",
"matplotlib.ticker.ScalarFormatter",
"numpy.log10"
]
] |
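Each record in this dump pairs a "code" string with an "apis" list of fully qualified call names (for example numpy.median or matplotlib.ticker.ScalarFormatter in the entry above). The dump itself does not state how those lists were produced, so the following is only a minimal sketch of one plausible derivation, assuming a plain AST walk over the stored source with a fixed set of target modules; extract_api_calls and its modules parameter are illustrative names, not the dataset's actual tooling.

import ast

def extract_api_calls(code, modules=("numpy", "scipy", "matplotlib")):
    """Return fully qualified call names (e.g. 'numpy.median') found in `code`."""
    tree = ast.parse(code)

    # Map local names to the modules/objects they refer to,
    # e.g. {'np': 'numpy', 'plt': 'matplotlib.pyplot', 'rc': 'matplotlib.rc'}.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for alias in node.names:
                aliases[alias.asname or alias.name] = f"{node.module}.{alias.name}"

    def qualify(func):
        # Rebuild dotted call targets such as np.random.rand -> numpy.random.rand.
        parts = []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in aliases:
            return ".".join([aliases[func.id]] + list(reversed(parts)))
        return None

    found = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            name = qualify(node.func)
            if name and name.split(".")[0] in modules:
                found.add(name)
    return sorted(found)

Run over the plotting script stored in this record, such a walk would be expected to surface names like numpy.log10, numpy.amax and matplotlib.ticker.ScalarFormatter, which is consistent with the second sub-list above (the dataset's real extraction may differ in ordering and scope).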
Screams233/MachineLearning_Python
|
[
"fc954f68d8b2d89ed4463308873ebce0d22ddaa2"
] |
[
"NeuralNetwok/NeuralNetwork.py"
] |
[
"#-*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import io as spio\nfrom matplotlib import pyplot as plt\nfrom scipy import optimize\nfrom matplotlib.font_manager import FontProperties\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14) # 解决windows环境下画图汉字乱码问题\n\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nimport time\n\ndef neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer):\n data_img = loadmat_data(\"data_digits.mat\")\n X = data_img['X']\n y = data_img['y']\n\n '''scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)''' \n \n m,n = X.shape\n \"\"\"digits = datasets.load_digits()\n X = digits.data\n y = digits.target\n m,n = X.shape\n \n scaler = StandardScaler()\n scaler.fit(X)\n X = scaler.transform(X)\"\"\"\n \n ## 随机显示几行数据\n rand_indices = [t for t in [np.random.randint(x-x, m) for x in range(100)]] # 生成100个0-m的随机数\n display_data(X[rand_indices,:]) # 显示100个数字 \n \n #nn_params = np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1)))\n \n Lambda = 1\n \n initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)\n \n initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n #np.savetxt(\"testTheta.csv\",initial_nn_params,delimiter=\",\")\n start = time.time()\n result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient, args=(input_layer_size,hidden_layer_size,out_put_layer,X,y,Lambda), maxiter=100)\n print (u'执行时间:',time.time()-start)\n print (result)\n '''可视化 Theta1'''\n length = result.shape[0]\n Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(out_put_layer,hidden_layer_size+1) \n display_data(Theta1[:,1:length])\n display_data(Theta2[:,1:length])\n '''预测'''\n p = predict(Theta1,Theta2,X)\n print (u\"预测准确度为:%f%%\"%np.mean(np.float64(p == y.reshape(-1,1))*100)) \n res = np.hstack((p,y.reshape(-1,1)))\n np.savetxt(\"predict.csv\", res, delimiter=',')\n \n\n# 加载mat文件\ndef loadmat_data(fileName):\n return spio.loadmat(fileName)\n\n# 显示100个数字\ndef display_data(imgData):\n sum = 0\n '''\n 显示100个数(若是一个一个绘制将会非常慢,可以将要画的数字整理好,放到一个矩阵中,显示这个矩阵即可)\n - 初始化一个二维数组\n - 将每行的数据调整成图像的矩阵,放进二维数组\n - 显示即可\n '''\n m,n = imgData.shape\n width = np.int32(np.round(np.sqrt(n)))\n height = np.int32(n/width);\n rows_count = np.int32(np.floor(np.sqrt(m)))\n cols_count = np.int32(np.ceil(m/rows_count))\n pad = 1\n display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))\n for i in range(rows_count):\n for j in range(cols_count):\n if sum >= m: #超过了行数,退出当前循环\n break;\n display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[sum,:].reshape(height,width,order=\"F\") # order=F指定以列优先,在matlab中是这样的,python中需要指定,默认以行\n sum += 1\n if sum >= m: #超过了行数,退出当前循环\n break;\n \n plt.imshow(display_array,cmap='gray') #显示灰度图像\n plt.axis('off')\n plt.show()\n\n# 代价函数\ndef nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0] # theta的中长度\n # 还原theta1和theta2\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1)\n \n # np.savetxt(\"Theta1.csv\",Theta1,delimiter=',')\n 
\n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系\n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始''' \n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n # 正则化向theta^2\n term = np.dot(np.transpose(np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1)))),np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1))))\n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X)) \n z2 = np.dot(a1,np.transpose(Theta1)) \n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3) \n '''代价''' \n J = -(np.dot(np.transpose(class_y.reshape(-1,1)),np.log(h.reshape(-1,1)))+np.dot(np.transpose(1-class_y.reshape(-1,1)),np.log(1-h.reshape(-1,1)))-Lambda*term/2)/m \n #temp1 = (h.reshape(-1,1)-class_y.reshape(-1,1))\n #temp2 = (temp1**2).sum()\n #J = 1/(2*m)*temp2\n return np.ravel(J)\n\n# 梯度\ndef nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):\n length = nn_params.shape[0]\n Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1).copy() # 这里使用copy函数,否则下面修改Theta的值,nn_params也会一起修改\n Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1).copy()\n m = X.shape[0]\n class_y = np.zeros((m,num_labels)) # 数据的y对应0-9,需要映射为0/1的关系 \n # 映射y\n for i in range(num_labels):\n class_y[:,i] = np.int32(y==i).reshape(1,-1) # 注意reshape(1,-1)才可以赋值\n \n '''去掉theta1和theta2的第一列,因为正则化时从1开始'''\n Theta1_colCount = Theta1.shape[1] \n Theta1_x = Theta1[:,1:Theta1_colCount]\n Theta2_colCount = Theta2.shape[1] \n Theta2_x = Theta2[:,1:Theta2_colCount]\n \n Theta1_grad = np.zeros((Theta1.shape)) #第一层到第二层的权重\n Theta2_grad = np.zeros((Theta2.shape)) #第二层到第三层的权重\n \n \n '''正向传播,每次需要补上一列1的偏置bias'''\n a1 = np.hstack((np.ones((m,1)),X))\n z2 = np.dot(a1,np.transpose(Theta1))\n a2 = sigmoid(z2)\n a2 = np.hstack((np.ones((m,1)),a2))\n z3 = np.dot(a2,np.transpose(Theta2))\n h = sigmoid(z3)\n \n \n '''反向传播,delta为误差,'''\n delta3 = np.zeros((m,num_labels))\n delta2 = np.zeros((m,hidden_layer_size))\n for i in range(m):\n #delta3[i,:] = (h[i,:]-class_y[i,:])*sigmoidGradient(z3[i,:]) # 均方误差的误差率\n delta3[i,:] = h[i,:]-class_y[i,:] # 交叉熵误差率\n Theta2_grad = Theta2_grad+np.dot(np.transpose(delta3[i,:].reshape(1,-1)),a2[i,:].reshape(1,-1))\n delta2[i,:] = np.dot(delta3[i,:].reshape(1,-1),Theta2_x)*sigmoidGradient(z2[i,:])\n Theta1_grad = Theta1_grad+np.dot(np.transpose(delta2[i,:].reshape(1,-1)),a1[i,:].reshape(1,-1))\n \n Theta1[:,0] = 0\n Theta2[:,0] = 0 \n '''梯度'''\n grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m\n return np.ravel(grad)\n\n# S型函数 \ndef sigmoid(z):\n h = np.zeros((len(z),1)) # 初始化,与z的长度一致\n \n h = 1.0/(1.0+np.exp(-z))\n return h\n\n# S型函数导数\ndef sigmoidGradient(z):\n g = sigmoid(z)*(1-sigmoid(z))\n return g\n\n# 随机初始化权重theta\ndef randInitializeWeights(L_in,L_out):\n W = np.zeros((L_out,1+L_in)) # 对应theta的权重\n epsilon_init = (6.0/(L_out+L_in))**0.5\n W = np.random.rand(L_out,1+L_in)*2*epsilon_init-epsilon_init # np.random.rand(L_out,1+L_in)产生L_out*(1+L_in)大小的随机矩阵\n return W\n\n\n# 检验梯度是否计算正确\ndef checkGradient(Lambda = 0):\n '''构造一个小型的神经网络验证,因为数值法计算梯度很浪费时间,而且验证正确后之后就不再需要验证了'''\n input_layer_size = 3\n hidden_layer_size = 5\n 
num_labels = 3\n m = 5\n initial_Theta1 = debugInitializeWeights(input_layer_size,hidden_layer_size); \n initial_Theta2 = debugInitializeWeights(hidden_layer_size,num_labels)\n X = debugInitializeWeights(input_layer_size-1,m)\n y = np.transpose(np.mod(np.arange(1,m+1), num_labels))# 初始化y\n \n y = y.reshape(-1,1)\n nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) #展开theta \n '''BP求出梯度'''\n grad = nnGradient(nn_params, input_layer_size, hidden_layer_size, \n num_labels, X, y, Lambda) \n '''使用数值法计算梯度'''\n num_grad = np.zeros((nn_params.shape[0]))\n step = np.zeros((nn_params.shape[0]))\n e = 1e-4\n for i in range(nn_params.shape[0]):\n step[i] = e\n loss1 = nnCostFunction(nn_params-step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n loss2 = nnCostFunction(nn_params+step.reshape(-1,1), input_layer_size, hidden_layer_size, \n num_labels, X, y, \n Lambda)\n num_grad[i] = (loss2-loss1)/(2*e)\n step[i]=0\n # 显示两列比较\n res = np.hstack((num_grad.reshape(-1,1),grad.reshape(-1,1)))\n print(\"检查梯度的结果,第一列为数值法计算得到的,第二列为BP得到的:\")\n print (res)\n\n# 初始化调试的theta权重\ndef debugInitializeWeights(fan_in,fan_out):\n W = np.zeros((fan_out,fan_in+1))\n x = np.arange(1,fan_out*(fan_in+1)+1)\n W = np.sin(x).reshape(W.shape)/10\n return W\n\n# 预测\ndef predict(Theta1,Theta2,X):\n m = X.shape[0]\n num_labels = Theta2.shape[0]\n #p = np.zeros((m,1))\n '''正向传播,预测结果'''\n X = np.hstack((np.ones((m,1)),X))\n h1 = sigmoid(np.dot(X,np.transpose(Theta1)))\n h1 = np.hstack((np.ones((m,1)),h1))\n h2 = sigmoid(np.dot(h1,np.transpose(Theta2)))\n \n '''\n 返回h中每一行最大值所在的列号\n - np.max(h, axis=1)返回h中每一行的最大值(是某个数字的最大概率)\n - 最后where找到的最大概率所在的列号(列号即是对应的数字)\n '''\n #np.savetxt(\"h2.csv\",h2,delimiter=',')\n p = np.array(np.where(h2[0,:] == np.max(h2, axis=1)[0])) \n for i in np.arange(1, m):\n t = np.array(np.where(h2[i,:] == np.max(h2, axis=1)[i]))\n p = np.vstack((p,t))\n return p \n\nif __name__ == \"__main__\":\n checkGradient()\n neuralNetwork(400, 25, 10)"
] |
[
[
"numpy.random.rand",
"numpy.exp",
"numpy.max",
"matplotlib.font_manager.FontProperties",
"numpy.sin",
"scipy.optimize.fmin_cg",
"numpy.arange",
"numpy.transpose",
"numpy.sqrt",
"numpy.random.randint",
"numpy.int32",
"numpy.vstack",
"matplotlib.pyplot.axis",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.show",
"numpy.ceil",
"scipy.io.loadmat",
"numpy.ones",
"numpy.ravel",
"matplotlib.pyplot.imshow"
]
] |
johnmlee101/manim
|
[
"7ec60462e01ce07811580480302721fcf259cb87"
] |
[
"constants.py"
] |
[
"import os\nimport numpy as np\nimport colour\nimport argparse\nimport sys\n\nSCRIPT_DIR = \"\"\nMEDIA_DIR = \"\"\nANIMATIONS_DIR = \"\"\nRASTER_IMAGE_DIR = \"\"\nSVG_IMAGE_DIR = \"\"\nSTAGED_SCENES_DIR = \"\"\nFILE_DIR = \"\"\nTEX_DIR = \"\"\nSAVE_DIR = \"\"\nTEX_IMAGE_DIR = \"\"\nMOBJECT_DIR = \"\"\nIMAGE_MOBJECT_DIR = \"\"\nLIB_DIR = \"\"\nTEX_TEXT_TO_REPLACE = \"\"\nTEMPLATE_TEX_FILE = \"\"\nTEMPLATE_TEXT_FILE = \"\"\nTEMPLATE_CODE_FILE = \"\"\nTEMPLATE_ALIGNAT_FILE = \"\"\n\n\ndef get_configuration():\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"file\", help=\"path to file holding the python code for the scene\"\n )\n parser.add_argument(\n \"scene_name\", nargs=\"?\",\n help=\"Name of the Scene class you want to see\",\n )\n optional_args = [\n (\"-p\", \"--preview\"),\n (\"-w\", \"--write_to_movie\"),\n (\"-s\", \"--show_last_frame\"),\n (\"-l\", \"--low_quality\"),\n (\"-m\", \"--medium_quality\"),\n (\"-g\", \"--save_pngs\"),\n (\"-f\", \"--show_file_in_finder\"),\n (\"-t\", \"--transparent\"),\n (\"-q\", \"--quiet\"),\n (\"-a\", \"--write_all\")\n ]\n for short_arg, long_arg in optional_args:\n parser.add_argument(short_arg, long_arg, action=\"store_true\")\n parser.add_argument(\"-o\", \"--output_name\")\n parser.add_argument(\"-n\", \"--start_at_animation_number\")\n parser.add_argument(\"-r\", \"--resolution\")\n parser.add_argument(\"-c\", \"--color\")\n parser.add_argument(\"-d\", \"--output_directory\")\n args = parser.parse_args()\n if args.output_name is not None:\n output_name_root, output_name_ext = os.path.splitext(\n args.output_name)\n expected_ext = '.png' if args.show_last_frame else '.mp4'\n if output_name_ext not in ['', expected_ext]:\n print(\"WARNING: The output will be to (doubly-dotted) %s%s\" %\n output_name_root % expected_ext)\n output_name = args.output_name\n else:\n # If anyone wants .mp4.mp4 and is surprised to only get .mp4, or such... 
Well, too bad.\n output_name = output_name_root\n else:\n output_name = args.output_name\n if args.output_directory is None:\n output_dir = os.path.dirname(args.file)\n else:\n output_dir = args.output_directory\n\n except argparse.ArgumentError as err:\n print(str(err))\n sys.exit(2)\n config = {\n \"file\": args.file,\n \"scene_name\": args.scene_name or \"\",\n \"open_video_upon_completion\": args.preview,\n \"show_file_in_finder\": args.show_file_in_finder,\n # By default, write to file\n \"write_to_movie\": args.write_to_movie or not args.show_last_frame,\n \"show_last_frame\": args.show_last_frame,\n \"save_pngs\": args.save_pngs,\n # If -t is passed in (for transparent), this will be RGBA\n \"saved_image_mode\": \"RGBA\" if args.transparent else \"RGB\",\n \"movie_file_extension\": \".mov\" if args.transparent else \".mp4\",\n \"quiet\": args.quiet or args.write_all,\n \"ignore_waits\": args.preview,\n \"write_all\": args.write_all,\n \"output_name\": output_name,\n \"output_dir\": output_dir,\n \"start_at_animation_number\": args.start_at_animation_number,\n \"end_at_animation_number\": None,\n }\n\n # Camera configuration\n config[\"camera_config\"] = {}\n if args.low_quality:\n config[\"camera_config\"].update(LOW_QUALITY_CAMERA_CONFIG)\n config[\"frame_duration\"] = LOW_QUALITY_FRAME_DURATION\n elif args.medium_quality:\n config[\"camera_config\"].update(MEDIUM_QUALITY_CAMERA_CONFIG)\n config[\"frame_duration\"] = MEDIUM_QUALITY_FRAME_DURATION\n else:\n config[\"camera_config\"].update(PRODUCTION_QUALITY_CAMERA_CONFIG)\n config[\"frame_duration\"] = PRODUCTION_QUALITY_FRAME_DURATION\n\n # If the resolution was passed in via -r\n if args.resolution:\n if \",\" in args.resolution:\n height_str, width_str = args.resolution.split(\",\")\n height = int(height_str)\n width = int(width_str)\n else:\n height = int(args.resolution)\n width = int(16 * height / 9)\n config[\"camera_config\"].update({\n \"pixel_height\": height,\n \"pixel_width\": width,\n })\n\n if args.color:\n try:\n config[\"camera_config\"][\"background_color\"] = colour.Color(args.color)\n except AttributeError as err:\n print(\"Please use a valid color\")\n print(err)\n sys.exit(2)\n\n # If rendering a transparent image/move, make sure the\n # scene has a background opacity of 0\n if args.transparent:\n config[\"camera_config\"][\"background_opacity\"] = 0\n\n # Arguments related to skipping\n stan = config[\"start_at_animation_number\"]\n if stan is not None:\n if \",\" in stan:\n start, end = stan.split(\",\")\n config[\"start_at_animation_number\"] = int(start)\n config[\"end_at_animation_number\"] = int(end)\n else:\n config[\"start_at_animation_number\"] = int(stan)\n\n config[\"skip_animations\"] = any([\n config[\"show_last_frame\"] and not config[\"write_to_movie\"],\n config[\"start_at_animation_number\"],\n ])\n return config\n\n\ndef init_directories(config):\n global SCRIPT_DIR\n global MEDIA_DIR\n global ANIMATIONS_DIR\n global RASTER_IMAGE_DIR\n global SVG_IMAGE_DIR\n global STAGED_SCENES_DIR\n global FILE_DIR\n global TEX_DIR\n global SAVE_DIR\n global TEX_IMAGE_DIR\n global MOBJECT_DIR\n global IMAGE_MOBJECT_DIR\n global LIB_DIR\n global TEX_TEXT_TO_REPLACE\n global TEMPLATE_TEX_FILE\n global TEMPLATE_TEXT_FILE\n global TEMPLATE_CODE_FILE\n global TEMPLATE_ALIGNAT_FILE\n\n SCRIPT_DIR = config[\"output_dir\"]\n if os.getenv(\"MEDIA_DIR\"):\n MEDIA_DIR = os.getenv(\"MEDIA_DIR\")\n elif os.path.exists(\"media_dir.txt\"):\n with open(\"media_dir.txt\", 'rU') as media_file:\n MEDIA_DIR = 
media_file.readline().strip()\n else:\n MEDIA_DIR = os.path.join(SCRIPT_DIR, \"media\")\n\n with open(\"media_dir.txt\", 'w') as media_file:\n media_file.write(MEDIA_DIR)\n #\n ANIMATIONS_DIR = os.path.join(MEDIA_DIR, \"animations\")\n RASTER_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"raster_images\")\n SVG_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"svg_images\")\n # TODO, staged scenes should really go into a subdirectory of a given scenes directory\n STAGED_SCENES_DIR = os.path.join(ANIMATIONS_DIR, \"staged_scenes\")\n ###\n FILE_DIR = os.path.join(SCRIPT_DIR, \"files\")\n TEX_DIR = os.path.join(FILE_DIR, \"Tex\")\n SAVE_DIR = os.path.join(FILE_DIR, \"saved_states\")\n TEX_IMAGE_DIR = TEX_DIR # TODO, What is this doing?\n # These two may be deprecated now.\n MOBJECT_DIR = os.path.join(FILE_DIR, \"mobjects\")\n IMAGE_MOBJECT_DIR = os.path.join(MOBJECT_DIR, \"image\")\n\n for folder in [FILE_DIR, RASTER_IMAGE_DIR, SVG_IMAGE_DIR, ANIMATIONS_DIR, TEX_DIR,\n TEX_IMAGE_DIR, SAVE_DIR, MOBJECT_DIR, IMAGE_MOBJECT_DIR,\n STAGED_SCENES_DIR]:\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n LIB_DIR = os.path.dirname(os.path.realpath(__file__))\n TEX_TEXT_TO_REPLACE = \"YourTextHere\"\n TEMPLATE_TEX_FILE = os.path.join(LIB_DIR, \"template.tex\")\n TEMPLATE_TEXT_FILE = os.path.join(LIB_DIR, \"text_template.tex\")\n TEMPLATE_CODE_FILE = os.path.join(LIB_DIR, \"code_template.tex\")\n TEMPLATE_ALIGNAT_FILE = os.path.join(LIB_DIR, \"alignat_template.tex\")\n\n\nHELP_MESSAGE = \"\"\"\n Usage:\n python extract_scene.py <module> [<scene name>]\n -p preview in low quality\n -s show and save picture of last frame\n -w write result to file [this is default if nothing else is stated]\n -o <file_name> write to a different file_name\n -l use low quality\n -m use medium quality\n -a run and save every scene in the script, or all args for the given scene\n -q don't print progress\n -f when writing to a movie file, export the frames in png sequence\n -t use transperency when exporting images\n -n specify the number of the animation to start from\n -r specify a resolution\n -c specify a background color\n\"\"\"\nSCENE_NOT_FOUND_MESSAGE = \"\"\"\n That scene is not in the script\n\"\"\"\nCHOOSE_NUMBER_MESSAGE = \"\"\"\nChoose number corresponding to desired scene/arguments.\n(Use comma separated list for multiple entries, or start-end or a range)\nChoice(s): \"\"\"\nINVALID_NUMBER_MESSAGE = \"Fine then, if you don't want to give a valid number I'll just quit\"\n\nNO_SCENE_MESSAGE = \"\"\"\n There are no scenes inside that module\n\"\"\"\n\n\nLOW_QUALITY_FRAME_DURATION = 1. / 15\nMEDIUM_QUALITY_FRAME_DURATION = 1. / 30\nPRODUCTION_QUALITY_FRAME_DURATION = 1. 
/ 60\n\n# There might be other configuration than pixel shape later...\nPRODUCTION_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1440,\n \"pixel_width\": 2560,\n}\n\nHIGH_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1080,\n \"pixel_width\": 1920,\n}\n\nMEDIUM_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 720,\n \"pixel_width\": 1280,\n}\n\nLOW_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 480,\n \"pixel_width\": 854,\n}\n\nDEFAULT_PIXEL_HEIGHT = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_height\"]\nDEFAULT_PIXEL_WIDTH = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_width\"]\n\nDEFAULT_POINT_DENSITY_2D = 25\nDEFAULT_POINT_DENSITY_1D = 250\n\nDEFAULT_STROKE_WIDTH = 4\n\nFRAME_HEIGHT = 8.0\nFRAME_WIDTH = FRAME_HEIGHT * DEFAULT_PIXEL_WIDTH / DEFAULT_PIXEL_HEIGHT\nFRAME_Y_RADIUS = FRAME_HEIGHT / 2\nFRAME_X_RADIUS = FRAME_WIDTH / 2\n\nSMALL_BUFF = 0.1\nMED_SMALL_BUFF = 0.25\nMED_LARGE_BUFF = 0.5\nLARGE_BUFF = 1\n\nDEFAULT_MOBJECT_TO_EDGE_BUFFER = MED_LARGE_BUFF\nDEFAULT_MOBJECT_TO_MOBJECT_BUFFER = MED_SMALL_BUFF\n\n\n# All in seconds\nDEFAULT_ANIMATION_RUN_TIME = 1.0\nDEFAULT_POINTWISE_FUNCTION_RUN_TIME = 3.0\nDEFAULT_WAIT_TIME = 1.0\n\n\nORIGIN = np.array((0., 0., 0.))\nUP = np.array((0., 1., 0.))\nDOWN = np.array((0., -1., 0.))\nRIGHT = np.array((1., 0., 0.))\nLEFT = np.array((-1., 0., 0.))\nIN = np.array((0., 0., -1.))\nOUT = np.array((0., 0., 1.))\nX_AXIS = np.array((1., 0., 0.))\nY_AXIS = np.array((0., 1., 0.))\nZ_AXIS = np.array((0., 0., 1.))\n\n# Useful abbreviations for diagonals\nUL = UP + LEFT\nUR = UP + RIGHT\nDL = DOWN + LEFT\nDR = DOWN + RIGHT\n\nTOP = FRAME_Y_RADIUS * UP\nBOTTOM = FRAME_Y_RADIUS * DOWN\nLEFT_SIDE = FRAME_X_RADIUS * LEFT\nRIGHT_SIDE = FRAME_X_RADIUS * RIGHT\n\nPI = np.pi\nTAU = 2 * PI\nDEGREES = TAU / 360\n\nANIMATIONS_DIR = os.path.join(MEDIA_DIR, \"animations\")\nRASTER_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"raster_images\")\nSVG_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"svg_images\")\n# TODO, staged scenes should really go into a subdirectory of a given scenes directory\nSTAGED_SCENES_DIR = os.path.join(ANIMATIONS_DIR, \"staged_scenes\")\n###\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nFILE_DIR = os.path.join(THIS_DIR, \"files\")\nTEX_DIR = os.path.join(FILE_DIR, \"Tex\")\nTEX_IMAGE_DIR = TEX_DIR # TODO, What is this doing?\n# These two may be depricated now.\nMOBJECT_DIR = os.path.join(FILE_DIR, \"mobjects\")\nIMAGE_MOBJECT_DIR = os.path.join(MOBJECT_DIR, \"image\")\n\nfor folder in [FILE_DIR, RASTER_IMAGE_DIR, SVG_IMAGE_DIR, ANIMATIONS_DIR, TEX_DIR,\n TEX_IMAGE_DIR, MOBJECT_DIR, IMAGE_MOBJECT_DIR,\n STAGED_SCENES_DIR]:\n if not os.path.exists(folder):\n os.makedirs(folder)\n\nTEX_USE_CTEX = False\nTEX_FIX_SVG = False\nTEX_TEXT_TO_REPLACE = \"YourTextHere\"\nTEMPLATE_TEX_FILE = os.path.join(THIS_DIR, \"tex_template.tex\" if not TEX_USE_CTEX\n else \"ctex_template.tex\")\nwith open(TEMPLATE_TEX_FILE, \"r\") as infile:\n TEMPLATE_TEXT_FILE_BODY = infile.read()\n TEMPLATE_TEX_FILE_BODY = TEMPLATE_TEXT_FILE_BODY.replace(\n TEX_TEXT_TO_REPLACE,\n \"\\\\begin{align*}\" + TEX_TEXT_TO_REPLACE + \"\\\\end{align*}\",\n )\n\nFFMPEG_BIN = \"ffmpeg\"\n\n\n# Colors\n\nCOLOR_MAP = {\n \"DARK_BLUE\": \"#236B8E\",\n \"DARK_BROWN\": \"#8B4513\",\n \"LIGHT_BROWN\": \"#CD853F\",\n \"BLUE_A\" : \"#1C758A\",\n \"BLUE_B\" : \"#29ABCA\",\n \"BLUE_C\" : \"#58C4DD\",\n \"BLUE_D\" : \"#9CDCEB\",\n \"BLUE_E\" : \"#C7E9F1\",\n \"TEAL_E\": \"#49A88F\",\n \"TEAL_D\": \"#55C1A7\",\n \"TEAL_C\": \"#5CD0B3\",\n \"TEAL_B\": \"#76DDC0\",\n \"TEAL_A\": \"#ACEAD7\",\n 
\"GREEN_E\": \"#699C52\",\n \"GREEN_D\": \"#77B05D\",\n \"GREEN_C\": \"#83C167\",\n \"GREEN_B\": \"#A6CF8C\",\n \"GREEN_A\": \"#C9E2AE\",\n \"YELLOW_E\": \"#E8C11C\",\n \"YELLOW_D\": \"#F4D345\",\n \"YELLOW_C\": \"#FFFF00\",\n \"YELLOW_B\": \"#FFEA94\",\n \"YELLOW_A\": \"#FFF1B6\",\n \"GOLD_E\": \"#C78D46\",\n \"GOLD_D\": \"#E1A158\",\n \"GOLD_C\": \"#F0AC5F\",\n \"GOLD_B\": \"#F9B775\",\n \"GOLD_A\": \"#F7C797\",\n \"RED_E\": \"#CF5044\",\n \"RED_D\": \"#E65A4C\",\n \"RED_C\": \"#FC6255\",\n \"RED_B\": \"#FF8080\",\n \"RED_A\": \"#F7A1A3\",\n \"MAROON_E\": \"#94424F\",\n \"MAROON_D\": \"#A24D61\",\n \"MAROON_C\": \"#C55F73\",\n \"MAROON_B\": \"#EC92AB\",\n \"MAROON_A\": \"#ECABC1\",\n \"PURPLE_A\" : \"#644172\",\n \"PURPLE_B\" : \"#715582\",\n \"PURPLE_C\" : \"#9A72AC\",\n \"PURPLE_D\" : \"#B189C6\",\n \"PURPLE_E\" : \"#CAA3E8\",\n \"WHITE\": \"#FFFFFF\",\n \"BLACK\": \"#000000\",\n \"LIGHT_GRAY\": \"#BBBBBB\",\n \"LIGHT_GREY\": \"#BBBBBB\",\n \"GRAY\": \"#888888\",\n \"GREY\": \"#888888\",\n \"DARK_GREY\": \"#444444\",\n \"DARK_GRAY\": \"#444444\",\n \"GREY_BROWN\": \"#736357\",\n \"PINK\": \"#D147BD\",\n \"GREEN_SCREEN\": \"#00FF00\",\n \"ORANGE\": \"#FF862F\",\n\n \"ORANGE\": \"#FF7054\", # hsl(10, 67, 60)\n \"MAGENTA_E\": \"#993265\", # hsl(330, 67, 60)\n \"MAGENTA_D\": \"#B23A76\", # hsl(330, 67, 70)\n \"MAGENTA_C\": \"#CC4387\", # hsl(330, 67, 80)\n \"MAGENTA_B\": \"#E54B98\", # hsl(330, 67, 90)\n \"MAGENTA_A\": \"#FF54A9\", # hsl(330, 67, 100)\n \"VIOLET_E\": \"#663399\", # hsl(270, 67, 60)\n \"VIOLET_D\": \"#773BB2\", # hsl(270, 67, 70)\n \"VIOLET_C\": \"#8844CC\", # hsl(270, 67, 80)\n \"VIOLET_B\": \"#994CE5\", # hsl(270, 67, 90)\n \"VIOLET_A\": \"#AA55FF\", # hsl(270, 67, 100)\n \"TEAL_E\": \"#326599\", # hsl(210, 67, 60)\n \"TEAL_D\": \"#3A76B2\", # hsl(210, 67, 70)\n \"TEAL_C\": \"#4387CC\", # hsl(210, 67, 80)\n \"TEAL_B\": \"#4B98E5\", # hsl(210, 67, 90)\n \"TEAL_A\": \"#54A9FF\", # hsl(210, 67, 100)\n}\n\nfor color_name,color_hex in COLOR_MAP.items():\n if color_name == \"WHITE\" or color_name == \"BLACK\":\n continue\n c = colour.Color(color_hex)\n c.set_luminance(c.get_luminance() - 0.08)\n COLOR_MAP[color_name] = c.hex\n\nPALETTE = list(COLOR_MAP.values())\nlocals().update(COLOR_MAP)\nfor name in [s for s in list(COLOR_MAP.keys()) if s.endswith(\"_C\")]:\n locals()[name.replace(\"_C\", \"\")] = locals()[name]\n"
] |
[
[
"numpy.array"
]
] |
efrain2010/matchms
|
[
"80678fdc7325813a1e3bd3ad7e2a60cb7482026a",
"69cedeed5597966619a97c4e4211deaf8610d727"
] |
[
"tests/test_add_parent_mass.py",
"tests/test_ParentmassMatch.py"
] |
[
"import numpy\nfrom matchms import Spectrum\nfrom matchms.filtering import add_parent_mass\n\n\ndef test_add_parent_mass():\n \"\"\"Test if parent mass is correctly derived.\"\"\"\n mz = numpy.array([], dtype='float')\n intensities = numpy.array([], dtype='float')\n metadata = {\"pepmass\": (444.0, 10),\n \"charge\": -1}\n spectrum_in = Spectrum(mz=mz,\n intensities=intensities,\n metadata=metadata)\n\n spectrum = add_parent_mass(spectrum_in)\n\n assert numpy.abs(spectrum.get(\"parent_mass\") - 445.0) < .01, \"Expected parent mass of about 445.0.\"\n",
"import numpy\nimport pytest\nfrom matchms import Spectrum\nfrom matchms.similarity import ParentmassMatch\n\n\ndef test_parentmass_match():\n \"Test with default tolerance.\"\n spectrum_1 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={\"parent_mass\": 100.0})\n\n spectrum_2 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={\"parent_mass\": 101.0})\n\n similarity_score = ParentmassMatch()\n score = similarity_score(spectrum_1, spectrum_2)\n assert not score, \"Expected different score.\"\n\n\ndef test_parentmass_match_tolerance2():\n \"Test with tolerance > difference.\"\n spectrum_1 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={\"parent_mass\": 100.0})\n\n spectrum_2 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={\"parent_mass\": 101.0})\n\n similarity_score = ParentmassMatch(tolerance=2.0)\n score = similarity_score(spectrum_1, spectrum_2)\n assert score, \"Expected different score.\"\n\n\ndef test_parentmass_match_missing_parentmass():\n \"Test with missing parentmass.\"\n spectrum_1 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={\"parent_mass\": 100.0})\n\n spectrum_2 = Spectrum(mz=numpy.array([], dtype=\"float\"),\n intensities=numpy.array([], dtype=\"float\"),\n metadata={})\n\n similarity_score = ParentmassMatch(tolerance=2.0)\n\n with pytest.raises(AssertionError) as msg:\n _ = similarity_score(spectrum_1, spectrum_2)\n\n expected_message_part = \"Missing parent mass.\"\n assert expected_message_part in str(msg.value), \"Expected particular error message.\"\n"
] |
[
[
"numpy.array"
],
[
"numpy.array"
]
] |
MichaelMarien/jax
|
[
"bf3c658114703e955f0b06642c53c6b64c5b2df3"
] |
[
"jax/experimental/maps.py"
] |
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\nimport contextlib\nimport numpy as np\nimport itertools as it\nfrom collections import OrderedDict, abc, namedtuple\nfrom typing import (Callable, Iterable, Tuple, Optional, Dict, Any, Set,\n NamedTuple, Union, Sequence)\nfrom warnings import warn\nfrom functools import wraps, partial, partialmethod\nfrom enum import Enum\n\nfrom jax import numpy as jnp\nfrom jax import core\nfrom jax import linear_util as lu\nfrom jax._src.api import Lowered, _check_callable, _check_arg\nfrom jax._src import dispatch\nfrom jax.tree_util import (tree_flatten, tree_unflatten, all_leaves, tree_map,\n tree_leaves)\nfrom jax._src.tree_util import _replace_nones\nfrom jax._src.api_util import (flatten_fun_nokwargs, flatten_axes,\n _ensure_index_tuple, donation_vector,\n shaped_abstractify)\nfrom jax._src import source_info_util\nfrom jax._src.config import config\nfrom jax.errors import JAXTypeError\nfrom jax.interpreters import mlir\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import pxla\nfrom jax.interpreters import xla\nfrom jax.interpreters import batching\nfrom jax.interpreters import ad\nfrom jax._src.lib import xla_bridge as xb\nfrom jax._src.lib import xla_client as xc\nfrom jax._src.util import (safe_map, safe_zip, HashableFunction,\n as_hashable_function, unzip2, distributed_debug_log,\n tuple_insert, moveaxis, split_list, wrap_name)\nfrom jax import lax\n\nmap, unsafe_map = safe_map, map\nzip = safe_zip\n\nxops = xc.ops\n\n\nclass _PositionalSemantics(Enum):\n \"\"\"Indicates whether the positional shapes of inputs should be interpreted as\n global or local with respect to the multi-host mesh.\n\n While named axes are always associated with global sizes, the outermost pjit\n is the boundary between the local shapes in the outer scope and global\n positional shapes in its inner scope. 
pjits nested inside that one should not\n attempt to increase the sizes of avals again, and xmap has to take this into\n account when inferring the global size of a named axis.\n \"\"\"\n LOCAL = 0\n GLOBAL = 1\n\n\nclass _PSThreadLocalState(threading.local):\n\n def __init__(self):\n self.val = _PositionalSemantics.LOCAL\n\n_positional_semantics = _PSThreadLocalState()\n\n\nclass FrozenDict(abc.Mapping):\n def __init__(self, *args, **kwargs):\n self.contents = dict(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.contents)\n\n def __len__(self):\n return len(self.contents)\n\n def __getitem__(self, name):\n return self.contents[name]\n\n def __eq__(self, other):\n return isinstance(other, FrozenDict) and self.contents == other.contents\n\n def __hash__(self):\n return hash(tuple(self.contents.items()))\n\n def __repr__(self):\n return f\"FrozenDict({self.contents})\"\n\n# Multi-dimensional generalized map\n\nAxisName = core.AxisName\nResourceAxisName = AxisName # Different name just for documentation purposes\nMesh = pxla.Mesh\n\nclass _Loop(NamedTuple):\n name: ResourceAxisName\n length: int\n\nclass ResourceEnv(NamedTuple):\n physical_mesh: Mesh\n loops: Tuple[_Loop, ...]\n\n def with_mesh(self, mesh: Mesh):\n overlap = set(mesh.axis_names) & (self.resource_axes - set(self.physical_mesh.axis_names))\n if overlap:\n raise ValueError(f\"Cannot update the mesh of the current resource \"\n f\"environment. The new mesh shadows already defined axes \"\n f\"{show_axes(overlap)}\")\n return self._replace(physical_mesh=mesh)\n\n def with_extra_loop(self, loop: _Loop):\n if loop.name in self.resource_axes:\n raise ValueError(f\"Cannot extend the resource environment with loop named \"\n f\"`{loop.name}`. An axis of this name is already defined!\")\n return self._replace(loops=self.loops + (loop,))\n\n @property\n def physical_resource_axes(self) -> Set[ResourceAxisName]:\n return set(self.physical_mesh.axis_names)\n\n @property\n def loop_resource_axes(self) -> Set[ResourceAxisName]:\n return set(loop.name for loop in self.loops)\n\n @property\n def resource_axes(self) -> Set[ResourceAxisName]:\n return self.physical_resource_axes | self.loop_resource_axes\n\n @property\n def shape(self):\n shape = self.physical_mesh.shape\n shape.update(self.loops)\n return shape\n\n @property\n def local_shape(self):\n shape = self.physical_mesh.local_mesh.shape\n shape.update(self.loops)\n return shape\n\n def __repr__(self):\n return f\"ResourceEnv({self.physical_mesh!r}, {self.loops!r})\"\n\nEMPTY_ENV = ResourceEnv(Mesh(np.empty((), dtype=object), ()), ())\n\nclass _ThreadResourcesLocalState(threading.local):\n\n def __init__(self):\n self.env = EMPTY_ENV\n\nthread_resources = _ThreadResourcesLocalState()\n\n\nclass SerialLoop:\n \"\"\"Create an anonymous serial loop resource for use in a single xmap axis.\n\n A use of :py:class:`SerialLoop` in :py:func:`xmap`'s ``axis_resources``\n extends the resource environment with a new serial loop with a unique\n unspecified name, that will only be used to partition the axis that\n used a given instance.\n\n This is unlike :py:func:`serial_loop`, which makes it possible to iterate\n jointly over chunks of multiple axes (with the usual requirement that they\n do not coincide in a named shape of any value in the program).\n\n Example::\n\n # Processes `x` in a vectorized way, but in 20 micro-batches.\n xmap(f, in_axes=['i'], out_axes=[i], axis_resources={'i': SerialLoop(20)})(x)\n\n # Computes the result in a vectorized way, but in 400 micro-batches,\n # once 
for each coordinate (0, 0) <= (i, j) < (20, 20). Each `SerialLoop`\n # creates a fresh anonymous loop.\n xmap(h, in_axes=(['i'], ['j']), out_axes=['i', 'j'],\n axis_resources={'i': SerialLoop(20), 'j': SerialLoop(20)})(x, y)\n \"\"\"\n length: int\n\n def __init__(self, length):\n self.length = length\n\n def __eq__(self, other):\n return self.length == other.length\n\n def __hash__(self):\n return hash(self.length)\n\n\n@contextlib.contextmanager\ndef serial_loop(name: ResourceAxisName, length: int):\n \"\"\"Define a serial loop resource to be available in scope of this context manager.\n\n This is similar to :py:func:`mesh` in that it extends the resource\n environment with a resource called ``name``. But, any use of this resource\n axis in ``axis_resources`` argument of :py:func:`xmap` will cause the\n body of :py:func:`xmap` to get executed ``length`` times with each execution\n only processing only a slice of inputs mapped along logical axes assigned\n to this resource.\n\n This is especially useful in that it makes it possible to lower the memory\n usage compared to :py:func:`vmap`, because it will avoid simultaneous\n materialization of intermediate values for every point in the batch.\n\n Note that collectives over loop axes are not supported, so they are less\n versatile than physical mesh axes.\n\n Args:\n name: Name of the loop in the resource environment.\n length: Number of iterations.\n\n Example::\n\n with loop('l', 4):\n out = xmap(\n lambda x: jnp.sin(x) * 5, # This will be called 4 times with different\n # slices of x.\n in_axes=['i'], out_axes=['i'],\n axis_resources={'i': 'l'})(x)\n \"\"\"\n old_env: ResourceEnv = getattr(thread_resources, \"env\", EMPTY_ENV)\n thread_resources.env = old_env.with_extra_loop(_Loop(name, length))\n try:\n yield\n finally:\n thread_resources.env = old_env\n\n\n@contextlib.contextmanager\ndef mesh(devices: np.ndarray, axis_names: Sequence[ResourceAxisName]):\n \"\"\"Declare the hardware resources available in the scope of this manager.\n\n In particular, all ``axis_names`` become valid resource names inside the\n managed block and can be used e.g. in the ``axis_resources`` argument of\n :py:func:`xmap`.\n\n If you are compiling in multiple threads, make sure that the\n ``with mesh`` context manager is inside the function that the threads will\n execute.\n\n Args:\n devices: A NumPy ndarray object containing JAX device objects (as\n obtained e.g. from :py:func:`jax.devices`).\n axis_names: A sequence of resource axis names to be assigned to the\n dimensions of the ``devices`` argument. 
Its length should match the\n rank of ``devices``.\n\n Example::\n\n devices = np.array(jax.devices())[:4].reshape((2, 2))\n with mesh(devices, ('x', 'y')): # declare a 2D mesh with axes 'x' and 'y'\n distributed_out = xmap(\n jnp.vdot,\n in_axes=({0: 'left', 1: 'right'}),\n out_axes=['left', 'right', ...],\n axis_resources={'left': 'x', 'right': 'y'})(x, x.T)\n \"\"\"\n old_env: ResourceEnv = getattr(thread_resources, \"env\", EMPTY_ENV)\n thread_resources.env = old_env.with_mesh(Mesh(np.asarray(devices, dtype=object), axis_names))\n try:\n yield\n finally:\n thread_resources.env = old_env\n\n_next_resource_id = 0\nclass _UniqueResourceName:\n def __init__(self, uid, tag=None):\n self.uid = uid\n self.tag = tag\n def __eq__(self, other):\n return type(other) is _UniqueResourceName and self.uid == other.uid\n def __hash__(self):\n return hash(self.uid)\n def __repr__(self):\n return f\"<UniqueResource {self.tag} {self.uid}>\"\n\ndef fresh_resource_name(tag=None):\n global _next_resource_id\n try:\n return _UniqueResourceName(_next_resource_id, tag)\n finally:\n _next_resource_id += 1\n\n\n# This is really a Dict[AxisName, int], but we don't define a\n# pytree instance for it, so that it is treated as a leaf.\nclass AxisNamePos(FrozenDict):\n user_repr: str\n expected_rank: Optional[int] = None\n\n def __init__(self, *args, user_repr, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_repr = user_repr\n\nclass AxisNamePosWithRank(AxisNamePos):\n def __init__(self, *args, expected_rank, **kwargs):\n super().__init__(*args, **kwargs)\n self.expected_rank = expected_rank\n\n\n# str(...) == 'Ellipsis' which is really annoying\nclass DotDotDotRepr:\n def __repr__(self): return '...'\n\n\ndef _parse_entry(arg_name, entry):\n # Dictionaries mapping axis names to positional axes\n if isinstance(entry, dict) and all(isinstance(v, int) for v in entry.keys()):\n result = AxisNamePos(((name, axis) for axis, name in entry.items()),\n user_repr=str(entry))\n num_mapped_dims = len(entry)\n # Non-empty lists or tuples that optionally terminate with an ellipsis\n elif isinstance(entry, (tuple, list)):\n if entry and entry[-1] == ...:\n constr = AxisNamePos\n entry = entry[:-1]\n tail = [DotDotDotRepr()] if isinstance(entry, list) else (DotDotDotRepr(),)\n user_repr = str(entry + tail)\n else:\n constr = partial(AxisNamePosWithRank, expected_rank=len(entry))\n user_repr = str(entry)\n result = constr(((name, axis) for axis, name in enumerate(entry)\n if name is not None),\n user_repr=user_repr)\n num_mapped_dims = sum(name is not None for name in entry)\n else:\n raise TypeError(f\"\"\"\\\nValue mapping specification in xmap {arg_name} pytree can be either:\n- lists of axis names (possibly ending with the ellipsis object: ...)\n- dictionaries that map positional axes (integers) to axis names (e.g. 
{2: 'name'})\nbut got: {entry}\"\"\")\n if len(result) != num_mapped_dims:\n raise ValueError(f\"Named axes should be unique within each {arg_name} argument \"\n f\"specification, but one them is: {entry}\")\n for axis in result.values():\n if axis < 0:\n raise ValueError(f\"xmap doesn't support negative axes in {arg_name}\")\n return result\n\ndef _is_axes_leaf(entry):\n if isinstance(entry, dict) and all_leaves(entry.values()):\n return True\n # NOTE: `None`s are not considered leaves by `all_leaves`\n if isinstance(entry, (tuple, list)) and all_leaves(v for v in entry if v is not None):\n return True\n return False\n\ndef _prepare_axes(axes, arg_name):\n entries, treedef = tree_flatten(axes, is_leaf=_is_axes_leaf)\n entries = map(partial(_parse_entry, arg_name), entries)\n return tree_unflatten(treedef, entries), entries, treedef\n\nResource = Union[ResourceAxisName, SerialLoop]\nResourceSet = Union[Resource, Tuple[Resource, ...]]\n\n# TODO: Some syntactic sugar to make the API more usable in a single-axis case?\n# TODO: Are the resource axes scoped lexically or dynamically? Dynamically for now!\ndef xmap(fun: Callable,\n in_axes,\n out_axes,\n *,\n axis_sizes: Dict[AxisName, int] = {},\n axis_resources: Dict[AxisName, ResourceSet] = {},\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\n \"\"\"Assign a positional signature to a program that uses named array axes.\n\n .. warning::\n This is an experimental feature and the details can change at\n any time. Use at your own risk!\n\n .. warning::\n This docstring is aspirational. Not all features of the named axis\n programming model have been implemented just yet.\n\n The usual programming model of JAX (or really NumPy) associates each array\n with two pieces of metadata describing its type: the element type (``dtype``)\n and the ``shape``. :py:func:`xmap` extends this model by adding support for\n *named axes*. In particular, each array used in a function wrapped by\n :py:func:`xmap` can additionally have a non-empty ``named_shape`` attribute,\n which can be used to query the set of named axes (introduced by\n :py:func:`xmap`) appearing in that value along with their shapes.\n Furthermore, in most places where positional axis indices are allowed (for\n example the `axes` arguments in :py:func:`sum`), bound axis names are also\n accepted. The :py:func:`einsum` language is extended inside :py:func:`xmap`\n to additionally allow contractions that involve named axes. Broadcasting of\n named axes happens *by name*, i.e. all axes with equal names are expected to\n have equal shapes in all arguments of a broadcasting operation, while the\n result has a (set) union of all named axes. The positional semantics of the\n program remain unchanged, and broadcasting still implicitly right-aligns\n positional axes for unification. For an extended description of the\n :py:func:`xmap` programming model, please refer to the :py:func:`xmap`\n tutorial notebook in main JAX documentation.\n\n Note that since all top-level JAX expressions are interpreted in the NumPy\n programming model, :py:func:`xmap` can also be seen as an adapter that\n converts a function that uses named axes (including in arguments and returned\n values) into one that takes and returns values that only have positional\n axes.\n\n The default lowering strategy of :py:func:`xmap` converts all named axes into\n positional axes, working similarly to multiple applications of\n :py:func:`vmap`. 
However, this behavior can be further customized by the\n ``axis_resources`` argument. When specified, each axis introduced by\n :py:func:`xmap` can be assigned to one or more *resource axes*. Those include\n the axes of the hardware mesh, as defined by the :py:func:`mesh` context\n manager. Each value that has a named axis in its ``named_shape`` will be\n partitioned over all mesh axes that axis is assigned to. Hence,\n :py:func:`xmap` can be seen as an alternative to :py:func:`pmap` that also\n exposes a way to automatically partition the computation over multiple\n devices.\n\n .. warning::\n While it is possible to assign multiple axis names to a single resource axis,\n care has to be taken to ensure that none of those named axes co-occur in a\n ``named_shape`` of any value in the named program. At the moment this is\n **completely unchecked** and will result in **undefined behavior**. The\n final release of :py:func:`xmap` will enforce this invariant, but it is a\n work in progress.\n\n Note that you do not have to worry about any of this for as long as no\n resource axis is repeated in ``axis_resources.values()``.\n\n Note that any assignment of ``axis_resources`` doesn't ever change the\n results of the computation, but only how it is carried out (e.g. how many\n devices are used). This makes it easy to try out various ways of\n partitioning a single program in many distributed scenarios (both small- and\n large-scale), to maximize the performance. As such, :py:func:`xmap` can be\n seen as a way to seamlessly interpolate between :py:func:`vmap` and\n :py:func:`pmap`-style execution.\n\n Args:\n fun: Function that uses named axes. Its arguments and return\n value should be arrays, scalars, or (nested) standard Python containers\n (tuple/list/dict) thereof (in general: valid pytrees).\n in_axes: A Python object with the same container (pytree) structure as the\n signature of arguments to ``fun``, but with a positional-to-named axis\n mapping in place of every array argument. The valid positional-to-named\n mappings are: (1) a ``Dict[int, AxisName]`` specifying that a positional\n dimensions given by dictionary keys are to be converted to named axes\n of given names (2) a list of axis names that ends with the Ellipsis object\n (``...``) in which case a number of leading positional axes of the argument\n will be converted into named axes inside the function. Note that ``in_axes``\n can also be a prefix of the argument container structure, in which case the\n mapping is repeated for all arrays in the collapsed subtree.\n out_axes: A Python object with the same container (pytree) structure as the\n returns of ``fun``, but with a positional-to-named axis mapping in place\n of every returned array. The valid positional-to-named mappings are the same\n as in ``in_axes``. Note that ``out_axes`` can also be a prefix of the return\n container structure, in which case the mapping is repeated for all arrays\n in the collapsed subtree.\n axis_sizes: A dict mapping axis names to their sizes. All axes defined by xmap\n have to appear either in ``in_axes`` or ``axis_sizes``. Sizes of axes\n that appear in ``in_axes`` are inferred from arguments whenever possible.\n In multi-host scenarios, the user-specified sizes are expected to be the\n global axis sizes (and might not match the expected size of local inputs).\n axis_resources: A dictionary mapping the axes introduced in this\n :py:func:`xmap` to one or more resource axes. 
Any array that has in its\n shape an axis with some resources assigned will be partitioned over the\n resources associated with the respective resource axes.\n donate_argnums: Specify which argument buffers are \"donated\" to the computation.\n It is safe to donate argument buffers if you no longer need them once the\n computation has finished. In some cases XLA can make use of donated\n buffers to reduce the amount of memory needed to perform a computation,\n for example recycling one of your input buffers to store a result. You\n should not reuse buffers that you donate to a computation, JAX will raise\n an error if you try to.\n\n For more details on buffer donation see the [FAQ](https://jax.readthedocs.io/en/latest/faq.html#buffer-donation).\n\n backend: This is an experimental feature and the API is likely to change.\n Optional, a string representing the XLA backend. 'cpu', 'gpu', or 'tpu'.\n\n Returns:\n A version of ``fun`` that takes in arrays with positional axes in place of\n named axes bound in this :py:func:`xmap` call, and results with all named\n axes converted to positional axes. If ``axis_resources`` is specified,\n ``fun`` can additionally execute in parallel on multiple devices.\n\n For example, :py:func:`xmap` makes it very easy to convert a function that\n computes the vector inner product (such as :py:func:`jax.numpy.vdot`) into\n one that computes a matrix multiplication:\n\n >>> import jax.numpy as jnp\n >>> x = jnp.arange(10).reshape((2, 5))\n >>> xmap(jnp.vdot,\n ... in_axes=({0: 'left'}, {1: 'right'}),\n ... out_axes=['left', 'right', ...])(x, x.T)\n DeviceArray([[ 30, 80],\n [ 80, 255]], dtype=int32)\n\n Note that the contraction in the program is performed over the positional axes,\n while named axes are just a convenient way to achieve batching. While this\n might seem like a silly example at first, it might turn out to be useful in\n practice, since with conjuction with ``axis_resources`` this makes it possible\n to implement a distributed matrix-multiplication in just a few lines of code::\n\n devices = np.array(jax.devices())[:4].reshape((2, 2))\n with mesh(devices, ('x', 'y')): # declare a 2D mesh with axes 'x' and 'y'\n distributed_out = xmap(\n jnp.vdot,\n in_axes=({0: 'left'}, {1: 'right'}),\n out_axes=['left', 'right', ...],\n axis_resources={'left': 'x', 'right': 'y'})(x, x.T)\n\n Still, the above examples are quite simple. After all, the xmapped\n computation was a simple NumPy function that didn't use the axis names at all!\n So, let's explore a slightly larger example which is linear regression::\n\n def regression_loss(x, y, w, b):\n # Contract over in_features. Batch and out_features are present in\n # both inputs and output, so they don't need to be mentioned\n y_pred = jnp.einsum('{in_features},{in_features}->{}', x, w) + b\n error = jnp.sum((y - y_pred) ** 2, axis='out_features')\n return jnp.mean(error, axis='batch')\n\n xmap(regression_loss,\n in_axes=(['batch', 'in_features', ...],\n ['batch', 'out_features', ...],\n ['in_features', 'out_features', ...],\n ['out_features', ...]),\n out_axes={}) # Loss is reduced over all axes, including batch!\n\n .. note::\n When using ``axis_resources`` along with a mesh that is controlled by\n multiple JAX hosts, keep in mind that in any given process :py:func:`xmap`\n only expects the data slice that corresponds to its local devices to be\n specified. 
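For instance (a purely hypothetical layout), if a mesh axis is backed by 8 devices split evenly across two processes and a named axis of global size 8 is assigned to it, each process passes arrays that carry only the 4-element slice residing on its own devices. 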
This is in line with the current multi-host :py:func:`pmap`\n programming model.\n \"\"\"\n warn(\"xmap is an experimental feature and probably has bugs!\")\n _check_callable(fun)\n\n if isinstance(in_axes, list) and not _is_axes_leaf(in_axes):\n # To be a tree prefix of the positional args tuple, in_axes can never be a\n # list: if in_axes is not a leaf, it must be a tuple of trees. However,\n # in cases like these users expect tuples and lists to be treated\n # essentially interchangeably, so we canonicalize lists to tuples here\n # rather than raising an error. https://github.com/google/jax/issues/2367\n in_axes = tuple(in_axes)\n\n if in_axes == (): # Allow empty argument lists\n in_axes, in_axes_entries = (), []\n else:\n in_axes, in_axes_entries, _ = _prepare_axes(in_axes, \"in_axes\")\n if out_axes == ():\n raise ValueError(\"xmapped functions cannot have no return values\")\n else:\n out_axes, out_axes_entries, out_axes_treedef = _prepare_axes(out_axes, \"out_axes\")\n out_axes_entries = tuple(out_axes_entries) # Make entries hashable\n\n axis_sizes_names = set(axis_sizes.keys())\n in_axes_names = set(it.chain(*(spec.keys() for spec in in_axes_entries)))\n defined_names = axis_sizes_names | in_axes_names\n out_axes_names = set(it.chain(*(spec.keys() for spec in out_axes_entries)))\n\n anon_serial_loops = []\n def normalize_resource(r) -> ResourceAxisName:\n if isinstance(r, SerialLoop):\n name = fresh_resource_name()\n anon_serial_loops.append((name, r.length))\n return name\n return r\n\n normalized_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]] = {}\n for axis in defined_names:\n resources = axis_resources.get(axis, ())\n if not isinstance(resources, tuple):\n resources = (resources,)\n normalized_axis_resources[axis] = tuple(unsafe_map(normalize_resource, resources))\n frozen_axis_resources = FrozenDict(normalized_axis_resources)\n necessary_resources = set(it.chain(*frozen_axis_resources.values()))\n\n axes_with_resources = set(frozen_axis_resources.keys())\n if axes_with_resources > defined_names:\n raise ValueError(f\"All axes that were assigned resources have to appear in \"\n f\"in_axes or axis_sizes, but the following are missing: \"\n f\"{axes_with_resources - defined_names}\")\n if out_axes_names > defined_names:\n raise ValueError(f\"All axis names appearing in out_axes must also appear in \"\n f\"in_axes or axis_sizes, but the following are missing: \"\n f\"{out_axes_names - defined_names}\")\n\n for axis, resources in frozen_axis_resources.items():\n if len(set(resources)) != len(resources): # type: ignore\n raise ValueError(f\"Resource assignment of a single axis must be a tuple of \"\n f\"distinct resources, but specified {resources} for axis {axis}\")\n\n donate_argnums = _ensure_index_tuple(donate_argnums)\n\n # A little performance optimization to avoid iterating over all args unnecessarily\n has_input_rank_assertions = any(spec.expected_rank is not None for spec in in_axes_entries)\n has_output_rank_assertions = any(spec.expected_rank is not None for spec in out_axes_entries)\n\n def infer_params(*args):\n # Putting this outside of fun_mapped would make resources lexically scoped\n resource_env = thread_resources.env\n available_resources = set(resource_env.shape.keys())\n\n if necessary_resources - available_resources:\n raise ValueError(f\"In-scope resources are insufficient to execute the \"\n f\"xmapped function. 
The missing resources are: \"\n f\"{necessary_resources - available_resources}\")\n\n args_flat, in_tree = tree_flatten(args)\n fun_flat, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)\n if donate_argnums:\n donated_invars = donation_vector(donate_argnums, args, ())\n else:\n donated_invars = (False,) * len(args_flat)\n in_axes_flat = _flatten_axes(\"xmap in_axes\", in_tree, in_axes, tupled_args=True)\n\n # Some pytree containers might be unhashable, so we flatten the out_axes\n # pytree into a treedef and entries which are guaranteed to be hashable.\n out_axes_thunk = HashableFunction(\n lambda: tuple(_flatten_axes(\"xmap out_axes\", out_tree(), out_axes, tupled_args=False)),\n closure=(out_axes_entries, out_axes_treedef))\n\n axis_resource_count = _get_axis_resource_count(\n _positional_semantics.val, frozen_axis_resources, resource_env)\n for axis, size in axis_sizes.items():\n resources = axis_resource_count[axis]\n if size % resources.nglobal != 0:\n global_size = \"Global size\" if resources.distributed else \"Size\"\n raise ValueError(f\"{global_size} of axis {axis} ({size}) is not divisible \"\n f\"by the total number of resources assigned to this axis \"\n f\"({frozen_axis_resources[axis]}, {resources.nglobal} in total)\")\n frozen_global_axis_sizes = _get_axis_sizes(args_flat, in_axes_flat,\n axis_sizes, axis_resource_count)\n\n missing_sizes = defined_names - set(frozen_global_axis_sizes.keys())\n if missing_sizes:\n raise ValueError(f\"Failed to infer size of axes: {', '.join(unsafe_map(str, missing_sizes))}. \"\n f\"You've probably passed in empty containers in place of arguments that had \"\n f\"those axes in their in_axes. Provide the sizes of missing axes explicitly \"\n f\"via axis_sizes to fix this error.\")\n\n if has_input_rank_assertions:\n for arg, spec in zip(args_flat, in_axes_flat):\n if spec.expected_rank is not None and spec.expected_rank != arg.ndim:\n raise ValueError(f\"xmap argument has an in_axes specification of {spec.user_repr}, \"\n f\"which asserts that it should be of rank {spec.expected_rank}, \"\n f\"but the argument has rank {arg.ndim} (and shape {arg.shape})\")\n params = dict(\n name=getattr(fun, '__name__', '<unnamed function>'),\n in_axes=tuple(in_axes_flat),\n out_axes_thunk=out_axes_thunk,\n donated_invars=donated_invars,\n global_axis_sizes=frozen_global_axis_sizes,\n axis_resources=frozen_axis_resources,\n resource_env=resource_env,\n backend=backend,\n spmd_in_axes=None,\n spmd_out_axes_thunk=None,\n positional_semantics=_positional_semantics.val)\n return fun_flat, args_flat, params, in_tree, out_tree\n\n def verify_outputs(out_flat, out_tree, params):\n if has_output_rank_assertions:\n for out, spec in zip(out_flat, params['out_axes_thunk']()):\n if spec.expected_rank is not None and spec.expected_rank != out.ndim:\n raise ValueError(f\"xmap output has an out_axes specification of {spec.user_repr}, \"\n f\"which asserts that it should be of rank {spec.expected_rank}, \"\n f\"but the output has rank {out.ndim} (and shape {out.shape})\")\n return tree_unflatten(out_tree(), out_flat)\n\n def fun_mapped(*args):\n tree_map(_check_arg, args)\n fun_flat, args_flat, params, _, out_tree = infer_params(*args)\n out_flat = xmap_p.bind(fun_flat, *args_flat, **params)\n return verify_outputs(out_flat, out_tree, params)\n\n def decorate_serial(f):\n for loop_params in reversed(anon_serial_loops):\n f = serial_loop(*loop_params)(f)\n return f\n\n def lower(*args):\n fun_flat, args_flat, params, in_tree, out_tree = infer_params(*args)\n 
avals_flat = [shaped_abstractify(arg) for arg in args_flat]\n computation = make_xmap_callable(\n fun_flat, params['name'], params['in_axes'], params['out_axes_thunk'],\n params['donated_invars'], params['global_axis_sizes'], params['axis_resources'],\n params['resource_env'], params['backend'], params['spmd_in_axes'],\n params['spmd_out_axes_thunk'], params['positional_semantics'], *avals_flat)\n return Lowered(\n computation, in_tree, out_tree(), donate_argnums, no_kwargs=True)\n\n fun_mapped = wraps(fun)(decorate_serial(fun_mapped))\n fun_mapped.lower = decorate_serial(lower)\n\n return fun_mapped\n\ndef xmap_impl(fun: lu.WrappedFun, *args, name, in_axes, out_axes_thunk, donated_invars,\n global_axis_sizes, axis_resources, resource_env, backend,\n spmd_in_axes, spmd_out_axes_thunk, positional_semantics):\n in_avals = [core.raise_to_shaped(core.get_aval(arg)) for arg in args]\n xmap_callable = make_xmap_callable(\n fun, name, in_axes, out_axes_thunk, donated_invars, global_axis_sizes,\n axis_resources, resource_env, backend,\n spmd_in_axes, spmd_out_axes_thunk, positional_semantics,\n *in_avals).compile().unsafe_call\n distributed_debug_log((\"Running xmapped function\", name),\n (\"python function\", fun.f),\n (\"mesh\", resource_env.physical_mesh),\n (\"abstract args\", in_avals))\n return xmap_callable(*args)\n\n@lu.cache\ndef make_xmap_callable(fun: lu.WrappedFun,\n name,\n in_axes, out_axes_thunk, donated_invars,\n global_axis_sizes, axis_resources, resource_env, backend,\n spmd_in_axes, spmd_out_axes_thunk, positional_semantics,\n *in_avals):\n assert positional_semantics == _PositionalSemantics.LOCAL\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n # TODO: Making axis substitution final style would allow us to avoid\n # tracing to jaxpr here\n mapped_in_avals = [_delete_aval_axes(aval, in_axes, global_axis_sizes)\n for aval, in_axes in zip(in_avals, in_axes)]\n with core.extend_axis_env_nd(global_axis_sizes.items()):\n with dispatch.log_elapsed_time(f\"Finished tracing + transforming {fun.__name__} \"\n \"for xmap in {elapsed_time} sec\"):\n jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, mapped_in_avals)\n out_axes = out_axes_thunk()\n _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)\n # NOTE: We don't use avals and all params, so only pass in the relevant parts (too lazy...)\n _resource_typing_xmap([], dict(axis_resources=axis_resources,\n out_axes=out_axes,\n call_jaxpr=jaxpr,\n resource_env=resource_env,\n name=name),\n source_info_util.new_source_info(), resource_env, {})\n jaxpr = plan.subst_axes_with_resources(jaxpr)\n use_spmd_lowering = config.experimental_xmap_spmd_lowering\n ensure_fixed_sharding = config.experimental_xmap_ensure_fixed_sharding\n if use_spmd_lowering and ensure_fixed_sharding:\n jaxpr = _fix_inferred_spmd_sharding(jaxpr, resource_env)\n\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(jaxpr, consts)))\n f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n\n used_resources = _jaxpr_resources(jaxpr, resource_env) | set(it.chain(*axis_resources.values()))\n used_mesh_axes = used_resources & resource_env.physical_resource_axes\n if used_mesh_axes:\n assert spmd_in_axes is None and spmd_out_axes_thunk is None # No outer xmaps, so should be None\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n mesh = resource_env.physical_mesh\n global_in_avals = [mesh.local_to_global(ax, av)\n for ax, av in 
safe_zip(mesh_in_axes, in_avals)]\n if config.experimental_xmap_spmd_lowering_manual:\n tiling_method = pxla.TilingMethod.MANUAL\n else:\n tiling_method = pxla.TilingMethod.VECTORIZE\n return pxla.lower_mesh_computation(\n f, name, mesh,\n mesh_in_axes, mesh_out_axes, donated_invars,\n use_spmd_lowering, global_in_avals,\n tiling_method=tiling_method, in_is_gda=[False] * len(global_in_avals))\n else:\n return dispatch.lower_xla_callable(\n f, None, backend, name, donated_invars, *((a, None) for a in in_avals))\n\nclass EvaluationPlan(NamedTuple):\n \"\"\"Encapsulates preprocessing common to top-level xmap invocations and its translation rule.\"\"\"\n resource_env: ResourceEnv\n physical_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]\n loop_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]\n axis_subst_dict: Dict[AxisName, Tuple[ResourceAxisName, ...]]\n axis_vmap_size: Dict[AxisName, Optional[int]]\n\n @property\n def axis_subst(self) -> core.AxisSubst:\n return lambda name: self.axis_subst_dict.get(name, (name,))\n\n @property\n def resource_axis_env(self):\n env = dict(self.resource_env.shape)\n for axis, size in self.axis_vmap_size.items():\n if size is None:\n continue\n vmap_axis = self.axis_subst_dict[axis][-1]\n env[vmap_axis] = size\n return env\n\n @classmethod\n def from_axis_resources(cls,\n axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],\n resource_env: ResourceEnv,\n global_axis_sizes: Dict[AxisName, int]):\n physical_axis_resources, loop_axis_resources = _unzip_axis_resources(\n axis_resources, resource_env)\n axis_resource_count = _get_axis_resource_count(None, axis_resources, resource_env)\n axis_subst_dict = dict(axis_resources)\n axis_vmap_size: Dict[AxisName, Optional[int]] = {}\n for naxis, raxes in sorted(axis_resources.items(), key=lambda x: str(x[0])):\n num_resources = axis_resource_count[naxis]\n assert global_axis_sizes[naxis] % num_resources.nglobal == 0\n local_tile_size = global_axis_sizes[naxis] // num_resources.nglobal\n # We have to vmap when there are no resources (to handle the axis name!) or\n # when every resource gets chunks of values.\n if not raxes or local_tile_size > 1:\n axis_vmap_size[naxis] = local_tile_size\n axis_subst_dict[naxis] += (fresh_resource_name(naxis),)\n else:\n axis_vmap_size[naxis] = None\n return cls(resource_env,\n physical_axis_resources, loop_axis_resources,\n axis_subst_dict, axis_vmap_size)\n\n def subst_axes_with_resources(self, jaxpr):\n try:\n if any(self.loop_axis_resources.values()):\n _check_no_loop_collectives(jaxpr, self.loop_axis_resources)\n with core.extend_axis_env_nd(self.resource_axis_env.items()):\n return core.subst_axis_names_jaxpr(jaxpr, self.axis_subst)\n except core.DuplicateAxisNameError:\n raise AssertionError(\"Incomplete resource type-checking? 
Please open a bug report!\")\n\n def vectorize_and_loop(self, f: lu.WrappedFun, in_axes, out_axes) -> lu.WrappedFun:\n vmap_axes = {\n naxis: raxes[-1]\n for naxis, raxes in self.axis_subst_dict.items()\n if self.axis_vmap_size[naxis] is not None\n }\n for naxis, vaxis in sorted(vmap_axes.items(), key=lambda x: x[1].uid):\n local_tile_size = self.axis_vmap_size[naxis]\n map_in_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), in_axes))\n map_out_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), out_axes))\n f = batching.vtile(f, map_in_axes, map_out_axes, tile_size=local_tile_size, axis_name=vaxis)\n\n used_loops = set(it.chain.from_iterable(self.loop_axis_resources.values()))\n if not used_loops:\n return f\n\n if len(used_loops) > 1:\n # TODO: Support multiple loops\n raise NotImplementedError(\"Only one loop per xmap is supported\")\n loop_in_axes = _to_resource_axes(in_axes, self.loop_axis_resources)\n loop_out_axes = _to_resource_axes(out_axes, self.loop_axis_resources)\n loop_name, = used_loops\n loop_length = self.resource_env.shape[loop_name]\n def looped_f(*args):\n def body(i, _):\n # XXX: This call_wrapped is only valid under the assumption that scan\n # only ever traces the body once (which it does at the moment).\n result = f.call_wrapped(\n *(_slice_tile(arg, spec.get(loop_name, None), i, loop_length)\n for arg, spec in zip(args, loop_in_axes)))\n return i + 1, result\n _, stacked_results = lax.scan(body, 0, (), length=loop_length)\n return [_merge_leading_axis(sresult, spec.get(loop_name, None))\n for sresult, spec in zip(stacked_results, loop_out_axes)]\n return lu.wrap_init(looped_f)\n\n def to_mesh_axes(self, in_axes, out_axes):\n \"\"\"\n Convert in/out_axes parameters ranging over logical dimensions to\n in/out_axes that range over the mesh dimensions.\n \"\"\"\n return (_to_resource_axes(in_axes, self.physical_axis_resources),\n _to_resource_axes(out_axes, self.physical_axis_resources))\n\n\n# -------- xmap primitive and its transforms --------\n\n# xmap has a different set of parameters than pmap, so we make it its own primitive type\nclass XMapPrimitive(core.MapPrimitive):\n def __init__(self):\n super().__init__('xmap')\n self.def_impl(xmap_impl)\n self.def_custom_bind(self.bind)\n\n def bind(self, fun, *args, in_axes, **params):\n assert len(in_axes) == len(args), (in_axes, args)\n return core.map_bind(self, fun, *args, in_axes=in_axes, **params)\n\n def process(self, trace, fun, tracers, params):\n return trace.process_xmap(self, fun, tracers, params)\n\n def post_process(self, trace, out_tracers, params):\n raise NotImplementedError\n\n def get_bind_params(self, params):\n new_params = dict(params)\n subfun = lu.wrap_init(partial(core.eval_jaxpr, new_params.pop('call_jaxpr'), ()))\n axes = new_params.pop('out_axes')\n new_params['out_axes_thunk'] = HashableFunction(lambda: axes, closure=axes)\n spmd_axes = new_params.pop('spmd_out_axes')\n if spmd_axes is not None:\n new_params['spmd_out_axes_thunk'] = \\\n HashableFunction(lambda: spmd_axes, closure=spmd_axes)\n else:\n new_params['spmd_out_axes_thunk'] = None\n return [subfun], new_params\n\nxmap_p = XMapPrimitive()\ncore.EvalTrace.process_xmap = core.EvalTrace.process_call # type: ignore\ndef _process_xmap_default(self, call_primitive, f, tracers, params):\n raise NotImplementedError(f\"{type(self)} must override process_xmap to handle xmap\")\ncore.Trace.process_xmap = _process_xmap_default # type: ignore\n\ndef _xmap_axis_subst(params, subst, traverse):\n if 'call_jaxpr' not in params: # 
TODO(apaszke): This feels sketchy, but I'm not sure why\n return params\n if not traverse:\n return params\n def shadowed_subst(name):\n return (name,) if name in params['global_axis_sizes'] else subst(name)\n with core.extend_axis_env_nd(params['global_axis_sizes'].items()):\n new_jaxpr = core.subst_axis_names_jaxpr(params['call_jaxpr'], shadowed_subst)\n return dict(params, call_jaxpr=new_jaxpr)\ncore.axis_substitution_rules[xmap_p] = _xmap_axis_subst\n\n# NOTE: We don't have to handle spmd_{in|out}_axes here, because\n# SPMD batching always gets involved as the last transform before XLA translation\nad.JVPTrace.process_xmap = ad.JVPTrace.process_call # type: ignore\nad.call_param_updaters[xmap_p] = ad.call_param_updaters[xla.xla_call_p]\n\ndef _xmap_transpose(params, call_jaxpr, args, cts_in, cts_in_avals, reduce_axes):\n all_args, in_tree_def = tree_flatten(((), args, cts_in)) # empty consts\n fun = lu.hashable_partial(\n lu.wrap_init(ad.backward_pass),\n call_jaxpr, reduce_axes + tuple(params['global_axis_sizes'].keys()))\n fun, nz_arg_cts = ad.nonzero_outputs(fun)\n fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def)\n # Preserve axis for primal arguments, skip tangents (represented as undefined primals).\n in_axes, out_axes = params['in_axes'], params['out_axes']\n new_in_axes = (*(axis for axis, x in zip(in_axes, args) if not ad.is_undefined_primal(x)),\n *(axis for axis, x in zip(out_axes, cts_in) if type(x) is not ad.Zero))\n # NOTE: This assumes that the output cotangents being zero is a deterministic\n # function of which input cotangents were zero.\n @as_hashable_function(closure=(in_axes, tuple(type(c) is ad.Zero for c in cts_in)))\n def out_axes_thunk():\n return tuple(axis for axis, nz in zip(in_axes, nz_arg_cts()) if nz)\n new_params = dict(params,\n name=wrap_name(params['name'], 'transpose'),\n in_axes=new_in_axes,\n out_axes_thunk=out_axes_thunk,\n donated_invars=(False,) * len(new_in_axes),\n spmd_out_axes_thunk=None)\n del new_params['out_axes']\n del new_params['spmd_out_axes']\n out_flat = xmap_p.bind(fun, *all_args, **new_params)\n arg_cts = tree_unflatten(out_tree(), out_flat)\n\n axis_resource_count = _get_axis_resource_count(\n params['positional_semantics'], params['axis_resources'], params['resource_env'])\n local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in params['global_axis_sizes'].items()}\n def unmap_zero(zero, axes):\n return ad.Zero(_insert_aval_axes(zero.aval, axes, local_axis_sizes))\n return tuple(unmap_zero(arg_ct, in_axis) if type(arg_ct) is ad.Zero else arg_ct\n for arg_ct, in_axis in zip(arg_cts, in_axes))\nad.primitive_transposes[xmap_p] = _xmap_transpose\n\n\ndef _typecheck_xmap(\n *in_avals, call_jaxpr, name, in_axes, out_axes, donated_invars,\n global_axis_sizes, axis_resources, resource_env, backend,\n spmd_in_axes, spmd_out_axes, positional_semantics):\n axis_resource_count = _get_axis_resource_count(\n positional_semantics, axis_resources, resource_env)\n local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in global_axis_sizes.items()}\n binder_in_avals = [_insert_aval_axes(v.aval, a_in_axes, local_axis_sizes)\n for v, a_in_axes in zip(call_jaxpr.invars, in_axes)]\n for binder_in_aval, in_aval in zip(binder_in_avals, in_avals):\n if not core.typecompat(binder_in_aval, in_aval):\n raise core.JaxprTypeError(\n f\"xmap passes operand {in_aval} to jaxpr expecting {binder_in_aval}\")\n\n mapped_in_avals = [_delete_aval_axes(a, a_in_axes, 
global_axis_sizes)\n for a, a_in_axes in zip(in_avals, in_axes)]\n with core.extend_axis_env_nd(global_axis_sizes.items()):\n core._check_jaxpr(lambda: core.JaxprPpContext(), call_jaxpr,\n mapped_in_avals)\n\n mapped_out_avals = [v.aval for v in call_jaxpr.outvars]\n out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)\n for a, a_out_axes in zip(mapped_out_avals, out_axes)]\n return out_avals\ncore.custom_typechecks[xmap_p] = _typecheck_xmap\n\ndef show_axes(axes):\n return \", \".join(sorted([f\"`{a}`\" for a in axes]))\n\ndef _resource_typing_xmap(avals,\n params,\n source_info: source_info_util.SourceInfo,\n resource_env,\n outer_axis_resources):\n axis_resources = params['axis_resources']\n inner_axis_resources = dict(outer_axis_resources)\n inner_axis_resources.update(axis_resources)\n if len(inner_axis_resources) < len(outer_axis_resources) + len(axis_resources):\n overlap = set(outer_axis_resources) & set(axis_resources)\n raise JAXTypeError(\n f\"Detected disallowed xmap axis name shadowing at \"\n f\"{source_info_util.summarize(source_info)} \"\n f\"(shadowed axes: {show_axes(overlap)})\")\n\n if resource_env.physical_mesh != params['resource_env'].physical_mesh:\n raise RuntimeError(\"Changing the physical mesh is not allowed inside xmap.\")\n\n call_jaxpr = params['call_jaxpr']\n pxla.resource_typecheck(\n params['call_jaxpr'], resource_env, inner_axis_resources,\n lambda: (f\"an xmapped function {params['name']} \" +\n (f\"(xmap called at {source_info_util.summarize(source_info)})\"\n if source_info else \"\")))\n\n for v, axes in zip(call_jaxpr.outvars, params['out_axes']):\n broadcast_axes = set(axes) - set(v.aval.named_shape)\n used_resources = set(it.chain.from_iterable(\n inner_axis_resources[a] for a in v.aval.named_shape))\n for baxis in broadcast_axes:\n baxis_resources = set(inner_axis_resources[baxis])\n overlap = baxis_resources & used_resources\n if overlap:\n resource_to_axis = {}\n for axis in v.aval.named_shape:\n for raxis in inner_axis_resources[axis]:\n resource_to_axis[raxis] = axis\n partitioning_axes = set(resource_to_axis[raxis] for raxis in overlap)\n raise JAXTypeError(\n f\"One of xmapped function ({params['name']}) outputs is broadcast \"\n f\"along axis `{baxis}` which is assigned to resources \"\n f\"{show_axes(baxis_resources)}, but the output is already \"\n f\"partitioned along {show_axes(overlap)}, because its \"\n f\"named shape contains {show_axes(partitioning_axes)}\")\npxla.custom_resource_typing_rules[xmap_p] = _resource_typing_xmap\n\n\n# This is DynamicJaxprTrace.process_map with some very minor modifications\ndef _dynamic_jaxpr_process_xmap(self, primitive, f, tracers, params):\n from jax.interpreters.partial_eval import (\n trace_to_subjaxpr_dynamic, DynamicJaxprTracer,\n convert_constvars_jaxpr, new_jaxpr_eqn)\n assert primitive is xmap_p\n in_avals = [t.aval for t in tracers]\n global_axis_sizes = params['global_axis_sizes']\n mapped_in_avals = [_delete_aval_axes(a, a_in_axes, global_axis_sizes)\n for a, a_in_axes in zip(in_avals, params['in_axes'])]\n with core.extend_axis_env_nd(global_axis_sizes.items()):\n jaxpr, mapped_out_avals, consts = trace_to_subjaxpr_dynamic(\n f, self.main, mapped_in_avals)\n out_axes = params['out_axes_thunk']()\n if params['spmd_out_axes_thunk'] is not None:\n spmd_out_axes = params['spmd_out_axes_thunk']()\n else:\n spmd_out_axes = None\n axis_resource_count = _get_axis_resource_count(\n params['positional_semantics'], params['axis_resources'], params['resource_env'])\n local_axis_sizes = 
{axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in global_axis_sizes.items()}\n out_avals = [_insert_aval_axes(a, a_out_axes, local_axis_sizes)\n for a, a_out_axes in zip(mapped_out_avals, out_axes)]\n _check_out_avals_vs_out_axes(out_avals, out_axes, params['global_axis_sizes'])\n source_info = source_info_util.current()\n out_tracers = [DynamicJaxprTracer(self, a, source_info) for a in out_avals]\n invars = map(self.getvar, tracers)\n constvars = map(self.getvar, map(self.instantiate_const, consts))\n outvars = map(self.makevar, out_tracers)\n new_in_axes = (AxisNamePos(user_repr='{}'),) * len(consts) + params['in_axes']\n if params['spmd_in_axes'] is None:\n new_spmd_in_axes = None\n else:\n new_spmd_in_axes = (None,) * len(consts) + params['spmd_in_axes']\n new_donated_invars = (False,) * len(consts) + params['donated_invars']\n with core.extend_axis_env_nd(global_axis_sizes.items()):\n call_jaxpr = convert_constvars_jaxpr(jaxpr)\n new_params = dict(params, in_axes=new_in_axes, out_axes=out_axes,\n donated_invars=new_donated_invars,\n spmd_in_axes=new_spmd_in_axes,\n spmd_out_axes=spmd_out_axes,\n call_jaxpr=call_jaxpr)\n del new_params['out_axes_thunk']\n del new_params['spmd_out_axes_thunk']\n eqn = new_jaxpr_eqn([*constvars, *invars], outvars, primitive,\n new_params, source_info)\n self.frame.eqns.append(eqn)\n return out_tracers\npe.DynamicJaxprTrace.process_xmap = _dynamic_jaxpr_process_xmap # type: ignore\n\ndef _xmap_partial_eval_custom_params_updater(\n unks_in: Sequence[bool],\n kept_outs_known: Sequence[bool], kept_outs_staged: Sequence[bool],\n num_res: int, params_known: dict, params_staged: dict\n ) -> Tuple[dict, dict]:\n assert params_known['spmd_in_axes'] is None and params_known['spmd_out_axes'] is None\n assert params_staged['spmd_in_axes'] is None and params_staged['spmd_out_axes'] is None\n\n # pruned inputs to jaxpr_known according to unks_in\n donated_invars_known, _ = pe.partition_list(unks_in, params_known['donated_invars'])\n in_axes_known, _ = pe.partition_list(unks_in, params_known['in_axes'])\n if num_res == 0:\n residual_axes = []\n else:\n residual_axes = [\n AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),\n user_repr=f'<internal: {sort_named_shape}>')\n for named_shape in (v.aval.named_shape for v in params_known['call_jaxpr'].outvars[:-num_res])\n # We sort here to make the iteration order deterministic\n for sort_named_shape in [sorted(named_shape, key=str)]\n ]\n _, out_axes_known = pe.partition_list(kept_outs_known, params_known['out_axes'])\n new_params_known = dict(params_known,\n in_axes=tuple(in_axes_known),\n out_axes=(*out_axes_known, *residual_axes),\n donated_invars=tuple(donated_invars_known))\n assert len(new_params_known['in_axes']) == len(params_known['call_jaxpr'].invars)\n assert len(new_params_known['out_axes']) == len(params_known['call_jaxpr'].outvars)\n\n # added num_res new inputs to jaxpr_staged\n donated_invars_staged = (*(False for _ in range(num_res)), *params_staged['donated_invars'])\n _, out_axes_staged = pe.partition_list(kept_outs_staged, params_staged['out_axes'])\n new_params_staged = dict(params_staged,\n in_axes=(*residual_axes, *params_staged['in_axes']),\n out_axes=tuple(out_axes_staged),\n donated_invars=donated_invars_staged)\n assert len(new_params_staged['in_axes']) == len(params_staged['call_jaxpr'].invars)\n assert len(new_params_staged['out_axes']) == len(params_staged['call_jaxpr'].outvars)\n return new_params_known, 
new_params_staged\npe.partial_eval_jaxpr_custom_rules[xmap_p] = \\\n partial(pe.call_partial_eval_custom_rule, 'call_jaxpr',\n _xmap_partial_eval_custom_params_updater)\n\n\n@lu.transformation_with_aux\ndef out_local_named_shapes(local_axes, *args, **kwargs):\n ans = yield args, kwargs\n ans_axes = [frozenset(a.aval.named_shape) & local_axes for a in ans]\n yield ans, ans_axes\n\n@lu.transformation_with_aux\ndef hide_units(unit_args, *args, **kwargs):\n ans = yield restore_units(unit_args, args), kwargs\n yield filter_units(ans)\n\ndef filter_units(vals):\n vals_no_units = [v for v in vals if v is not core.unit]\n vals_is_unit = [v is core.unit for v in vals]\n return vals_no_units, vals_is_unit\n\ndef restore_units(is_unit, vals):\n vals_it = iter(vals)\n vals_with_units = [core.unit if u else next(vals_it) for u in is_unit]\n try:\n next(vals_it)\n raise RuntimeError(\"Expected the iterator to be exhausted\")\n except StopIteration:\n return vals_with_units\n\n\ndef _jaxpr_trace_process_xmap(self, primitive, f: lu.WrappedFun, tracers, params):\n from jax.interpreters.partial_eval import (\n PartialVal, JaxprTracer, _drop_vars, _dce_open_jaxpr,\n convert_constvars_jaxpr, new_eqn_recipe)\n assert primitive is xmap_p\n in_axes = params['in_axes']\n donated_invars = params['donated_invars']\n global_axis_sizes = params['global_axis_sizes']\n\n in_pvals = [t.pval for t in tracers]\n in_pvals = [pval if pval.is_known()\n else PartialVal.unknown(_delete_aval_axes(pval[0], axes, global_axis_sizes))\n for pval, axes in zip(in_pvals, in_axes)]\n\n const_axes_s = lu.Store()\n def app(f, *args):\n args_no_units, in_units = filter_units(args)\n f, out_units = hide_units(f, tuple(in_units))\n f, out_named_shapes = out_local_named_shapes(f, frozenset(global_axis_sizes))\n out_axes_thunk = params['out_axes_thunk']\n @as_hashable_function(closure=out_axes_thunk)\n def new_out_axes_thunk():\n out_axes = out_axes_thunk()\n axes_units, const_units = split_list(out_units(), [len(out_axes)])\n assert not any(const_units)\n num_consts = len(const_units)\n out_axes_no_units = [a for a, u in zip(out_axes, axes_units) if not u]\n const_axes: Sequence[AxisNamePos]\n if num_consts == 0:\n const_axes = ()\n else:\n const_axes = [\n AxisNamePos(zip(sort_named_shape, range(len(sort_named_shape))),\n user_repr=f'<internal: {sort_named_shape}>')\n for named_shape in out_named_shapes()[-num_consts:]\n # We sort here to make the iteration order deterministic\n for sort_named_shape in [sorted(named_shape, key=str)]\n ]\n if not const_axes_s: # NOTE: This can be called multiple times\n const_axes_s.store(const_axes)\n assert const_axes_s.val == const_axes\n return (*out_axes_no_units, *const_axes)\n pe_params = dict(\n params,\n in_axes=tuple(a for a, u in zip(in_axes, in_units) if not u),\n donated_invars=tuple(a for a, u in zip(donated_invars, in_units) if not u),\n out_axes_thunk=new_out_axes_thunk)\n outs_no_units = primitive.bind(f, *args_no_units, **pe_params)\n new_out_axes_thunk() # Make sure it is called at least once to compute const_axes\n return restore_units(out_units(), outs_no_units)\n\n jaxpr, out_pvals, consts, env_tracers = self.partial_eval(\n f, in_pvals, app, instantiate=False)\n\n out_axes = params['out_axes_thunk']()\n const_axes = const_axes_s.val\n axis_resource_count = _get_axis_resource_count(\n params['positional_semantics'], params['axis_resources'], params['resource_env'])\n local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in 
global_axis_sizes.items()}\n out_pvals = [pval if pval.is_known() else\n PartialVal.unknown(_insert_aval_axes(pval[0], axes, local_axis_sizes))\n for pval, axes in zip(out_pvals, out_axes)]\n\n with core.extend_axis_env_nd(global_axis_sizes.items()):\n # Skip known invars and outvars, and lift constants as regular invars\n in_knowns = tuple(t.pval.is_known() for t in it.chain(env_tracers, tracers))\n out_unknowns = tuple(not pval.is_known() for pval in out_pvals)\n jaxpr = _drop_vars(jaxpr, in_knowns, (False,) * len(jaxpr.outvars))\n jaxpr = _dce_open_jaxpr(jaxpr, out_unknowns, drop_outputs=True)\n jaxpr = convert_constvars_jaxpr(jaxpr)\n\n # Known tracers get propagated as if they were constants\n known_tracers_out = [self.new_const(pval.get_known()) for pval in out_pvals\n if pval.is_known()]\n\n # I'm not 100% if that's correct, but it is an assumption that\n # JaxprTrace.process_call already makes.\n if any(t.pval.is_known() for t in env_tracers):\n raise AssertionError(\"Please open a bug report!\")\n # Unknown tracers need to have the jaxpr set up as their recipe\n unknown_tracers_in = (*env_tracers, *(t for t in tracers if not t.pval.is_known()))\n unknown_tracers_out = [JaxprTracer(self, pval, None) for pval in out_pvals\n if not pval.is_known()]\n const_tracers = map(self.new_instantiated_const, consts)\n\n # Set up new params\n new_in_axes = (*const_axes,\n *(None for _ in env_tracers),\n *(axis for axis, t in zip(in_axes, tracers)\n if not t.pval.is_known()))\n new_out_axes = tuple(axis for axis, pval in zip(out_axes, out_pvals)\n if not pval.is_known())\n\n assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None\n new_params = dict(\n params,\n call_jaxpr=jaxpr,\n donated_invars=(*(False for _ in const_tracers),\n *(d for d, t in zip(donated_invars, tracers) if not t.pval.is_known())),\n in_axes=new_in_axes,\n out_axes=new_out_axes,\n spmd_out_axes=None)\n del new_params['out_axes_thunk']\n del new_params['spmd_out_axes_thunk']\n\n eqn = new_eqn_recipe((*const_tracers, *unknown_tracers_in),\n unknown_tracers_out,\n primitive, new_params, source_info_util.current())\n for t in unknown_tracers_out: t.recipe = eqn\n return pe._zip_knowns(known_tracers_out, unknown_tracers_out, out_unknowns)\npe.JaxprTrace.process_xmap = _jaxpr_trace_process_xmap\n\n\ndef _batch_trace_update_spmd_axes(\n spmd_in_axes, spmd_out_axes_thunk,\n axis_name, dims, dims_out_thunk):\n \"\"\"Extends spmd in and out axes with the position of the trace's batch dimension.\"\"\"\n not_mapped = batching.not_mapped\n def insert_spmd_axis(axes, nd):\n too_short = nd - len(axes)\n if too_short > 0:\n axes += (None,) * too_short\n return tuple_insert(axes, nd, axis_name)\n\n if spmd_in_axes is None:\n spmd_in_axes = ((),) * len(dims)\n new_spmd_in_axes = tuple(\n spmd_axes if d is not_mapped else insert_spmd_axis(spmd_axes, d)\n for spmd_axes, d in zip(spmd_in_axes, dims))\n\n @as_hashable_function(closure=spmd_out_axes_thunk)\n def new_spmd_out_axes_thunk():\n dims_out = dims_out_thunk()\n if spmd_out_axes_thunk is None:\n spmd_out_axes = ((),) * len(dims_out)\n else:\n spmd_out_axes = spmd_out_axes_thunk()\n return tuple(\n spmd_out_axes if nd is not_mapped else insert_spmd_axis(spmd_out_axes, nd)\n for spmd_out_axes, nd in zip(spmd_out_axes, dims_out))\n\n return new_spmd_in_axes, new_spmd_out_axes_thunk\n\ndef _batch_trace_process_xmap(self, is_spmd, primitive, f: lu.WrappedFun, tracers, params):\n not_mapped = batching.not_mapped\n vals, dims = unzip2((t.val, t.batch_dim) for t in 
tracers)\n assert primitive is xmap_p\n if not is_spmd and all(dim is not_mapped for dim in dims):\n return primitive.bind(f, *vals, **params)\n else:\n assert len({x.shape[d] for x, d in zip(vals, dims) if d is not not_mapped}) == 1\n def fmap_dims(axes, f):\n return AxisNamePos(((name, f(axis)) for name, axis in axes.items()),\n user_repr=axes.user_repr)\n new_in_axes = tuple(\n fmap_dims(in_axes, lambda a: a + (d is not not_mapped and d <= a))\n for d, in_axes in zip(dims, params['in_axes']))\n mapped_dims_in = tuple(\n d if d is not_mapped else d - sum(a < d for a in in_axis.values())\n for d, in_axis in zip(dims, params['in_axes']))\n f, mapped_dims_out = batching.batch_subtrace(f, self.main, mapped_dims_in)\n out_axes_thunk: Callable[[], Sequence[AxisNamePos]] = params['out_axes_thunk']\n dims_out_thunk = lambda: tuple(d if d is not_mapped else axis_after_insertion(d, out_axes)\n for d, out_axes in zip(mapped_dims_out(), out_axes_thunk()))\n def axis_after_insertion(axis, inserted_named_axes):\n for inserted_axis in sorted(inserted_named_axes.values()):\n if inserted_axis >= axis:\n break\n axis += 1\n return axis\n # NOTE: This assumes that the choice of the dimensions over which outputs\n # are batched is entirely dependent on the function and not e.g. on the\n # data or its shapes.\n @as_hashable_function(closure=out_axes_thunk)\n def new_out_axes_thunk():\n return tuple(\n out_axes if d is not_mapped else\n fmap_dims(out_axes, lambda a, nd=axis_after_insertion(d, out_axes): a + (nd <= a))\n for out_axes, d in zip(out_axes_thunk(), mapped_dims_out()))\n\n if not is_spmd:\n assert params['spmd_in_axes'] is None and params['spmd_out_axes_thunk'] is None\n new_spmd_in_axes = None\n new_spmd_out_axes_thunk = None\n else:\n new_spmd_in_axes, new_spmd_out_axes_thunk = _batch_trace_update_spmd_axes(\n params['spmd_in_axes'], params['spmd_out_axes_thunk'],\n self.axis_name, dims, dims_out_thunk)\n\n new_params = dict(params,\n in_axes=new_in_axes, out_axes_thunk=new_out_axes_thunk,\n spmd_in_axes=new_spmd_in_axes,\n spmd_out_axes_thunk=new_spmd_out_axes_thunk)\n vals_out = primitive.bind(f, *vals, **new_params)\n dims_out = dims_out_thunk()\n return [batching.BatchTracer(self, v, d) for v, d in zip(vals_out, dims_out)]\nbatching.BatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, False) # type: ignore\npxla.SPMDBatchTrace.process_xmap = partialmethod(_batch_trace_process_xmap, True) # type: ignore\n\n\n# -------- nested xmap handling --------\n\ndef _xmap_lowering_rule(ctx, *args, **kwargs):\n if isinstance(ctx.module_context.axis_context, mlir.SPMDAxisContext):\n if config.experimental_xmap_spmd_lowering_manual:\n return _xmap_lowering_rule_spmd_manual(ctx, *args, **kwargs)\n else:\n return _xmap_lowering_rule_spmd(ctx, *args, **kwargs)\n elif isinstance(ctx.module_context.axis_context, mlir.ReplicaAxisContext):\n return _xmap_lowering_rule_replica(ctx, *args, **kwargs)\n else:\n raise AssertionError(\"Unrecognized axis context type!\")\nmlir.register_lowering(xmap_p, _xmap_lowering_rule)\n\ndef _xmap_lowering_rule_replica(ctx, *in_nodes,\n call_jaxpr, name,\n in_axes, out_axes, donated_invars,\n global_axis_sizes,\n spmd_in_axes, spmd_out_axes,\n positional_semantics,\n axis_resources, resource_env, backend):\n xla.check_backend_matches(backend, ctx.module_context.platform)\n # The only way for any of those two assertions to be violated is when xmap\n # is using the SPMD lowering, but then this rule shouldn't even trigger.\n assert positional_semantics == 
_PositionalSemantics.LOCAL\n assert spmd_in_axes is None and spmd_out_axes is None\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)\n if any(resource_count.distributed for resource_count in axis_resource_count.values()):\n raise NotImplementedError\n local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in global_axis_sizes.items()}\n\n local_mesh = resource_env.physical_mesh.local_mesh\n local_mesh_shape = local_mesh.shape\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n\n local_avals = [pxla.tile_aval_nd(\n local_mesh_shape, aval_mesh_in_axes,\n _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))\n for v, aval_in_axes, aval_mesh_in_axes\n in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]\n # We have to substitute before tracing, because we want the vectorized\n # axes to be used in the jaxpr.\n resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))\n f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! It's the outermost xmap that introduces\n # them!\n vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)\n _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)\n assert not consts\n\n tiled_ins = (\n mlir.lower_fun(partial(_tile, in_axes=arg_in_axes,\n axis_sizes=local_mesh_shape),\n multiple_results=False)(\n mlir.LoweringRuleContext(module_context=ctx.module_context,\n primitive=None,\n avals_in=[aval], avals_out=None),\n in_node)[0]\n if v.aval is not core.abstract_unit else in_node\n for v, aval, in_node, arg_in_axes\n in zip(call_jaxpr.invars, ctx.avals_in, in_nodes, mesh_in_axes))\n\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! 
It's the outermost xmap that introduces\n # them!\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n sub_ctx = ctx.module_context.replace(\n name_stack=xla.extend_name_stack(ctx.module_context.name_stack,\n xla.wrap_name(name, 'xmap')))\n tiled_outs = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)\n\n outs = [\n mlir.lower_fun(\n partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,\n platform=ctx.module_context.platform),\n multiple_results=False)(\n mlir.LoweringRuleContext(module_context=ctx.module_context,\n primitive=None,\n avals_in=[vectorized_outvar.aval],\n avals_out=None), tiled_out)[0]\n if v.aval is not core.abstract_unit else tiled_out\n for v, vectorized_outvar, tiled_out, ans_out_axes\n in zip(call_jaxpr.outvars, vectorized_jaxpr.outvars, tiled_outs,\n mesh_out_axes)]\n return outs\n\n\ndef _xmap_lowering_rule_spmd(ctx, *global_in_nodes,\n call_jaxpr, name, in_axes, out_axes,\n donated_invars, global_axis_sizes, spmd_in_axes,\n spmd_out_axes, positional_semantics,\n axis_resources, resource_env, backend):\n xla.check_backend_matches(backend, ctx.module_context.platform)\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))\n f = hide_mapped_axes(f, in_axes, out_axes)\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n mesh = resource_env.physical_mesh\n f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)\n\n # XXX: We modify mesh_in_axes and mesh_out_axes here\n def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],\n flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):\n if flat_extra_axes is None:\n return\n for axes, extra in zip(flat_mesh_axes, flat_extra_axes):\n for dim, dim_extra_axis in enumerate(extra):\n if dim_extra_axis is None: continue\n assert dim_extra_axis not in axes\n assert not config.jax_enable_checks or all(v != dim for v in axes.values())\n axes[dim_extra_axis] = dim\n add_spmd_axes(mesh_in_axes, spmd_in_axes)\n add_spmd_axes(mesh_out_axes, spmd_out_axes)\n global_in_avals = ctx.avals_in\n vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)\n assert not consts\n\n global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)\n sharded_global_in_nodes = [\n [mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())]\n if aval_axes else [node]\n for node, aval, aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)\n ]\n\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n sub_ctx = ctx.module_context.replace(\n name_stack=xla.extend_name_stack(ctx.module_context.name_stack,\n xla.wrap_name(name, 'xmap')))\n global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),\n *sharded_global_in_nodes)\n\n sharded_global_out_nodes = [\n mlir.wrap_with_sharding_op(node, global_sharding_spec(aval, aval_axes).sharding_proto())\n if aval_axes else node\n for (node,), aval, aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)\n ]\n\n return sharded_global_out_nodes\n\n\ndef _xmap_lowering_rule_spmd_manual(ctx, *global_in_nodes,\n call_jaxpr, name, 
in_axes, out_axes,\n donated_invars, global_axis_sizes, spmd_in_axes,\n spmd_out_axes, positional_semantics,\n axis_resources, resource_env, backend):\n assert spmd_in_axes is None and spmd_out_axes is None\n # This first part (up to vtile_manual) is shared with non-MANUAL SPMD rule.\n xla.check_backend_matches(backend, ctx.module_context.platform)\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))\n f = hide_mapped_axes(f, in_axes, out_axes)\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n\n # NOTE: Sharding constraints are handled entirely by vtile_manual!\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n mesh = resource_env.physical_mesh\n f = pxla.vtile_manual(f, mesh, mesh_in_axes, mesh_out_axes)\n\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! It's the outermost xmap that introduces\n # them!\n global_in_avals = ctx.avals_in\n vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(f, global_in_avals)\n assert not consts\n\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n manual_mesh_axes = frozenset(it.chain.from_iterable(plan.physical_axis_resources.values()))\n assert isinstance(ctx.module_context.axis_context, mlir.SPMDAxisContext)\n sub_ctx = ctx.module_context.replace(\n name_stack=xla.extend_name_stack(ctx.module_context.name_stack,\n xla.wrap_name(name, 'xmap')),\n axis_context=ctx.module_context.axis_context.extend_manual(manual_mesh_axes))\n global_out_nodes = mlir.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),\n *([n] for n in global_in_nodes))\n\n return global_out_nodes\n\n\ndef _xmap_translation_rule(*args, **kwargs):\n if config.experimental_xmap_spmd_lowering_manual:\n raise NotImplementedError(\"Manual lowering only supported in MLIR lowering\")\n elif config.experimental_xmap_spmd_lowering:\n return _xmap_translation_rule_spmd(*args, **kwargs)\n else:\n return _xmap_translation_rule_replica(*args, **kwargs)\nxla.register_translation(xmap_p, _xmap_translation_rule)\n\ndef _xmap_translation_rule_replica(ctx, avals_in, avals_out, *in_nodes,\n call_jaxpr, name,\n in_axes, out_axes, donated_invars,\n global_axis_sizes,\n spmd_in_axes, spmd_out_axes,\n positional_semantics,\n axis_resources, resource_env, backend):\n xla.check_backend_matches(backend, ctx.platform)\n # The only way for any of those two assertions to be violated is when xmap\n # is using the SPMD lowering, but then this rule shouldn't even trigger.\n assert positional_semantics == _PositionalSemantics.LOCAL\n assert spmd_in_axes is None and spmd_out_axes is None\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n axis_resource_count = _get_axis_resource_count(positional_semantics, axis_resources, resource_env)\n if any(resource_count.distributed for resource_count in axis_resource_count.values()):\n raise NotImplementedError\n local_axis_sizes = {axis: axis_resource_count[axis].to_local(global_size)\n for axis, global_size in global_axis_sizes.items()}\n\n local_mesh = resource_env.physical_mesh.local_mesh\n local_mesh_shape = local_mesh.shape\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n\n local_avals = [pxla.tile_aval_nd(\n local_mesh_shape, 
aval_mesh_in_axes,\n _insert_aval_axes(v.aval, aval_in_axes, local_axis_sizes))\n for v, aval_in_axes, aval_mesh_in_axes\n in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]\n # We have to substitute before tracing, because we want the vectorized\n # axes to be used in the jaxpr.\n resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))\n f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! It's the outermost xmap that introduces\n # them!\n vectorized_jaxpr, out_avals, consts = pe.trace_to_jaxpr_dynamic(f, local_avals)\n _check_out_avals_vs_out_axes(out_avals, out_axes, global_axis_sizes)\n assert not consts\n\n tiled_ins = (\n xla.lower_fun(\n partial(_tile, in_axes=arg_in_axes, axis_sizes=local_mesh_shape),\n new_style=True, multiple_results=False)(ctx, [aval], None, in_node)[0]\n if aval is not core.abstract_unit else in_node\n for aval, in_node, arg_in_axes\n in zip(avals_in, in_nodes, mesh_in_axes))\n\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! It's the outermost xmap that introduces\n # them!\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n sub_ctx = ctx.replace(\n name_stack=xla.extend_name_stack(ctx.name_stack,\n xla.wrap_name(name, 'xmap')))\n tiled_outs = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (), *tiled_ins)\n\n outs = [\n xla.lower_fun(\n partial(_untile, out_axes=ans_out_axes, axis_sizes=local_mesh_shape,\n platform=ctx.platform),\n new_style=True, multiple_results=False)(\n ctx, [v.aval], None, tiled_out\n )[0]\n if v.aval is not core.abstract_unit else tiled_out\n for v, tiled_out, ans_out_axes\n in zip(vectorized_jaxpr.outvars, tiled_outs, mesh_out_axes)]\n return outs\n\ndef _tile_base_indices(tile_shape, axes, axis_sizes):\n zero = np.zeros((), dtype=np.int32)\n linear_idxs = [zero] * len(tile_shape)\n strides = [1] * len(tile_shape)\n for name, axis in reversed(axes.items()):\n axis_index = lax.axis_index(name)\n stride_c = np.array(strides[axis], np.int32)\n if linear_idxs[axis] is zero and strides[axis] == 1:\n linear_idxs[axis] = axis_index\n else:\n linear_idxs[axis] = lax.add(linear_idxs[axis],\n lax.mul(axis_index, stride_c))\n strides[axis] *= axis_sizes[name]\n return [zero if linear_idx is zero else\n lax.mul(linear_idx, np.array(tile_dim_size, np.int32))\n for linear_idx, tile_dim_size in zip(linear_idxs, tile_shape)]\n\n\ndef _tile(x, in_axes, axis_sizes):\n if not in_axes:\n return x\n tile_shape = list(x.shape)\n for name, axis in in_axes.items():\n axis_size = axis_sizes[name]\n assert tile_shape[axis] % axis_size == 0\n tile_shape[axis] //= axis_size\n base_idxs = _tile_base_indices(tile_shape, in_axes, axis_sizes)\n return lax.dynamic_slice(x, base_idxs, tile_shape)\n\n\n# TODO(b/110096942): more efficient gather\ndef _untile(x, out_axes, axis_sizes, platform):\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n convert_bool = (np.issubdtype(x.dtype, np.bool_)\n and platform in ('cpu', 'gpu'))\n if convert_bool:\n x = lax.convert_element_type(x, np.dtype(np.float32))\n\n tile_shape = list(x.shape)\n shape = list(tile_shape)\n for name, axis in out_axes.items():\n shape[axis] *= axis_sizes[name]\n base_idxs 
= _tile_base_indices(tile_shape, out_axes, axis_sizes)\n\n padded = lax.broadcast(np.array(0, x.dtype), shape)\n padded = lax.dynamic_update_slice(padded, x, base_idxs)\n out = lax.psum(padded, tuple(out_axes.keys()))\n\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n if convert_bool:\n nonzero = lax.ne(out, np.array(0, dtype=np.float32))\n out = lax.convert_element_type(nonzero, np.dtype(np.bool_))\n return out\n\n\ndef _xmap_translation_rule_spmd(ctx, avals_in, avals_out, *global_in_nodes,\n call_jaxpr, name,\n in_axes, out_axes, donated_invars,\n global_axis_sizes,\n spmd_in_axes, spmd_out_axes,\n positional_semantics,\n axis_resources, resource_env, backend):\n xla.check_backend_matches(backend, ctx.platform)\n plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, global_axis_sizes)\n\n resource_call_jaxpr = plan.subst_axes_with_resources(call_jaxpr)\n f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))\n f = hide_mapped_axes(f, in_axes, out_axes)\n f = plan.vectorize_and_loop(f, in_axes, out_axes)\n mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)\n mesh = resource_env.physical_mesh\n f = pxla.vtile_by_mesh(f, mesh, mesh_in_axes, mesh_out_axes)\n\n # XXX: We modify mesh_in_axes and mesh_out_axes here\n def add_spmd_axes(flat_mesh_axes: Sequence[pxla.ArrayMapping],\n flat_extra_axes: Optional[Sequence[Sequence[Sequence[pxla.MeshAxisName]]]]):\n if flat_extra_axes is None:\n return\n for axes, extra in zip(flat_mesh_axes, flat_extra_axes):\n for dim, dim_extra_axis in enumerate(extra):\n if dim_extra_axis is None: continue\n assert dim_extra_axis not in axes\n assert not config.jax_enable_checks or all(v != dim for v in axes.values())\n axes[dim_extra_axis] = dim\n add_spmd_axes(mesh_in_axes, spmd_in_axes)\n add_spmd_axes(mesh_out_axes, spmd_out_axes)\n # NOTE: We don't extend the resource env with the mesh shape, because those\n # resources are already in scope! 
It's the outermost xmap that introduces\n # them!\n global_in_avals = [\n core.ShapedArray(xla_type.dimensions(), xla_type.numpy_dtype())\n for in_node in global_in_nodes\n for xla_type in (ctx.builder.get_shape(in_node),)\n ]\n vectorized_jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(\n f, global_in_avals)\n assert not consts\n\n global_sharding_spec = pxla.mesh_sharding_specs(mesh.shape, mesh.axis_names)\n\n def set_sharding(node, aval, aval_axes):\n sharding_proto = global_sharding_spec(aval, aval_axes).sharding_proto()\n if not config.experimental_xmap_ensure_fixed_sharding:\n # Do not specify sharding on other dimensions.\n unspecified_dims = set(range(aval.ndim))\n for axis in set(aval_axes.values()):\n unspecified_dims.remove(axis)\n return xla.set_sharding_proto(ctx.builder, node, sharding_proto,\n unspecified_dims)\n else:\n return xla.set_sharding_proto(ctx.builder, node, sharding_proto)\n\n sharded_global_in_nodes = [\n set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,\n aval_axes in zip(global_in_nodes, global_in_avals, mesh_in_axes)\n ]\n\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n sub_ctx = ctx.replace(\n name_stack=xla.extend_name_stack(ctx.name_stack,\n xla.wrap_name(name, 'xmap')))\n global_out_nodes = xla.jaxpr_subcomp(sub_ctx, vectorized_jaxpr, (),\n *sharded_global_in_nodes)\n\n sharded_global_out_nodes = [\n set_sharding(node, aval, aval_axes) if aval_axes else node for node, aval,\n aval_axes in zip(global_out_nodes, global_out_avals, mesh_out_axes)\n ]\n\n return sharded_global_out_nodes\n\n\n# -------- helper functions --------\n\ndef _delete_aval_axes(aval, axes: AxisNamePos, global_axis_sizes):\n assert isinstance(aval, core.ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, dim in sorted(axes.items(), key=lambda x: x[1], reverse=True):\n named_shape[name] = global_axis_sizes[name]\n del shape[dim]\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\ndef _insert_aval_axes(aval, axes: AxisNamePos, local_axis_sizes):\n assert isinstance(aval, core.ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, dim in sorted(axes.items(), key=lambda x: x[1]):\n shape.insert(dim, local_axis_sizes[name])\n named_shape.pop(name, None) # The name might be missing --- it's a broadcast.\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\n\nclass ResourceCount(namedtuple('ResourceCount', ['semantics', 'nglobal', 'nlocal'])):\n def to_local(self, global_size):\n if self.semantics == _PositionalSemantics.GLOBAL:\n return global_size\n elif self.semantics == _PositionalSemantics.LOCAL:\n assert global_size % self.nglobal == 0, \"Please report this issue!\"\n return (global_size // self.nglobal) * self.nlocal\n else:\n raise AssertionError(\"Unhandled case {_positional_semantics}\")\n\n def to_global(self, local_size):\n if self.semantics == _PositionalSemantics.GLOBAL:\n return local_size\n elif self.semantics == _PositionalSemantics.LOCAL:\n assert local_size % self.nlocal == 0, \"Please report this issue!\"\n return (local_size // self.nlocal) * self.nglobal\n else:\n raise AssertionError(f\"Unhandled case {_positional_semantics}\")\n\n @property\n def distributed(self):\n return self.nglobal != self.nlocal\n\n\ndef _get_axis_resource_count(semantics, axis_resources, resource_env) -> Dict[ResourceAxisName, ResourceCount]:\n global_res_shape = 
resource_env.shape\n local_res_shape = resource_env.local_shape\n return {axis: ResourceCount(semantics,\n int(np.prod(map(global_res_shape.get, resources), dtype=np.int64)),\n int(np.prod(map(local_res_shape.get, resources), dtype=np.int64)))\n for axis, resources in axis_resources.items()}\n\n\ndef _get_axis_sizes(args_flat: Iterable[Any],\n in_axes_flat: Iterable[AxisNamePos],\n global_axis_sizes: Dict[AxisName, int],\n axis_resource_count: Dict[AxisName, ResourceCount]):\n global_axis_sizes = dict(global_axis_sizes)\n for arg, in_axes in zip(args_flat, in_axes_flat):\n for name, dim in in_axes.items():\n resources = axis_resource_count[name]\n local_ = \"local \" if resources.distributed else \"\"\n try:\n local_dim_size = arg.shape[dim]\n except IndexError:\n # TODO(apaszke): Handle negative indices. Check for overlap too!\n raise ValueError(f\"One of xmap arguments has an in_axes specification of \"\n f\"{in_axes.user_repr}, which implies that it has at least \"\n f\"{max(in_axes.values()) + 1} dimensions, but the argument \"\n f\"has rank {arg.ndim}\")\n if local_dim_size % resources.nlocal != 0:\n raise ValueError(f\"One of xmap arguments has an in_axes specification of \"\n f\"{in_axes.user_repr}, which implies that its size in dimension \"\n f\"{dim} ({local_dim_size}) should be divisible by the number of \"\n f\"{local_}resources assigned to axis {name} ({resources.nlocal})\")\n global_dim_size = resources.to_global(local_dim_size)\n if name in global_axis_sizes:\n expected_local_dim_size = resources.to_local(global_axis_sizes[name])\n if local_dim_size != expected_local_dim_size:\n raise ValueError(f\"The {local_}size of axis {name} was previously inferred to be \"\n f\"{expected_local_dim_size}, but found an argument of shape {arg.shape} \"\n f\"with in_axes specification {in_axes.user_repr}. Shape mismatch \"\n f\"occurs in dimension {dim}: {local_dim_size} != {expected_local_dim_size}\")\n global_axis_sizes[name] = global_dim_size\n return FrozenDict(global_axis_sizes)\n\n\ndef lookup_exactly_one_of(d: AxisNamePos, names: Set[AxisName]) -> Optional[int]:\n res = None\n for name in names:\n if name in d:\n if res is not None:\n raise ValueError(\"An input was mapped to the same resource twice\")\n res = d[name]\n return res\n\n\n@lu.transformation\ndef hide_mapped_axes(flat_in_axes, flat_out_axes, *flat_args):\n def _squeeze_mapped_axes(arg, axes: AxisNamePos):\n for dim in sorted(axes.values(), reverse=True):\n arg = arg.squeeze(dim)\n return arg\n\n def _unsqueeze_mapped_axes(out, axes: AxisNamePos):\n try:\n return jnp.expand_dims(out, tuple(axes.values()))\n except ValueError as e:\n # Improve the axis out of bounds errors\n # TODO(apaszke): Handle negative indices. 
Check for overlap too!\n if e.args[0].startswith('axis') and 'out of bounds' in e.args[0]:\n raise ValueError(f\"One of xmap outputs has an out_axes specification of \"\n f\"{axes.user_repr}, which requires the result of the xmapped \"\n f\"function to have at least {max(axes.values()) - len(axes) + 1} \"\n f\"positional dimensions, but it only has {out.ndim}\")\n raise\n\n squeezed_args = map(_squeeze_mapped_axes, flat_args, flat_in_axes)\n flat_outputs = yield squeezed_args, {}\n yield map(_unsqueeze_mapped_axes, flat_outputs, flat_out_axes)\n\n\ndef _jaxpr_resources(jaxpr, resource_env) -> Set[ResourceAxisName]:\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n assert isinstance(jaxpr, core.Jaxpr)\n used_resources = set()\n for eqn in jaxpr.eqns:\n if eqn.primitive is xmap_p:\n if eqn.params['resource_env'].physical_mesh != resource_env.physical_mesh:\n raise RuntimeError(\"Changing the physical mesh is not allowed inside xmap.\")\n used_resources |= set(it.chain(*eqn.params['axis_resources'].values()))\n updates = core.traverse_jaxpr_params(\n partial(_jaxpr_resources, resource_env=resource_env), eqn.params).values()\n for update in updates:\n used_resources |= update\n return used_resources\n\n\ndef _to_resource_axes(axes_specs: Sequence[AxisNamePos],\n axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]):\n \"\"\"\n Convert in/out_axes parameters ranging over logical dimensions to\n ones that range over resource dimensions.\n\n Note that values no longer have to be distinct, as multiple resource\n axes can tile a single positional axes. This is why the result is\n an OrderedDict with an implicit major-to-minor ordering.\n \"\"\"\n return tuple(OrderedDict((resource_axis, pos_axis)\n for logical_axis, pos_axis in axes.items()\n for resource_axis in axis_resources[logical_axis])\n for axes in axes_specs)\n\n\ndef _merge_leading_axis(x, axis: Optional[int]):\n if axis is None:\n # We assume that the output does not vary along the leading axis\n return lax.index_in_dim(x, 0, axis=0, keepdims=False)\n else:\n x_moved = moveaxis(x, 0, axis)\n shape = list(x_moved.shape)\n shape[axis:axis + 2] = [shape[axis] * shape[axis + 1]]\n return x_moved.reshape(shape)\n\n\ndef _slice_tile(x, dim: Optional[int], i, n: int):\n \"\"\"Selects an `i`th (out of `n`) tiles of `x` along `dim`.\"\"\"\n if dim is None: return x\n (tile_size, rem) = divmod(x.shape[dim], n)\n assert rem == 0, \"Please open a bug report!\"\n return lax.dynamic_slice_in_dim(x, i * tile_size, slice_size=tile_size, axis=dim)\n\n\ndef _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],\n resource_env: ResourceEnv):\n \"\"\"Splits axis_resources into separate dicts for physical and loop resources.\"\"\"\n physical_axis_resources = {}\n loop_axis_resources = {}\n loop_resource_axes = resource_env.loop_resource_axes\n for axis, raxes in axis_resources.items():\n first_loop = 0\n for raxis in raxes:\n if raxis in loop_resource_axes:\n break\n else:\n first_loop += 1\n physical_axis_resources[axis] = raxes[:first_loop]\n loop_resources = loop_axis_resources[axis] = raxes[first_loop:]\n if not all(name in loop_resource_axes for name in loop_resources):\n raise NotImplementedError(\"Loop resources cannot appear before mesh axes \"\n \"in the resource_axis argument\")\n return physical_axis_resources, loop_axis_resources\n\n\ndef _check_out_avals_vs_out_axes(out_avals: Sequence[core.AbstractValue],\n out_axes: Sequence[AxisNamePos],\n global_axis_sizes: Dict[AxisName, int]):\n 
defined_axes = set(global_axis_sizes)\n for aval, axes in zip(out_avals, out_axes):\n if not isinstance(aval, core.ShapedArray):\n if axes:\n raise AssertionError(f\"Only array abstract values can have non-empty \"\n f\"out_axes, but {aval} has {axes}\")\n continue\n undeclared_axes = (set(aval.named_shape) - set(axes)) & defined_axes\n if undeclared_axes:\n undeclared_axes_str = sorted([str(axis) for axis in undeclared_axes])\n raise TypeError(f\"One of xmap results has an out_axes specification of \"\n f\"{axes.user_repr}, but is actually mapped along more axes \"\n f\"defined by this xmap call: {', '.join(undeclared_axes_str)}\")\n\n\n# TODO: We should relax this at least for \"constructor primitives\"\n# such as axis_index or zeros.\ndef _check_no_loop_collectives(jaxpr, loop_axis_resources):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n def subst_no_loop(name):\n if loop_axis_resources.get(name, ()):\n raise RuntimeError(f\"Named axes with loop resources assigned to them cannot \"\n f\"be referenced inside the xmapped computation (e.g. in \"\n f\"collectives), but `{name}` violates that rule\")\n return (name,)\n for eqn in jaxpr.eqns:\n core.subst_axis_names(eqn.primitive, eqn.params, subst_no_loop, traverse=False)\n rec = partial(_check_no_loop_collectives, loop_axis_resources=loop_axis_resources)\n core.traverse_jaxpr_params(rec, eqn.params)\n\n\ndef _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name = None):\n from jax.experimental.pjit import sharding_constraint_p, ParsedPartitionSpec\n rec = lambda jaxpr: _fix_inferred_spmd_sharding(jaxpr, resource_env, gen_fresh_name)\n if isinstance(jaxpr, core.ClosedJaxpr):\n return jaxpr.map_jaxpr(rec)\n assert isinstance(jaxpr, core.Jaxpr)\n if gen_fresh_name is None:\n gen_fresh_name = core.gensym([jaxpr])\n new_eqns = []\n for eqn in jaxpr.eqns:\n new_jaxpr_params = core.traverse_jaxpr_params(rec, eqn.params)\n tmp_outvars = [gen_fresh_name(v.aval) for v in eqn.outvars]\n new_eqns.append(core.JaxprEqn(eqn.invars, tmp_outvars, eqn.primitive,\n dict(eqn.params, **new_jaxpr_params), eqn.source_info))\n for outvar, tmpvar in zip(eqn.outvars, tmp_outvars):\n new_eqns.append(core.JaxprEqn([tmpvar], [outvar], sharding_constraint_p,\n dict(resource_env=resource_env, axis_resources=ParsedPartitionSpec((), ())),\n eqn.source_info))\n return core.Jaxpr(jaxpr.constvars, jaxpr.invars, jaxpr.outvars, new_eqns)\n\ndef _flatten_axes(what, tree, axes, tupled_args):\n try:\n return tuple(flatten_axes(what, tree, axes, tupled_args=tupled_args))\n except ValueError:\n pass\n # Replace axis_resources with unparsed versions to avoid revealing internal details\n flatten_axes(what, tree, tree_map(lambda parsed: NoQuotesStr(parsed.user_repr), axes),\n tupled_args=tupled_args)\n raise AssertionError(\"Please open a bug request!\") # This should be unreachable\n\nclass NoQuotesStr(str):\n __repr__ = str.__str__\n\n\n# -------- soft_pmap --------\n\ndef soft_pmap(fun: Callable, axis_name: Optional[AxisName] = None, in_axes=0\n ) -> Callable:\n warn(\"soft_pmap is an experimental feature and probably has bugs!\")\n _check_callable(fun)\n axis_name = core._TempAxisName(fun) if axis_name is None else axis_name\n\n if any(axis != 0 for axis in tree_leaves(in_axes)):\n raise ValueError(f\"soft_pmap in_axes leaves must be 0 or None, got {in_axes}\")\n proxy = object()\n in_axes = _replace_nones(proxy, in_axes)\n in_axes = tree_map(lambda i: {i: axis_name} if i is not proxy else {}, in_axes)\n\n\n @wraps(fun)\n def f_pmapped(*args, 
**kwargs):\n mesh_devices = np.array(xb.local_devices())\n with mesh(mesh_devices, ['devices']):\n return xmap(fun, in_axes=in_axes, out_axes={0: axis_name},\n axis_resources={axis_name: 'devices'})(*args, **kwargs)\n return f_pmapped\n\n# -------- config flags --------\n\ndef _thread_local_flag_unsupported(_):\n raise RuntimeError(\"thread-local xmap flags not supported!\")\ndef _clear_compilation_cache(_):\n make_xmap_callable.cache_clear() # type: ignore\n\ndef _ensure_spmd_and(f):\n def update(v):\n if v and not config.experimental_xmap_spmd_lowering:\n raise RuntimeError(\"This flag requires enabling the experimental_xmap_spmd_lowering flag\")\n return f(v)\n return update\n\ndef _ensure_supports_manual_and(f):\n def update(v):\n if v and not hasattr(xc.OpSharding.Type, \"MANUAL\"):\n raise RuntimeError(\"This flag requires a version of jaxlib that supports MANUAL sharding type\")\n return f(v)\n return update\n\ntry:\n config.define_bool_state(\n name=\"experimental_xmap_spmd_lowering\",\n default=False,\n help=(\"When set, multi-device xmap computations will be compiled through \"\n \"the XLA SPMD partitioner instead of explicit cross-replica collectives. \"\n \"Not supported on CPU!\"),\n update_global_hook=_clear_compilation_cache,\n update_thread_local_hook=_thread_local_flag_unsupported)\n config.define_bool_state(\n name=\"experimental_xmap_spmd_lowering_manual\",\n default=False,\n help=(\"When set, multi-device xmap computations will be compiled using \"\n \"the MANUAL partitioning feature of the XLA SPMD partitioner instead of \"\n \"sharding constraints on vectorized code. \"\n \"Requires experimental_xmap_spmd_lowering!\"),\n update_global_hook=_ensure_supports_manual_and(_ensure_spmd_and(_clear_compilation_cache)),\n update_thread_local_hook=_thread_local_flag_unsupported)\n config.define_bool_state(\n name=\"experimental_xmap_ensure_fixed_sharding\",\n default=False,\n help=(\"When set and `experimental_xmap_spmd_lowering` is enabled, the lowering will \"\n \"try to limit the flexibility of the automated SPMD partitioner heuristics \"\n \"by emitting additional sharding annotations for program intermediates.\"),\n update_global_hook=_ensure_spmd_and(_clear_compilation_cache),\n update_thread_local_hook=_thread_local_flag_unsupported)\nexcept Exception:\n raise ImportError(\"jax.experimental.maps has to be imported before JAX flags \"\n \"are parsed\")\n"
] |
[
[
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.issubdtype",
"numpy.dtype"
]
] |
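The ResourceCount helper in the xmap code above converts axis sizes between the global view (all devices in the mesh) and the local view (devices attached to the current host) by simple proportional scaling. A minimal standalone sketch of that arithmetic, using made-up counts of 8 global and 2 local devices — nothing here is part of the JAX API, it only mirrors the to_local/to_global logic for the LOCAL positional-semantics branch:

nglobal, nlocal = 8, 2  # assumed: 8 devices in the global mesh, 2 on this host

def to_local(global_size):
    # mirrors ResourceCount.to_local for LOCAL positional semantics
    assert global_size % nglobal == 0
    return (global_size // nglobal) * nlocal

def to_global(local_size):
    # mirrors ResourceCount.to_global for LOCAL positional semantics
    assert local_size % nlocal == 0
    return (local_size // nlocal) * nglobal

print(to_local(16))   # 4  -> this host holds 2 of the 8 shards of a size-16 axis
print(to_global(4))   # 16 -> round-trips back to the global axis size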
tanishq1g/Pytorch_codes
|
[
"45ac44d70b021556d0c451a1553c4aaabd1a5410"
] |
[
"ZeroToAll/08_dataset_loader.py"
] |
[
"# one epoch : one forward pass or one backward pass for all training examples\n# batch size : the number of training examples on one forward/backward pass. the higher the batch size, the more memory space you will need\n# number of iterations : number of passes, each pass using[batch size] number of examples\n# one pass = one backward pass and one forward pass\n# example : if 1000 examples and batchsize is 500 it will take 2 iterations to complete one epoch\n\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nimport os\n\n# https://pytorch.org/docs/stable/_modules/torch/utils/data/dataset.html\nclass DiabetesDataset(Dataset):\n '''\n All other datasets should subclass Dataset class. All subclasses should override\n ``__len__``, that provides the size of the dataset, and ``__getitem__``,\n supporting integer indexing in range from 0 to len(self) exclusive.\n '''\n def __init__(self):\n # download and read data\n dir_path = os.path.dirname(os.path.realpath(__file__))\n xy = np.loadtxt(dir_path + '/data/diabetes.csv.gz', delimiter=',', dtype=np.float32)\n self.len = xy.shape[0]\n self.x_data = torch.from_numpy(xy[:, 0:-1])\n self.y_data = torch.from_numpy(xy[:, [-1]])\n\n def __getitem__(self, index):\n # returns one item on the index\n return self.x_data[index], self.y_data[index]\n\n def __len__(self):\n # returns length of the dataset\n return self.len\n\n\n# dataset object\n\ndataset = DiabetesDataset()\n\n# https://pytorch.org/docs/stable/data.html\n# dataloader Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.\ntrain_loader = DataLoader(\n dataset = dataset,\n batch_size = 32,\n shuffle = True,\n num_workers = 2\n)\n\nfor epoch in range(2):\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, labels = data\n\n # wrap them in Variable\n inputs, labels = Variable(inputs), Variable(labels)\n\n # Run your training process\n print(epoch, i, \"inputs\", inputs.data.shape, \"labels\", labels.data.shape)"
] |
[
[
"torch.autograd.Variable",
"numpy.loadtxt",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] |
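The 08_dataset_loader.py example above depends on a local diabetes.csv.gz file, so it cannot be run as-is. A self-contained sketch of the same Dataset/DataLoader pattern, with a made-up ToyDataset of random tensors standing in for the CSV data, would look like this:

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    """Synthetic regression data; __len__ and __getitem__ are the only requirements."""
    def __init__(self, n=64, d=8):
        self.x = torch.randn(n, d)
        self.y = torch.randn(n, 1)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

loader = DataLoader(ToyDataset(), batch_size=16, shuffle=True, num_workers=0)
for epoch in range(2):
    for i, (inputs, labels) in enumerate(loader):
        print(epoch, i, "inputs", inputs.shape, "labels", labels.shape)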
Mrxuelovecode/Deep-Multi-Sphere-SVDD
|
[
"60a52659db8c9de971ca373562a3417731da7975"
] |
[
"src/baseline_kde.py"
] |
[
"'''\n This is a revision to \"Deep One-Class Classification\" implementation by\n Lukas Ruff (https://github.com/lukasruff/Deep-SVDD)\n Summary of the changes:\n 1. Mobifall dataset is added\n 2. Normal and anomaly classes can be given as input label vectors\n'''\n\nimport argparse\nimport os\nimport numpy as np\n\nfrom kde import KDE\nfrom config import Configuration as Cfg\nfrom utils.log import log_exp_config, log_KDE, log_AD_results, log_AD_info\nfrom utils.visualization.images_plot import plot_outliers_and_most_normal\n\n\n# ====================================================================\n# Parse arguments\n# --------------------------------------------------------------------\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset\",\n help=\"dataset name\",\n type=str, choices=[\"mnist\", \"cifar10\", \"mobiFall\"])\nparser.add_argument(\"--xp_dir\",\n help=\"directory for the experiment\",\n type=str)\nparser.add_argument(\"--kernel\",\n help=\"kernel\",\n type=str, choices=[\"gaussian\", \"tophat\", \"epanechnikov\", \"exponential\", \"linear\", \"cosine\"])\nparser.add_argument(\"--GridSearchCV\",\n help=\"Use GridSearchCV to determine bandwidth\",\n type=int, default=0)\nparser.add_argument(\"--out_frac\",\n help=\"fraction of outliers in data set\",\n type=float, default=0)\nparser.add_argument(\"--seed\",\n help=\"numpy seed\",\n type=int, default=0)\nparser.add_argument(\"--ad_experiment\",\n help=\"specify if experiment should be two- or multiclass\",\n type=int, default=1)\nparser.add_argument(\"--unit_norm_used\",\n help=\"norm to use for scaling the data to unit norm\",\n type=str, default=\"l1\")\nparser.add_argument(\"--gcn\",\n help=\"apply global contrast normalization in preprocessing\",\n type=int, default=0)\nparser.add_argument(\"--zca_whitening\",\n help=\"specify if data should be whitened\",\n type=int, default=0)\nparser.add_argument(\"--pca\",\n help=\"apply pca in preprocessing\",\n type=int, default=0)\nparser.add_argument(\"--plot_examples\",\n help=\"specify if examples of anomalies and normal data should be ploted\",\n type=int, default=0)\nparser.add_argument(\"--info_file\",\n help=\"filename for all results in log folder\",\n type=str)\n#mnist\nparser.add_argument(\"--mnist_val_frac\",\n help=\"specify the fraction the validation set of the initial training data should be\",\n type=float, default=0) # default of 0 as k-fold cross-validation is performed internally.\nparser.add_argument(\"--mnist_normal\",\n help=\"specify normal class in MNIST\",\n type=str, default='0')\nparser.add_argument(\"--mnist_outlier\",\n help=\"specify outlier class in MNIST\",\n type=str, default='range(1,10)')\n#cifar10\nparser.add_argument(\"--cifar10_val_frac\",\n help=\"specify the fraction the validation set of the initial training data should be\",\n type=float, default=0) # default of 0 as k-fold cross-validation is performed internally.\nparser.add_argument(\"--cifar10_normal\",\n help=\"specify normal class in CIFAR-10\",\n type=str, default='0')\nparser.add_argument(\"--cifar10_outlier\",\n help=\"specify outlier class in CIFAR-10\",\n type=str, default='range(1,10)')\n#mobiFall\nparser.add_argument(\"--mobiFall_val_frac\",\n help=\"specify the fraction the validation set of the initial training data should be\",\n type=np.float32, default=np.float32(1./6))\nparser.add_argument(\"--mobiFall_normal\",\n help=\"specify normal class in mobiFall\",\n type=str, default='0')\nparser.add_argument(\"--mobiFall_outlier\",\n help=\"specify outlier class 
in mobiFall\",\n type=str, default='range(1,10)')\n# ====================================================================\n\n\ndef main():\n\n args = parser.parse_args()\n print('Options:')\n for (key, value) in vars(args).iteritems():\n print(\"{:16}: {}\".format(key, value))\n\n assert os.path.exists(args.xp_dir)\n\n # update config data\n\n # plot parameters\n Cfg.xp_path = args.xp_dir\n Cfg.info_file = args.info_file\n\n # dataset\n Cfg.seed = args.seed\n Cfg.out_frac = args.out_frac\n Cfg.ad_experiment = bool(args.ad_experiment)\n Cfg.unit_norm_used = args.unit_norm_used\n Cfg.gcn = bool(args.gcn)\n Cfg.zca_whitening = bool(args.zca_whitening)\n Cfg.pca = bool(args.pca)\n Cfg.mnist_val_frac = args.mnist_val_frac\n Cfg.mnist_normal = args.mnist_normal\n Cfg.mnist_outlier = args.mnist_outlier\n Cfg.cifar10_val_frac = args.cifar10_val_frac\n Cfg.cifar10_normal = args.cifar10_normal\n Cfg.cifar10_outlier = args.cifar10_outlier\n\n #mobiFall\n Cfg.mobiFall_bias = bool(args.mobiFall_bias)\n Cfg.mobiFall_rep_dim = args.mobiFall_rep_dim\n Cfg.mobiFall_normal = args.mobiFall_normal\n Cfg.mobiFall_outlier = args.mobiFall_outlier\n\n # KDE parameters\n Cfg.kde_GridSearchCV = bool(args.GridSearchCV)\n\n # initialize KDE\n kde = KDE(dataset=args.dataset, kernel=args.kernel)\n\n # train KDE model\n kde.train(bandwidth_GridSearchCV=Cfg.kde_GridSearchCV)\n\n # predict scores\n kde.predict(which_set='train')\n kde.predict(which_set='test')\n\n # log\n log_exp_config(Cfg.xp_path, args.dataset)\n log_KDE(Cfg.xp_path, args.kernel, kde.bandwidth)\n log_AD_results(Cfg.xp_path, kde)\n log_AD_info(kde)\n\n # pickle/serialize\n kde.dump_model(filename=Cfg.xp_path + \"/model.p\")\n kde.log_results(filename=Cfg.xp_path + \"/AD_results.p\")\n\n # plot targets and outliers sorted\n n_img = 32\n plot_outliers_and_most_normal(kde, n_img, Cfg.xp_path)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.float32"
]
] |
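The baseline_kde.py script above drives a repository-specific KDE class from its local kde module, so it is not runnable on its own. The underlying idea — fit a density model on normal data, tune the bandwidth (the script's --GridSearchCV flag), and flag low-density test points as anomalies — can be sketched with scikit-learn instead. The synthetic data, bandwidth grid, and 10% score cutoff below are illustrative assumptions, not the repository's configuration:

import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X_train = rng.normal(size=(500, 2))                 # assumed normal-class training data
X_test = np.vstack([rng.normal(size=(50, 2)),       # normal test points
                    rng.uniform(-6, 6, size=(10, 2))])  # injected outliers

# Pick the bandwidth by cross-validated log-likelihood, as GridSearchCV does by default
grid = GridSearchCV(KernelDensity(kernel="gaussian"),
                    {"bandwidth": np.logspace(-1, 1, 10)}, cv=5)
grid.fit(X_train)

scores = grid.best_estimator_.score_samples(X_test)  # log-density per test sample
threshold = np.quantile(scores, 0.1)                  # arbitrary cutoff for this sketch
print("flagged outliers:", np.where(scores < threshold)[0])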