Columns: repo_name (string, length 6-130), hexsha (list), file_path (list), code (list), apis (list)
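Each record below pairs one repository with a sampled source file: the commit hash it was taken at, its path, the full file contents, and the library calls detected in it. As a purely illustrative sketch of how a record with this schema could be handled in Python (the TypedDict and the summarize helper are assumptions for illustration, not part of the dump; the sample values are copied from the first record):

    from typing import List, TypedDict

    class CodeRecord(TypedDict):
        repo_name: str          # e.g. "timgates42/kapre"
        hexsha: List[str]       # commit hash(es) the file was sampled at
        file_path: List[str]    # path(s) of the sampled file(s)
        code: List[str]         # full file contents, one string per file
        apis: List[List[str]]   # fully qualified calls detected in each file

    def summarize(record: CodeRecord) -> str:
        """One-line summary: repository, file path, and number of detected API calls."""
        return f"{record['repo_name']}: {record['file_path'][0]} ({len(record['apis'][0])} APIs)"

    record: CodeRecord = {
        "repo_name": "timgates42/kapre",
        "hexsha": ["e0fffbbd2f9a8d1bcc4d337d15389d059646b2a8"],
        "file_path": ["tests/test_time_frequency.py"],
        "code": ["..."],          # file text elided in this example
        "apis": [["numpy.testing.assert_allclose", "tensorflow.keras.models.Sequential"]],
    }
    print(summarize(record))      # timgates42/kapre: tests/test_time_frequency.py (2 APIs)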
timgates42/kapre
[ "e0fffbbd2f9a8d1bcc4d337d15389d059646b2a8" ]
[ "tests/test_time_frequency.py" ]
[ "import pytest\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras\nimport librosa\nfrom kapre import STFT, Magnitude, Phase, Delta, InverseSTFT, ApplyFilterbank\nfrom kapre.composed import (\n get_melspectrogram_layer,\n get_log_frequency_spectrogram_layer,\n get_stft_mag_phase,\n get_perfectly_reconstructing_stft_istft,\n get_stft_magnitude_layer,\n)\n\nfrom utils import get_audio, save_load_compare\n\n\ndef _num_frame_valid(nsp_src, nsp_win, len_hop):\n \"\"\"Computes the number of frames with 'valid' setting\"\"\"\n return (nsp_src - (nsp_win - len_hop)) // len_hop\n\n\ndef _num_frame_same(nsp_src, len_hop):\n \"\"\"Computes the number of frames with 'same' setting\"\"\"\n return int(np.ceil(float(nsp_src) / len_hop))\n\n\ndef allclose_phase(a, b, atol=1e-3):\n \"\"\"Testing phase.\n Remember that a small error in complex value may lead to a large phase difference\n if the norm is very small.\n\n Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.\n\n \"\"\"\n np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)\n np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol)\n\n\ndef allclose_complex_numbers(a, b, atol=1e-3):\n np.testing.assert_equal(np.shape(a), np.shape(b))\n np.testing.assert_allclose(np.abs(a), np.abs(b), rtol=1e-5, atol=atol)\n np.testing.assert_allclose(np.real(a), np.real(b), rtol=1e-5, atol=atol)\n np.testing.assert_allclose(np.imag(a), np.imag(b), rtol=1e-5, atol=atol)\n\n\n@pytest.mark.parametrize('n_fft', [1000])\n@pytest.mark.parametrize('hop_length', [None, 256])\n@pytest.mark.parametrize('n_ch', [1, 2, 6])\n@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])\ndef test_spectrogram_correctness(n_fft, hop_length, n_ch, data_format):\n def _get_stft_model(following_layer=None):\n # compute with kapre\n stft_model = tensorflow.keras.models.Sequential()\n stft_model.add(\n STFT(\n n_fft=n_fft,\n win_length=win_length,\n hop_length=hop_length,\n window_fn=None,\n pad_end=False,\n input_data_format=data_format,\n output_data_format=data_format,\n input_shape=input_shape,\n name='stft',\n )\n )\n if following_layer is not None:\n stft_model.add(following_layer)\n return stft_model\n\n src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)\n win_length = n_fft # test with x2\n # compute with librosa\n S_ref = librosa.core.stft(\n src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False\n ).T # (time, freq)\n\n S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1\n S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch\n if data_format == 'channels_first':\n S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq\n\n stft_model = _get_stft_model()\n\n S_complex = stft_model.predict(batch_src)[0] # 3d representation\n allclose_complex_numbers(S_ref, S_complex)\n\n # test Magnitude()\n stft_mag_model = _get_stft_model(Magnitude())\n S = stft_mag_model.predict(batch_src)[0] # 3d representation\n np.testing.assert_allclose(np.abs(S_ref), S, atol=2e-4)\n\n # # test Phase()\n stft_phase_model = _get_stft_model(Phase())\n S = stft_phase_model.predict(batch_src)[0] # 3d representation\n allclose_phase(np.angle(S_complex), S)\n\n\n@pytest.mark.parametrize('n_fft', [512])\n@pytest.mark.parametrize('sr', [22050])\n@pytest.mark.parametrize('hop_length', [None, 256])\n@pytest.mark.parametrize('n_ch', [2])\n@pytest.mark.parametrize('data_format', ['default', 'channels_first', 
'channels_last'])\n@pytest.mark.parametrize('amin', [1e-5, 1e-3])\n@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])\n@pytest.mark.parametrize('n_mels', [40])\n@pytest.mark.parametrize('mel_f_min', [0.0])\n@pytest.mark.parametrize('mel_f_max', [8000])\ndef test_melspectrogram_correctness(\n n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max\n):\n \"\"\"Test the correctness of melspectrogram.\n\n Note that mel filterbank is tested separated\n\n \"\"\"\n\n def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):\n # compute with kapre\n melgram_model = get_melspectrogram_layer(\n n_fft=n_fft,\n sample_rate=sr,\n n_mels=n_mels,\n mel_f_min=mel_f_min,\n mel_f_max=mel_f_max,\n win_length=win_length,\n hop_length=hop_length,\n input_data_format=data_format,\n output_data_format=data_format,\n return_decibel=return_decibel,\n input_shape=input_shape,\n db_amin=amin,\n db_dynamic_range=dynamic_range,\n )\n return melgram_model\n\n src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)\n\n win_length = n_fft # test with x2\n # compute with librosa\n S_ref = librosa.feature.melspectrogram(\n src_mono,\n sr=sr,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n center=False,\n power=1.0,\n n_mels=n_mels,\n fmin=mel_f_min,\n fmax=mel_f_max,\n ).T\n\n S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1\n S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch\n\n if data_format == 'channels_first':\n S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq\n\n # melgram\n melgram_model = _get_melgram_model(\n return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0\n )\n S = melgram_model.predict(batch_src)[0] # 3d representation\n np.testing.assert_allclose(S_ref, S, atol=1e-4)\n\n # log melgram\n melgram_model = _get_melgram_model(\n return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range\n )\n S = melgram_model.predict(batch_src)[0] # 3d representation\n S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)\n\n np.testing.assert_allclose(\n S_ref_db, S, rtol=3e-3\n ) # decibel is evaluated with relative tolerance\n\n\n@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])\ndef test_log_spectrogram_runnable(data_format):\n \"\"\"test if log spectrogram layer works well\"\"\"\n src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=1)\n _ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)\n _ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False)\n\n\n@pytest.mark.xfail\ndef test_log_spectrogram_fail():\n \"\"\"test if log spectrogram layer works well\"\"\"\n src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)\n _ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200)\n\n\ndef test_delta():\n \"\"\"test delta layer\"\"\"\n specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)\n specgrams = np.reshape(specgrams, (1, -1, 1, 1)) # (b, t, f, ch)\n delta_model = tensorflow.keras.models.Sequential()\n delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))\n\n delta_kapre = delta_model(specgrams)\n delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)\n delta_ref = np.reshape(delta_ref, (1, -1, 1, 1)) # (b, t, f, ch)\n\n np.testing.assert_allclose(delta_kapre, delta_ref)\n\n\n@pytest.mark.parametrize('data_format', 
['default', 'channels_first', 'channels_last'])\ndef test_mag_phase(data_format):\n n_ch = 1\n n_fft, hop_length, win_length = 512, 256, 512\n\n src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)\n\n mag_phase_layer = get_stft_mag_phase(\n input_shape=input_shape,\n n_fft=n_fft,\n win_length=win_length,\n hop_length=hop_length,\n input_data_format=data_format,\n output_data_format=data_format,\n )\n model = tensorflow.keras.models.Sequential()\n model.add(mag_phase_layer)\n mag_phase_kapre = model(batch_src)[0] # a 2d image shape\n\n ch_axis = 0 if data_format == 'channels_first' else 2 # non-batch\n mag_phase_ref = np.stack(\n librosa.magphase(\n librosa.stft(\n src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False,\n ).T\n ),\n axis=ch_axis,\n )\n np.testing.assert_equal(mag_phase_kapre.shape, mag_phase_ref.shape)\n # magnitude test\n np.testing.assert_allclose(\n np.take(mag_phase_kapre, [0,], axis=ch_axis),\n np.take(mag_phase_ref, [0,], axis=ch_axis),\n atol=2e-4,\n )\n # phase test - todo - yeah..\n\n\n@pytest.mark.parametrize('waveform_data_format', ['default', 'channels_first', 'channels_last'])\n@pytest.mark.parametrize('stft_data_format', ['default', 'channels_first', 'channels_last'])\n@pytest.mark.parametrize('hop_ratio', [0.5, 0.25, 0.125])\ndef test_perfectly_reconstructing_stft_istft(waveform_data_format, stft_data_format, hop_ratio):\n n_ch = 1\n src_mono, batch_src, input_shape = get_audio(data_format=waveform_data_format, n_ch=n_ch)\n time_axis = 1 if waveform_data_format == 'channels_first' else 0 # non-batch!\n len_src = input_shape[time_axis]\n\n n_fft = 2048\n hop_length = int(2048 * hop_ratio)\n n_added_frames = int(1 / hop_ratio) - 1\n\n stft, istft = get_perfectly_reconstructing_stft_istft(\n stft_input_shape=input_shape,\n n_fft=n_fft,\n hop_length=hop_length,\n waveform_data_format=waveform_data_format,\n stft_data_format=stft_data_format,\n )\n # Test - [STFT -> ISTFT]\n model = tf.keras.models.Sequential([stft, istft])\n\n recon_waveform = model(batch_src)\n\n # trim off the pad_begin part\n len_pad_begin = n_fft - hop_length\n if waveform_data_format == 'channels_first':\n recon_waveform = recon_waveform[:, :, len_pad_begin : len_pad_begin + len_src]\n else:\n recon_waveform = recon_waveform[:, len_pad_begin : len_pad_begin + len_src, :]\n\n np.testing.assert_allclose(batch_src, recon_waveform, atol=1e-5)\n\n # Test - [ISTFT -> STFT]\n S = librosa.stft(src_mono, n_fft=n_fft, hop_length=hop_length).T.astype(\n np.complex64\n ) # (time, freq)\n\n ch_axis = 1 if stft_data_format == 'channels_first' else 3 # batch shape\n S = np.expand_dims(S, (0, ch_axis))\n model = tf.keras.models.Sequential([istft, stft])\n recon_S = model(S)\n\n # trim off the frames coming from zero-pad result\n n = n_added_frames\n n_added_frames += n\n if stft_data_format == 'channels_first':\n if n != 0:\n S = S[:, :, n:-n, :]\n recon_S = recon_S[:, :, n_added_frames:-n_added_frames, :]\n else:\n if n != 0:\n S = S[:, n:-n, :, :]\n recon_S = recon_S[:, n_added_frames:-n_added_frames, :, :]\n\n np.testing.assert_equal(S.shape, recon_S.shape)\n allclose_complex_numbers(S, recon_S)\n\n\ndef test_save_load():\n \"\"\"test saving/loading of models that has stft, melspectorgrma, and log frequency.\"\"\"\n\n src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)\n # test STFT save/load\n save_load_compare(\n STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers\n )\n # test 
melspectrogram save/load\n save_load_compare(\n get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),\n batch_src,\n np.testing.assert_allclose,\n )\n # test log frequency spectrogram save/load\n save_load_compare(\n get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),\n batch_src,\n np.testing.assert_allclose,\n )\n # test stft_mag_phase\n save_load_compare(\n get_stft_mag_phase(input_shape=input_shape, return_decibel=True),\n batch_src,\n np.testing.assert_allclose,\n )\n # test stft mag\n save_load_compare(\n get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose\n )\n\n\n@pytest.mark.xfail()\n@pytest.mark.parametrize('layer', [STFT, InverseSTFT])\ndef test_wrong_input_data_format(layer):\n _ = layer(input_data_format='weird_string')\n\n\n@pytest.mark.xfail()\n@pytest.mark.parametrize('layer', [STFT, InverseSTFT])\ndef test_wrong_input_data_format(layer):\n _ = layer(output_data_format='weird_string')\n\n\n@pytest.mark.xfail()\n@pytest.mark.parametrize('layer', [Delta, ApplyFilterbank])\ndef test_wrong_data_format(layer):\n _ = layer(data_format='weird_string')\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.sin", "numpy.angle", "numpy.reshape", "numpy.testing.assert_equal", "numpy.tile", "numpy.real", "numpy.shape", "numpy.take", "numpy.transpose", "tensorflow.keras.models.Sequential", "numpy.cos", "numpy.abs", "numpy.imag", "numpy.expand_dims" ] ]
jiaxx/temporal_learning_paper
[ "abffd5bfb36aaad7139485a9b8bd29f3858389e8" ]
[ "code/learningutil.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 4 16:39:50 2013\n\n@author: Xiaoxuan Jia\n\"\"\"\n\nimport json\nimport csv\nimport re\nimport scipy.io\nimport scipy.stats\nimport random\nimport numpy as np\nimport os\nimport itertools\nimport cPickle as pk\nimport pymongo\nimport scipy\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\ndef SBcorrection(corr, mult_factor):\n pred = (mult_factor*corr)/(1+(mult_factor-1)*corr)\n return pred\n\ndef normalize_CM(CF):\n new_CF = np.zeros(np.shape(CF))\n for col in range(0, np.shape(CF)[1]):\n total = np.sum(CF[:,col])\n norm_col = CF[:,col]/float(total)\n new_CF[:,col] = norm_col\n return new_CF\n\ndef d_prime2x2(CF):\n H = CF[0,0]/(CF[0,0]+CF[1,0]) # H = hit/(hit+miss)\n F = CF[0,1]/(CF[0,1]+CF[1,1]) # F = False alarm/(false alarm+correct rejection)\n if H == 1:\n H = 1-1/(2*(CF[0,0]+CF[1,0]))\n if H == 0:\n H = 0+1/(2*(CF[0,0]+CF[1,0]))\n if F == 0:\n F = 0+1/(2*(CF[0,1]+CF[1,1]))\n if F == 1:\n F = 1-1/(2*(CF[0,1]+CF[1,1]))\n d = norm.ppf(H)-norm.ppf(F)\n return d\n\ndef d_prime(CF): #have problem when called by module name, artificially change to n by 5 matrix\n d = []\n for i in range(len(CF[0][1])):\n H = CF[0][i, i]/sum(CF[0][:,i]) # H = target diagnal/target column\n tempCF = scipy.delete(CF[0], i, 1) # delete the target column\n F = sum(tempCF[i,:])/sum(tempCF)\n #if H == 1:\n # H = 1-1/(2*sum(CF[0][:,i]))\n #if H == 0:\n # H = 0+1/(2*sum(CF[0][:,i]))\n #if F == 0:\n # F = 0+1/(2*sum(tempCF))\n #if F == 1:\n # F = 1-1/(2*sum(tempCF))\n d.append(norm.ppf(H)-norm.ppf(F))\n return d\n\ndef offDmass(CF):\n return sum(CF[np.eye(CF.shape[0])==0]/float(sum(CF)))\n\nclass expDataDB(object):\n \n def __init__(self, collection, selector, numObjs, obj, trialNum):\n\n\n conn = pymongo.Connection(port = 22334, host = 'localhost')\n db = conn.mturk\n col = db[collection]\n \n self.obj = obj\n self.trialNum = trialNum\n self.subj_data = list(col.find(selector))\n self.numObjs = numObjs\n\n if obj != 'face':\n obj_inds = []\n for idx, t in enumerate(self.subj_data[0]['ImgData']):\n if len(np.unique(obj_inds)) == self.numObjs:\n break\n else:\n if len(t)<10:\n obj_inds.append(t[0]['obj'])\n else:\n obj_inds.append(t['obj'])\n\n self.models = np.unique(obj_inds)\n\n self.models_idxs = {}\n for idx, model in enumerate(self.models):\n self.models_idxs[model] = idx\n self.models_idxs = self.models_idxs\n\n\n self.trial_data = self.preprocess(self.subj_data, self.obj, self.trialNum)\n self.numResp = numObjs \n self.totalTrials = len(self.trial_data)\n self.corr_type = 'pearson'\n \n def init_from_pickle(self, pkFile):\n f = open(pkFile, 'rb')\n data = pk.load(f)\n f.close()\n self.subj_data = data\n self.trial_data = self.preprocess(self.subj_data)\n self.totalTrials = len(self.trial_data)\n \n def setPopCM(self):\n if self.numResp == 2:\n self.popCM, self.CM_order = self.getPopCM2x2fast(self.trial_data)\n else:\n self.popCM, self.CM_order = self.getPopCM(self.trial_data)\n \n def preprocess(self, subj_data, obj, trialNum): \n # before the fb experiment, the HvM metadata, uploaded urls dont have unique hash id in the url, after feedback exp, both meta and the pushed json files changed\n RV = [] #Response vector\n SV = [] #Stimulus vector\n DV = [] #Distractor vector\n if obj=='face':\n RV = [] #Response vector\n DV = [] #Distractor vector\n RT = []\n for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']\n models_name = np.unique(subj['Response'])\n 
models_size = np.unique(subj['Size'])\n self.models = []\n for idx1 in models_name:\n for idx2 in models_size:\n self.models.append([str(idx1)+'_'+str(idx2)])\n models_idxs = {}\n for idx, model in enumerate(self.models):\n models_idxs[tuple(model)] = idx\n self.models_idxs = models_idxs\n\n for t_idx, t in enumerate(subj['RT']):\n if t_idx>=trialNum[0] and t_idx<trialNum[1]:\n RT.append(t)\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n RV.append([str(r)+'_'+str(subj['Size'][r_idx])])\n for s_idx, s in enumerate(subj['StimShown']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n DV.append([str(s)+'_'+str(subj['Size'][s_idx])])\n\n elif obj=='obj_lack':\n RV_s = [] #Response vector\n DV_s = [] #Distractor vector\n RV_p = [] \n DV_p = []\n RV_r = [] \n DV_r = []\n RV = []\n DV = []\n for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']\n self.models = np.unique(subj['Response'])\n models_idxs = {}\n for idx, model in enumerate(self.models):\n models_idxs[tuple(model)] = idx\n self.models_idxs = models_idxs\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n if subj['ImgData'][r_idx]['tname'] == 'obj_size':\n RV_s.append(r)\n elif subj['ImgData'][r_idx]['tname'] == 'position':\n RV_p.append(r)\n elif subj['ImgData'][r_idx]['tname'] == 'rotation':\n RV_r.append(r)\n else: #'objectome32'\n RV.append(r)\n\n for s_idx, s in enumerate(subj['StimPresent']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n if subj['ImgData'][s_idx]['tname'] == 'obj_size':\n DV_s.append(s)\n elif subj['ImgData'][s_idx]['tname'] == 'position':\n DV_p.append(s)\n elif subj['ImgData'][s_idx]['tname'] == 'rotation':\n DV_r.append(s)\n else:\n DV.append(s)\n\n elif obj=='obj':\n RV_s = [] #Response vector\n DV_s = [] #Distractor vector\n RV_p = [] \n DV_p = []\n RV_r = [] \n DV_r = []\n RV = []\n DV = []\n for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']\n self.models = np.unique(subj['Response'])\n models_idxs = {}\n for idx, model in enumerate(self.models):\n models_idxs[tuple(model)] = idx\n self.models_idxs = models_idxs\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':\n RV_s.append(r)\n elif subj['ImgData'][r_idx][0]['tname'] == 'position':\n RV_p.append(r)\n elif subj['ImgData'][r_idx][0]['tname'] == 'rotation':\n RV_r.append(r)\n else: #'objectome32'\n RV.append(r)\n\n for s_idx, s in enumerate(subj['StimPresent']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':\n DV_s.append(s)\n elif subj['ImgData'][s_idx][0]['tname'] == 'position':\n DV_p.append(s)\n elif subj['ImgData'][s_idx][0]['tname'] == 'rotation':\n DV_r.append(s)\n else:\n DV.append(s)\n\n elif obj=='2way':\n\n RV = [] #Response vector\n DV = [] #Distractor vector\n RV_s = [] #Response vector\n DV_s = [] #Distractor vector\n SV_s = []\n SV = []\n for subj in self.subj_data:\n\n for t_idx, t in enumerate(subj['ImgData']):\n if t_idx>=trialNum[0] and t_idx<trialNum[1]:\n if subj['ImgData'][t_idx][0]['tname'] == 'obj_size':\n SV_s.append([t[1]['obj'],t[2]['obj']])\n else: #'objectome32'\n SV.append([t[1]['obj'],t[2]['obj']])\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n if 
subj['ImgData'][r_idx][0]['tname'] == 'obj_size':\n RV_s.append(r)\n else: #'objectome32'\n RV.append(r)\n\n for s_idx, s in enumerate(subj['StimPresent']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':\n DV_s.append(s)\n else:\n DV.append(s)\n\n elif obj=='2way_face':\n\n RV = [] #Response vector\n DV = [] #Distractor vector\n RV_s = [] #Response vector\n DV_s = [] #Distractor vector\n SV_s = []\n SV = []\n for subj in self.subj_data:\n\n for t_idx, t in enumerate(subj['ImgData']):\n if t_idx>=trialNum[0] and t_idx<trialNum[1]:\n if subj['ImgData'][t_idx][0]['var'] == 'V0_size':\n SV_s.append([t[1]['obj'],t[2]['obj']])\n else: #'objectome32'\n SV.append([t[1]['obj'],t[2]['obj']])\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n if subj['ImgData'][r_idx][0]['var'] == 'V0_size':\n RV_s.append(r)\n else: #'objectome32'\n RV.append(r)\n\n for s_idx, s in enumerate(subj['StimPresent']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n if subj['ImgData'][s_idx][0]['var'] == 'V0_size':\n DV_s.append(s)\n else:\n DV.append(s)\n\n\n else:\n RV = [] #Response vector\n DV = [] #Distractor vector\n for subj in subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']\n self.models = np.unique(subj['TestStim'])\n models_idxs = {}\n for idx, model in enumerate(self.models):\n models_idxs[tuple(model)] = idx\n self.models_idxs = models_idxs\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n RV.append(r)\n for s_idx, s in enumerate(subj['StimPresent']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n DV.append(s)\n\n\n if obj=='obj':\n new_data_s = []\n new_data_p = []\n new_data_r = []\n new_data = []\n for idx, shown in enumerate(DV_s):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV_s[idx])] #response\n new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n for idx, shown in enumerate(DV_p):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV_p[idx])] #response\n new_data_p.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n for idx, shown in enumerate(DV_r):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV_r[idx])] #response\n new_data_r.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n for idx, shown in enumerate(DV):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV[idx])] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n return [new_data_s, new_data_p, new_data_r, new_data]\n\n elif obj=='2way':\n new_data_s = []\n new_data = []\n for idx, shown in enumerate(DV_s):\n model = shown\n CF_col_idx = self.models_idxs[model] #stimulus shown\n CF_row_idx = self.models_idxs[RV_s[idx]] #response\n new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors\n for idx, shown in enumerate(DV):\n model = shown\n CF_col_idx = self.models_idxs[model] 
#stimulus shown\n CF_row_idx = self.models_idxs[RV[idx]] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors\n return [new_data_s, new_data]\n\n elif obj=='2way_face':\n new_data_s = []\n new_data = []\n for idx, shown in enumerate(DV_s):\n model = shown\n CF_col_idx = self.models_idxs[model] #stimulus shown\n CF_row_idx = self.models_idxs[RV_s[idx]] #response\n new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors\n for idx, shown in enumerate(DV):\n model = shown\n CF_col_idx = self.models_idxs[model] #stimulus shown\n CF_row_idx = self.models_idxs[RV[idx]] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors\n return [new_data_s, new_data]\n\n elif obj=='face':\n new_data = []\n for idx, shown in enumerate(DV):\n if RT[idx]<3000:\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV[idx])] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n return new_data\n\n else:\n new_data = []\n for idx, shown in enumerate(DV):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV[idx])] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n return new_data\n\n\n \n def getPopCM2x2fast(self, trial_data):\n combs = list(itertools.combinations(range(0, self.numObjs), 2))\n CMs = {}\n for c in combs:\n CMs[c] = np.zeros((2,2))\n for t in trial_data: # each trial can only increase +1 in total; statistics is based on many trials\n target = t[0]\n pick = t[1]\n cm = tuple(sorted(t[2])) #Because itertools always spits out the combs in sorted order; the two-way task is designed for each pair, either target is presented with equal times\n if target == cm[0]: #stimulus = True: when the signal present\n if target == pick: #response = true; Hit\n CMs[cm][0,0] += 1\n else: # response = False; Miss\n CMs[cm][1,0] += 1\n else: # stimulus = False; when the signal does not present\n if target == pick: # response = false; correct rejection\n CMs[cm][1,1] += 1 \n else: # response = true; false alarm\n CMs[cm][0,1] += 1\n return [CMs[c] for c in combs], combs\n \n def getPopCM(self, trial_data, order=[]): # trial_data is for individual subj or for all subj (myresult.trial_data)\n if len(trial_data[0][2]) != len(self.trial_data[0][2]):\n numResp = len(trial_data[0][2]) # should not use self.trial_data\n else:\n numResp = len(self.trial_data[0][2])\n #print numResp\n obj_inds = []\n for t in trial_data:\n if len(np.unique(obj_inds)) == self.numObjs:\n break\n else:\n obj_inds.append(t[0])\n\n if len(np.unique(obj_inds)) != self.numObjs:\n obj_inds = range(self.numObjs)\n else:\n obj_inds = obj_inds\n\n combs = list(itertools.combinations(np.unique(obj_inds), numResp)) \n CMs = [np.zeros((numResp, numResp)) for i in range(0, len(combs))]\n for trial in trial_data:\n distractor = [m for m in trial[2] if m != trial[0]]\n target = trial[0]\n pick = trial[1]\n possCombs = [[comb, idx] for idx, comb in enumerate(combs) if target in comb]\n for comb in possCombs:\n if set(distractor).issubset(set(comb[0])):\n if len(order) > 0:\n comb[0] = order\n if pick == target:\n idx = comb[0].index(pick)\n 
CMs[comb[1]][idx, idx] += 1\n elif pick != target:\n CMs[comb[1]][comb[0].index(pick), comb[0].index(target)] += 1\n else:\n print('Matrix Error')\n return CMs, combs\n\n\n def getexposureCM(self, trial_data, trialNum, expoNum): # trial_data is for individual subj or for all subj (myresult.trial_data)\n if len(trial_data[0][2]) != len(self.trial_data[0][2]):\n numResp = len(trial_data[0][2]) # should not use self.trial_data\n else:\n numResp = len(self.trial_data[0][2])\n #print numResp\n obj_inds = []\n for t in trial_data:\n if len(np.unique(obj_inds)) == self.numObjs:\n break\n else:\n obj_inds.append(t[0])\n\n condi = self.subj_data[0]['Combinations']\n newcondi = []\n s1 = set(['NONSWAP', 'SWAP'])\n for subj in self.subj_data:\n s2 = set(subj.keys())\n for s in subj[list(s1.intersection(s2))[0]]:\n newcondi.append([x for idx, x in enumerate(condi[int(s)]) if idx>= expoNum[0] and idx<expoNum[1]]) #need to modify if the total number of condtion change\n\n if len(newcondi) != len(trial_data):\n print('trial number inconsistent')\n else:\n print(str(len(trial_data)))\n\n RV = [] #Response vector\n DV = [] #Distractor vector\n\n for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']\n models = np.unique(subj['Response'])\n self.models = []\n for idx in models:\n self.models.append(idx)\n models_idxs = {}\n for idx, model in enumerate(self.models):\n models_idxs[tuple(model)] = idx\n self.models_idxs = models_idxs\n\n for r_idx, r in enumerate(subj['Response']):\n if r_idx>=trialNum[0] and r_idx<trialNum[1]:\n RV.append(r)\n for s_idx, s in enumerate(subj['StimShown']):\n if s_idx>=trialNum[0] and s_idx<trialNum[1]:\n DV.append(s)\n\n new_data = []\n for idx, shown in enumerate(DV):\n model = shown\n CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown\n CF_row_idx = self.models_idxs[tuple(RV[idx])] #response\n new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors\n return newcondi, new_data\n \n def computeSplitHalf_size(self, numSplits, subsample, verbose = False, correct = True, plot_ = False): #subsample equal to total trial number if don't want to subsample\n import scipy.stats\n trial_data = self.trial_data\n Rs = []\n for s in range(0, numSplits):\n if verbose == True:\n print(s)\n else:\n pass\n np.random.shuffle(trial_data)\n if int(subsample)%2 == 0:\n half1.extend(t[0:subsample/2])\n half2.extend(t[-subsample/2:])\n else:\n half1.extend(t[0:subsample/2+1])\n half2.extend(t[-subsample/2:])\n\n if self.numResp == 2:\n CM1, combs = self.getPopCM2x2fast(half1)\n CM2, combs = self.getPopCM2x2fast(half2)\n else:\n CM1, combs = self.getPopCM(half1)\n CM2, combs = self.getPopCM(half2)\n half1_array = []\n half2_array = []\n for mat in range(0, len(CM1)):\n newarray = np.reshape(normalize_CM(CM1[mat]),(CM1[mat].shape[0]*CM1[mat].shape[1],-1))\n half1_array += list([x for x in newarray if x!=0])\n newarray = np.reshape(normalize_CM(CM2[mat]),(CM2[mat].shape[0]*CM2[mat].shape[1],-1))\n half2_array += list([x for x in newarray if x!=0])\n if self.corr_type == 'pearson':\n Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])\n #correct = False\n else:\n Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])\n if plot_ == True:\n plt.plot(half1_array, half2_array, 'b.')\n if correct == False:\n return Rs\n else:\n Rs_c = [SBcorrection(r, 2) for r in Rs]\n return Rs_c\n\n def computeSplitHalf_dprime(self, 
pair_trial_data, boot, starttrial, verbose = False, correct = True, plot_ = False, trial_data = None): #subsample equal to total trial number if don't want to subsample\n import scipy.stats\n\n count = [len(trial) for trial in pair_trial_data]\n\n corr_dprime = []\n for i in range(boot):\n temp = []\n for w in range(min(count)-starttrial+1):\n a = [random.sample(trial, w+starttrial) for trial in pair_trial_data]\n subsample = len(a[0])\n Rs = []\n for b in range(boot):\n half1 = []\n half2 = []\n for t in a:\n np.random.shuffle(t)\n if int(subsample)%2 == 0:\n half1.extend(t[0:subsample/2])\n half2.extend(t[-subsample/2:])\n else:\n half1.extend(t[0:subsample/2+1])\n half2.extend(t[-subsample/2:])\n CM1, combs = self.getPopCM2x2fast(half1)\n CM2, combs = self.getPopCM2x2fast(half2)\n \n half1_dprime = []\n half2_dprime = []\n for mat in range(0, len(CM1)):\n half1_dprime.append(d_prime2x2(CM1[mat])) # previously normalized CM, which caused nan when divided by 0\n half2_dprime.append(d_prime2x2(CM2[mat]))\n \n Rs.append(scipy.stats.spearmanr(half1_dprime, half2_dprime)[0])\n \n temp.append(np.ma.masked_invalid(Rs).mean(0))\n corr_dprime.append(temp)\n return corr_dprime\n\n def computeSplitHalf(self, numSplits, subsample, verbose = False, correct = True, plot_ = False, trial_data = None): #subsample equal to total trial number if don't want to subsample\n import scipy.stats\n if trial_data == None:\n trial_data = self.trial_data\n else:\n trial_data = trial_data\n\n Rs = []\n for s in range(0, numSplits):\n if verbose == True:\n print(s)\n else:\n pass\n np.random.shuffle(trial_data)\n\n half1 = []\n half2 = []\n if int(subsample)%2 == 0:\n half1.extend(trial_data[0:subsample/2])\n half2.extend(trial_data[-subsample/2:])\n else:\n half1.extend(trial_data[0:subsample/2+1])\n half2.extend(trial_data[-subsample/2:])\n\n if self.numResp == 2:\n CM1, combs = self.getPopCM2x2fast(half1)\n CM2, combs = self.getPopCM2x2fast(half2)\n else:\n CM1, combs = self.getPopCM(half1)\n CM2, combs = self.getPopCM(half2)\n\n half1_array = []\n half2_array = []\n for mat in range(0, len(CM1)):\n half1_array += list(normalize_CM(CM1[mat])[np.eye(CM1[mat].shape[0])==0])\n half2_array += list(normalize_CM(CM2[mat])[np.eye(CM2[mat].shape[0])==0])\n if self.corr_type == 'pearson':\n Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])\n #correct = False\n else:\n Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])\n if plot_ == True:\n plt.plot(half1_array, half2_array, 'b.')\n if correct == False:\n return Rs\n else:\n Rs_c = [SBcorrection(r, 2) for r in Rs]\n return Rs_c\n \n def imputeNtoM(self, use_objects):\n #Produces a single imputed matrix of a given size for given objects. The matrix will have blank entries\n #if you ask for a greater size than is given by the number of objects represented by your data\n obj_inds = []\n for t in self.trial_data:\n if len(np.unique(obj_inds)) == self.numObjs:\n break\n else:\n obj_inds.append(t[0])\n t = []\n for obj in use_objects:\n t.append(self.models.index(obj))\n import itertools\n combs = list(itertools.combinations(t, self.numResp))\n CM_imputed = np.zeros((len(t),len(t)))\n for trial in self.trial_data:\n for comb in combs:\n if set(comb).issubset(set(trial[2])):\n if trial[0] == trial[1]:\n CM_imputed[t.index(trial[0]), t.index(trial[0])] += 1\n else:\n CM_imputed[t.index(trial[1]), t.index(trial[0])] += 1\n return CM_imputed\n\n\n " ]
[ [ "scipy.stats.norm.ppf", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.plot", "scipy.stats.spearmanr", "numpy.ma.masked_invalid", "numpy.shape", "numpy.random.shuffle", "numpy.eye", "scipy.stats.pearsonr", "scipy.delete", "numpy.unique" ] ]
hsmohammed/rudaux
[ "673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7" ]
[ "scripts/canvas.py" ]
[ "import requests\nimport urllib.parse\nimport posixpath\nimport pandas as pd\n\ndef get_enrollment_dates(course):\n '''Takes a course object and returns student dates of enrollment.\n Useful for handling late registrations and modified deadlines.\n\n Example:\n course.get_enrollment_date()'''\n url_path = posixpath.join(\"api\", \"v1\", \"courses\", course['course_id'], \"enrollments\")\n api_url = urllib.parse.urljoin(course['hostname'], url_path)\n token = course['token']\n resp = None\n students = []\n while resp is None or resp.links['current']['url'] != resp.links['last']['url']:\n resp = requests.get(\n url = api_url if resp is None else resp.links['next']['url'],\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Accept\": \"application/json+canvas-string-ids\"\n },\n json={\n \"type\": [\"StudentEnrollment\"],\n \"per_page\":\"100\"\n }\n )\n students.extend(resp.json())\n\n enrollment_dates = {}\n for st in students:\n enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16]\n return enrollment_dates\n\ndef get_assignments(course):\n '''Takes a course object and returns\n a Pandas data frame with all existing assignments and their attributes/data\n\n Example:\n course.get_assignments()'''\n url_path = posixpath.join(\"api\", \"v1\", \"courses\", course['course_id'], \"assignments\")\n api_url = urllib.parse.urljoin(course['hostname'], url_path)\n token = course['token']\n resp = requests.get(\n url=api_url,\n headers={\n \"Authorization\": f\"Bearer {token}\",\n \"Accept\": \"application/json+canvas-string-ids\"\n },\n json={\n \"per_page\": \"10000\"\n },\n )\n assignments = resp.json()\n assign_data = pd.DataFrame.from_dict(assignments)\n return assign_data\n\ndef get_assignment_lock_date(course, assignment):\n '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')'''\n assignments = get_assignments(course)\n assignments = assignments[['name', 'lock_at']].query('name == @assignment')\n lock_date = assignments['lock_at'].to_numpy()[0]\n if lock_date is None:\n return lock_date\n lock_date = lock_date.replace(\"T\", \"-\")\n lock_date = lock_date.replace(\":\", \"-\")\n return lock_date[:16]\n\n\n\ndef get_assignment_due_date(course, assignment):\n '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')'''\n assignments = get_assignments(course)\n assignments = assignments[['name', 'due_at']].query('name == @assignment')\n due_date = assignments['due_at'].to_numpy()[0]\n if due_date is None:\n return due_date\n due_date = due_date.replace(\"T\", \"-\")\n due_date = due_date.replace(\":\", \"-\")\n return due_date[:16]\n\ndef get_assignment_unlock_date(course, assignment):\n '''Takes a course object and the name of a Canvas assignment and returns the due date. 
Returns None if no due date assigned.\n \n Example:\n course.get_assignment_unlock_date('worksheet_01')'''\n assignments = get_assignments(course)\n assignments = assignments[['name', 'unlock_at']].query('name == @assignment')\n unlock_date = assignments['unlock_at'].to_numpy()[0]\n if unlock_date is None:\n return unlock_date\n unlock_date = unlock_date.replace(\"T\", \"-\").replace(':', '-')\n return unlock_date[:16]\n\n\ndef get_assignment_id(course, assignment):\n '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID.\n \n Example:\n course.get_assignment_id('worksheet_01')'''\n assignments = get_assignments(course)\n assignments = assignments[['name', 'id']].query('name == @assignment')\n return assignments['id'].values[0]\n\ndef get_grades(course, assignment):\n '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')'''\n assignment_id = get_assignment_id(course, assignment)\n url_path = posixpath.join(\"api\", \"v1\", \"courses\", course['course_id'], \"assignments\", assignment_id, \"submissions\")\n api_url = urllib.parse.urljoin(course['hostname'], url_path)\n token = course['token']\n\n resp = None\n scores = {}\n while resp is None or resp.links['current']['url'] != resp.links['last']['url']:\n resp = requests.get(\n url = api_url if resp is None else resp.links['next']['url'],\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Accept\": \"application/json+canvas-string-ids\"\n },\n json={\n \"per_page\":\"100\"\n }\n )\n scores.update( {res['user_id'] : res['score'] for res in resp.json()} )\n return scores\n\ndef grades_need_posting(course, assignment):\n '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')'''\n assignment_id = get_assignment_id(course, assignment)\n url_path = posixpath.join(\"api\", \"v1\", \"courses\", course['course_id'], \"assignments\", assignment_id, \"submissions\")\n api_url = urllib.parse.urljoin(course['hostname'], url_path)\n token = course['token']\n\n #get enrollments to avoid the test student's submissions\n real_stu_ids = list(get_enrollment_dates(course).keys())\n \n resp = None\n posted_flags = []\n while resp is None or resp.links['current']['url'] != resp.links['last']['url']:\n resp = requests.get(\n url = api_url if resp is None else resp.links['next']['url'],\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Accept\": \"application/json+canvas-string-ids\"\n },\n json={\n \"per_page\":\"100\"\n }\n )\n posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids])\n\n return not all(posted_flags)\n\ndef post_grade(course, assignment, student, score):\n '''Takes a course object, an assignment name, student id, and score to upload. 
Posts to Canvas.\n\n Example:\n course.post_grades(dsci100, 'worksheet_01', '23423', 10)'''\n assignment_id = get_assignment_id(course, assignment)\n url_post_path = posixpath.join(\"api\", \"v1\", \"courses\", course['course_id'], \"assignments\", assignment_id, \"submissions\", student)\n api_url = urllib.parse.urljoin(course['hostname'], url_post_path)\n token = course['token']\n resp = requests.put(\n url = urllib.parse.urljoin(api_url, student),\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Accept\": \"application/json+canvas-string-ids\"\n },\n json={\n \"submission\": {\"posted_grade\": score}\n },\n )\n\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
danielkrause/DCASE2022-data-generator
[ "b0ff595e7cf7e5581d9a0ee4d3292a41117db8e5" ]
[ "audio_synthesizer.py" ]
[ "import numpy as np\r\nimport scipy.io\r\nimport utils\r\nimport os\r\nimport mat73\r\nimport scipy.signal as signal\r\nimport soundfile\r\n\r\nclass AudioSynthesizer(object):\r\n def __init__(\r\n self, params, mixtures, mixture_setup, db_config, audio_format\r\n ):\r\n self._mixtures = mixtures\r\n self._rirpath = params['rirpath']\r\n self._db_path = params['db_path']\r\n self._audio_format = audio_format\r\n self._outpath = params['mixturepath'] + '/' + mixture_setup['scenario'] + '/' + self._audio_format\r\n self._rirdata = db_config._rirdata\r\n self._nb_rooms = len(self._rirdata)\r\n self._room_names = []\r\n for nr in range(self._nb_rooms):\r\n self._room_names.append(self._rirdata[nr][0][0][0])\r\n self._classnames = mixture_setup['classnames']\r\n self._fs_mix = mixture_setup['fs_mix']\r\n self._t_mix = mixture_setup['mixture_duration']\r\n self._l_mix = int(np.round(self._fs_mix * self._t_mix))\r\n self._time_idx100 = np.arange(0., self._t_mix, 0.1)\r\n self._stft_winsize_moving = 0.1*self._fs_mix//2\r\n self._nb_folds = len(mixtures)\r\n self._apply_event_gains = db_config._apply_class_gains\r\n if self._apply_event_gains:\r\n self._class_gains = db_config._class_gains\r\n \r\n \r\n def synthesize_mixtures(self):\r\n rirdata2room_idx = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 8: 6, 9: 7, 10: 8} # room numbers in the rirdata array\r\n # create path if doesn't exist\r\n if not os.path.isdir(self._outpath):\r\n os.makedirs(self._outpath)\r\n \r\n for nfold in range(self._nb_folds):\r\n print('Generating scene audio for fold {}'.format(nfold+1))\r\n\r\n rooms = self._mixtures[nfold][0]['roomidx']\r\n nb_rooms_in_fold = len(rooms)\r\n for nr in range(nb_rooms_in_fold):\r\n\r\n nroom = rooms[nr]\r\n nb_mixtures = len(self._mixtures[nfold][nr]['mixture'])\r\n print('Loading RIRs for room {}'.format(nroom+1))\r\n \r\n room_idx = rirdata2room_idx[nroom]\r\n if nroom > 9:\r\n struct_name = 'refs_{}_{}'.format(nroom,self._room_names[room_idx])\r\n else:\r\n struct_name = 'refs_0{}_{}'.format(nroom,self._room_names[room_idx])\r\n path = self._rirpath + '/' + struct_name + '.mat'\r\n rirs = mat73.loadmat(path)\r\n rirs = rirs['rirs'][self._audio_format]\r\n # stack all the RIRs for all heights to make one large trajectory\r\n print('Stacking same trajectory RIRs')\r\n lRir = len(rirs[0][0])\r\n nCh = len(rirs[0][0][0])\r\n \r\n n_traj = np.shape(self._rirdata[room_idx][0][2])[0]\r\n n_rirs_max = np.max(np.sum(self._rirdata[room_idx][0][3],axis=1))\r\n \r\n channel_rirs = np.zeros((lRir, nCh, n_rirs_max, n_traj))\r\n for ntraj in range(n_traj):\r\n nHeights = np.sum(self._rirdata[room_idx][0][3][ntraj,:]>0)\r\n \r\n nRirs_accum = 0\r\n \r\n # flip the direction of each second height, so that a\r\n # movement can jump from the lower to the higher smoothly and\r\n # continue moving the opposite direction\r\n flip = False\r\n for nheight in range(nHeights):\r\n nRirs_nh = self._rirdata[room_idx][0][3][ntraj,nheight]\r\n rir_l = len(rirs[ntraj][nheight][0,0,:])\r\n if flip:\r\n channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight][:,:,np.arange(rir_l-1,-1,-1)]\r\n else:\r\n channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight]\r\n \r\n nRirs_accum += nRirs_nh\r\n flip = not flip\r\n \r\n del rirs #clear some memory\r\n \r\n for nmix in range(nb_mixtures):\r\n print('Writing mixture {}/{}'.format(nmix+1,nb_mixtures))\r\n\r\n ### WRITE TARGETS EVENTS\r\n mixture_nm = self._mixtures[nfold][nr]['mixture'][nmix]\r\n try:\r\n nb_events = 
len(mixture_nm['class'])\r\n except TypeError:\r\n nb_events = 1\r\n \r\n mixsig = np.zeros((self._l_mix, 4))\r\n for nev in range(nb_events):\r\n if not nb_events == 1:\r\n classidx = int(mixture_nm['class'][nev])\r\n onoffset = mixture_nm['event_onoffsets'][nev,:]\r\n filename = mixture_nm['files'][nev]\r\n ntraj = int(mixture_nm['trajectory'][nev])\r\n \r\n else:\r\n classidx = int(mixture_nm['class'])\r\n onoffset = mixture_nm['event_onoffsets']\r\n filename = mixture_nm['files']\r\n ntraj = int(mixture_nm['trajectory'])\r\n \r\n # load event audio and resample to match RIR sampling\r\n eventsig, fs_db = soundfile.read(self._db_path + '/' + filename)\r\n if len(np.shape(eventsig)) > 1:\r\n eventsig = eventsig[:,0]\r\n eventsig = signal.resample_poly(eventsig, self._fs_mix, fs_db)\r\n \r\n #spatialize audio\r\n riridx = mixture_nm['rirs'][nev] if nb_events > 1 else mixture_nm['rirs']\r\n \r\n \r\n moving_condition = mixture_nm['isMoving'][nev] if nb_events > 1 else mixture_nm['isMoving']\r\n if nb_events > 1 and not moving_condition:\r\n riridx = int(riridx[0]) if len(riridx)==1 else riridx.astype('int')\r\n \r\n if moving_condition:\r\n nRirs_moving = len(riridx) if np.shape(riridx) else 1\r\n ir_times = self._time_idx100[np.arange(0,nRirs_moving)]\r\n mixeventsig = 481.6989*utils.ctf_ltv_direct(eventsig, channel_rirs[:, :, riridx, ntraj], ir_times, self._fs_mix, self._stft_winsize_moving) / float(len(eventsig))\r\n else:\r\n\r\n mixeventsig0 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 0, riridx, ntraj]), mode='full', method='fft')\r\n mixeventsig1 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 1, riridx, ntraj]), mode='full', method='fft')\r\n mixeventsig2 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 2, riridx, ntraj]), mode='full', method='fft')\r\n mixeventsig3 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 3, riridx, ntraj]), mode='full', method='fft')\r\n\r\n mixeventsig = np.stack((mixeventsig0,mixeventsig1,mixeventsig2,mixeventsig3),axis=1)\r\n if self._apply_event_gains:\r\n # apply random gain to each event based on class gain, distribution given externally\r\n K=1000\r\n rand_energies_per_spec = utils.sample_from_quartiles(K, self._class_gains[classidx])\r\n intr_quart_energies_per_sec = rand_energies_per_spec[K + np.arange(3*(K+1))]\r\n rand_energy_per_spec = intr_quart_energies_per_sec[np.random.randint(len(intr_quart_energies_per_sec))]\r\n sample_onoffsets = mixture_nm['sample_onoffsets'][nev]\r\n sample_active_time = sample_onoffsets[1] - sample_onoffsets[0]\r\n target_energy = rand_energy_per_spec*sample_active_time\r\n if self._audio_format == 'mic':\r\n event_omni_energy = np.sum(np.sum(mixeventsig,axis=1)**2)\r\n elif self._audio_format == 'foa':\r\n event_omni_energy = np.sum(mixeventsig[:,0]**2)\r\n \r\n norm_gain = np.sqrt(target_energy / event_omni_energy)\r\n mixeventsig = norm_gain * mixeventsig\r\n\r\n lMixeventsig = np.shape(mixeventsig)[0]\r\n if np.round(onoffset[0]*self._fs_mix) + lMixeventsig <= self._t_mix * self._fs_mix:\r\n mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig,dtype=int), :] += mixeventsig\r\n else:\r\n lMixeventsig_trunc = int(self._t_mix * self._fs_mix - int(np.round(onoffset[0]*self._fs_mix)))\r\n mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig_trunc,dtype=int), :] += mixeventsig[np.arange(0,lMixeventsig_trunc,dtype=int), :]\r\n\r\n # normalize\r\n gnorm = 0.5/np.max(np.max(np.abs(mixsig)))\r\n\r\n mixsig = gnorm*mixsig\r\n 
mixture_filename = 'fold{}_room{}_mix{:03}.wav'.format(nfold+1, nr+1, nmix+1)\r\n soundfile.write(self._outpath + '/' + mixture_filename, mixsig, self._fs_mix)\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n" ]
[ [ "numpy.zeros", "numpy.round", "numpy.sum", "numpy.shape", "numpy.stack", "numpy.arange", "numpy.sqrt", "numpy.abs", "scipy.signal.resample_poly", "numpy.squeeze" ] ]
Superhzf/PaperImplementation
[ "a800a9e2fb52fb70766bf91b52871621e0e1dd55" ]
[ "NLP/The_Bottom_up_Evolution_of_Representations_in_the_Transformer/analytics.py" ]
[ "from sklearn.cluster import MiniBatchKMeans\nimport numpy as np\n\nimport torch\nfrom models import TransformerModel, Seq2SeqTransformer, generate_square_subsequent_mask\nfrom models import LM_NAME, MLM_NAME, MT_NAME, NLAYERS, NUM2WORD\nimport os\nfrom data_preprocessing import DATA_DIR_DEV, SAVE_DATA_MT_TRAIN\nfrom data_preprocessing import SAVE_VOCAB_SRC, SAVE_VOCAB_TRG, PAD_WORD\nimport pickle\nfrom torchtext.legacy.data import Dataset, BucketIterator\nimport pandas as pd\nfrom analytics_helper import MostFreqToken, GetInter, GetMI, GetInterValues\nfrom analytics_helper import MIN_SAMPLE_SIZE_DEV, MIN_SAMPLE_SIZE_FULL\nfrom analytics_helper import N_FREQUENT_DEV, N_FREQUENT_FULL\nfrom analytics_helper import N_CLUSTER_DEV, N_CLUSTER_FULL\nfrom data_preprocessing import SAVE_MODEL_PATH, DEVELOPMENT_MODE\nfrom MT_helpers import patch_trg, create_mask\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nif DEVELOPMENT_MODE:\n min_sample_size=MIN_SAMPLE_SIZE_DEV\n N_frequent=N_FREQUENT_DEV\n N_cluster=N_CLUSTER_DEV\n data_dir=DATA_DIR_DEV\n\nelse:\n min_sample_size=MIN_SAMPLE_SIZE_FULL\n N_frequent=N_FREQUENT_FULL\n N_cluster=N_CLUSTER_FULL\n data_dir=DATA_DIR_FULL\n\n\nMI_results_INP={LM_NAME.split('.')[0]:[],\n f\"{MLM_NAME.split('.')[0]}_SAME\":[],\n f\"{MLM_NAME.split('.')[0]}_DIFF\":[],\n MT_NAME.split('.')[0]:[]}\n\nMI_results_OUT={LM_NAME.split('.')[0]:[],\n MLM_NAME.split('.')[0]:[]}\n\nMODELS_INP=[LM_NAME, MLM_NAME, MT_NAME]\n\nvocab_pkl_src = os.path.join(data_dir, SAVE_VOCAB_SRC)\nvocab_pkl_trg = os.path.join(data_dir, SAVE_VOCAB_TRG)\ntrain_pkl = os.path.join(data_dir, SAVE_DATA_MT_TRAIN)\nfield_src = pickle.load(open(vocab_pkl_src, 'rb'))\nfield_trg = pickle.load(open(vocab_pkl_trg, 'rb'))\nsrc_pad_idx = field_src.vocab.stoi[PAD_WORD]\ntrg_pad_idx = field_trg.vocab.stoi[PAD_WORD]\ntrain_examples = pickle.load(open(train_pkl, 'rb'))\nfields = {'src':field_src , 'trg':field_trg}\ntrain = Dataset(examples=train_examples, fields=fields)\ntrain_iter = BucketIterator(train, batch_size=1, device=device, train=True, shuffle=False)\nfrequent_vocab = MostFreqToken(field_src, N_frequent, min_sample_size)\n\n# token_reps_list saves NLAYERS dicts, for ith dict, the key is the token ID,\n# the value is the representation of the ID in the ith layer.\ntoken_reps_model_INP={}\ntoken_reps_model_OUT={}\nfor this_model_name in MODELS_INP:\n token_reps_list=[]\n for _ in range(NLAYERS):\n this_token_reps={}\n for this_token_id in frequent_vocab:\n this_token_reps[this_token_id]=[]\n token_reps_list.append(this_token_reps)\n if this_model_name.startswith(\"MLM\"):\n token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_SAME\"]=token_reps_list\n token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_DIFF\"]=token_reps_list\n token_reps_model_OUT[this_model_name.split('.')[0]]=token_reps_list\n elif this_model_name.startswith(\"LM\"):\n token_reps_model_INP[this_model_name.split('.')[0]]=token_reps_list\n token_reps_model_OUT[this_model_name.split('.')[0]]=token_reps_list\n elif this_model_name.startswith(\"MT\"):\n token_reps_model_INP[this_model_name.split('.')[0]]=token_reps_list\n\nsample_size_dict_INP={}\nsample_size_dict_OUT={}\nfor this_model_name in MODELS_INP:\n if this_model_name.startswith(\"MLM\"):\n this_sample_size_dict_INP_SAME={}\n this_sample_size_dict_INP_DIFF={}\n this_sample_size_dict_OUT={}\n for this_token_id in frequent_vocab:\n this_sample_size_dict_INP_SAME[this_token_id]=0\n this_sample_size_dict_INP_DIFF[this_token_id]=0\n 
this_sample_size_dict_OUT[this_token_id]=0\n sample_size_dict_INP[f\"{this_model_name.split('.')[0]}_SAME\"]=this_sample_size_dict_INP_SAME\n sample_size_dict_INP[f\"{this_model_name.split('.')[0]}_DIFF\"]=this_sample_size_dict_INP_DIFF\n sample_size_dict_OUT[this_model_name.split('.')[0]]=this_sample_size_dict_OUT\n elif this_model_name.startswith(\"LM\"):\n this_sample_size_dict_INP={}\n this_sample_size_dict_OUT={}\n for this_token_id in frequent_vocab:\n this_sample_size_dict_INP[this_token_id]=0\n this_sample_size_dict_OUT[this_token_id]=0\n sample_size_dict_INP[this_model_name.split('.')[0]]=this_sample_size_dict_INP\n sample_size_dict_OUT[this_model_name.split('.')[0]]=this_sample_size_dict_OUT\n elif this_model_name.startswith(\"MT\"):\n this_sample_size_dict_INP={}\n for this_token_id in frequent_vocab:\n this_sample_size_dict_INP[this_token_id]=0\n sample_size_dict_INP[this_model_name.split('.')[0]]=this_sample_size_dict_INP\n\n\n\nfor batch in train_iter:\n src_seq_MT = batch.src.to(device)\n target_sample_INP_MT=GetInter(src_seq_MT.detach().numpy(), frequent_vocab)\n\n src_seq_MLM_SAME = batch.src.to(device)\n target_sample_INP_MLM_SAME=GetInter(src_seq_MLM_SAME.detach().numpy(), frequent_vocab)\n\n src_seq=batch.src.to(device)\n src_seq_MLM_DIFF = src_seq.clone()\n src_mask = generate_square_subsequent_mask(src_seq.size(0))\n rand_value = torch.rand(src_seq.shape)\n rand_mask = (rand_value < 0.15) * (input != src_pad_idx)\n mask_idx=(rand_mask.flatten() == True).nonzero().view(-1)\n src_seq_MLM_DIFF = src_seq_MLM_DIFF.flatten()\n src_seq_MLM_DIFF[mask_idx] = 103\n src_seq_MLM_DIFF = src_seq_MLM_DIFF.view(src_seq.size())\n target_sample_INP_MLM_DIFF=GetInter(src_seq_MLM_DIFF.detach().numpy(), frequent_vocab)\n\n src_seq_LM = batch.src[:-1]\n target_sample_INP_LM=GetInter(src_seq_LM.detach().numpy(), frequent_vocab)\n\n trg = batch.trg\n trg_seq_MT, gold = map(lambda x: x.to(device), patch_trg(trg, trg_pad_idx))\n trg_seq_MT = trg_seq_MT.to(device)\n\n trg_seq_LM = src_seq[1:].to(device)\n target_sample_OUT_LM=GetInter(trg_seq_LM.detach().numpy(), frequent_vocab)\n\n trg_seq_MLM = src_seq\n target_sample_OUT_MLM=GetInter(trg_seq_MLM.detach().numpy(), frequent_vocab)\n\n for this_model_name in MODELS_INP:\n this_model = torch.load(os.path.join(SAVE_MODEL_PATH,this_model_name))\n this_model.eval()\n if this_model_name.startswith(\"MT\") and len(target_sample_INP_MT)>0:\n src_mask, trg_mask, src_padding_mask, trg_padding_mask = create_mask(src_seq_MT, trg_seq_MT, src_pad_idx, trg_pad_idx)\n _ = this_model(src=src_seq_MT,\n src_mask=src_mask,\n trg=trg_seq_MT,\n tgt_mask=trg_mask,\n src_padding_mask=src_padding_mask,\n tgt_padding_mask=trg_padding_mask,\n memory_key_padding_mask=src_padding_mask)\n token_reps_list=token_reps_model_INP[MT_NAME.split('.')[0]]\n this_sample_size_dict=sample_size_dict_INP[this_model_name.split('.')[0]]\n GetInterValues(this_model, target_sample_INP_MT, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)\n elif this_model_name.startswith(\"MLM\"):\n if len(target_sample_INP_MLM_SAME)>0:\n src_mask = generate_square_subsequent_mask(src_seq_MLM_SAME.size(0))\n src_padding_mask = (src_seq_MLM_SAME == src_pad_idx).transpose(0, 1)\n _ = this_model(src_seq_MLM_SAME, src_mask.to(device),src_padding_mask.to(device))\n token_reps_list=token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_SAME\"]\n this_sample_size_dict=sample_size_dict_INP[f\"{this_model_name.split('.')[0]}_SAME\"]\n GetInterValues(this_model, target_sample_INP_MLM_SAME, 
NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)\n\n if len(target_sample_INP_MLM_DIFF)>0 and len(target_sample_OUT_MLM)>0:\n src_mask = generate_square_subsequent_mask(src_seq_MLM_DIFF.size(0))\n src_padding_mask = (src_seq_MLM_DIFF == src_pad_idx).transpose(0, 1)\n _ = this_model(src_seq_MLM_DIFF.to(device), src_mask.to(device),src_padding_mask.to(device))\n token_reps_list_INP=token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_DIFF\"]\n this_sample_size_dict_INP=sample_size_dict_INP[f\"{this_model_name.split('.')[0]}_DIFF\"]\n\n token_reps_list_OUT=token_reps_model_OUT[MLM_NAME.split('.')[0]]\n this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]\n\n GetInterValues(this_model, target_sample_INP_MLM_DIFF, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)\n GetInterValues(this_model, target_sample_OUT_MLM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)\n elif this_model_name.startswith(\"LM\") and len(target_sample_INP_LM)>0 and len(target_sample_OUT_LM)>0:\n src_mask = generate_square_subsequent_mask(src_seq_LM.size(0))\n src_padding_mask = (src_seq_LM == src_pad_idx).transpose(0, 1)\n _ = this_model(src_seq_LM, src_mask.to(device),src_padding_mask.to(device))\n token_reps_list_INP=token_reps_model_INP[this_model_name.split('.')[0]]\n token_reps_list_OUT=token_reps_model_OUT[this_model_name.split('.')[0]]\n\n this_sample_size_dict_INP=sample_size_dict_INP[this_model_name.split('.')[0]]\n this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]\n\n GetInterValues(this_model, target_sample_INP_LM, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)\n GetInterValues(this_model, target_sample_OUT_LM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)\n\n\n # we only need to keep the minimum sample size that has been collected\n this_min_sample_size_inp=float('inf')\n this_min_sample_size_out=float('inf')\n\n for model_name, this_sample_size_dict in sample_size_dict_INP.items():\n for token_id, size in this_sample_size_dict.items():\n if size<this_min_sample_size_inp:\n this_min_sample_size_inp=size\n\n for model_name, this_sample_size_dict in sample_size_dict_OUT.items():\n for token_id, size in this_sample_size_dict.items():\n if size<this_min_sample_size_out:\n this_min_sample_size_out=size\n\n is_enough=True\n if this_min_sample_size_inp>=min_sample_size and this_min_sample_size_out>=min_sample_size:\n for model_name, reps_dict in token_reps_model_INP.items():\n if is_enough is False:\n break\n for this_layer in reps_dict:\n if is_enough is False:\n break\n for token_id, rep_list in this_layer.items():\n if len(rep_list)<min_sample_size:\n is_enough=False\n break\n\n for model_name, reps_list in token_reps_model_OUT.items():\n if is_enough is False:\n break\n for this_layer in reps_dict:\n if is_enough is False:\n break\n for token_id, rep_list in this_layer.items():\n if len(rep_list)<min_sample_size:\n is_enough=False\n break\n else:\n is_enough=False\n if is_enough:\n break\n\nif is_enough is False:\n assert 1==0, \"We have not collected enough data!\"\n\nfor this_model_name in MODELS_INP:\n if this_model_name.startswith(\"MLM\"):\n token_reps_list=token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_SAME\"]\n result_list=MI_results_INP[f\"{MLM_NAME.split('.')[0]}_SAME\"]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n 
token_reps_list=token_reps_model_INP[f\"{MLM_NAME.split('.')[0]}_DIFF\"]\n result_list=MI_results_INP[f\"{MLM_NAME.split('.')[0]}_DIFF\"]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n token_reps_list=token_reps_model_OUT[MLM_NAME.split('.')[0]]\n result_list=MI_results_OUT[MLM_NAME.split('.')[0]]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n elif this_model_name.startswith(\"MT\"):\n token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]\n result_list=MI_results_INP[this_model_name.split('.')[0]]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n elif this_model_name.startswith(\"LM\"):\n token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]\n result_list=MI_results_INP[this_model_name.split('.')[0]]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n token_reps_list=token_reps_model_OUT[MLM_NAME.split('.')[0]]\n result_list=MI_results_OUT[this_model_name.split('.')[0]]\n GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)\n\n\n\nprint(\"result\",MI_results_INP)\nprint(\"result\",MI_results_OUT)\n" ]
[ [ "torch.rand", "torch.cuda.is_available" ] ]
samokhinv/deep_pipe
[ "9461b02f5f32c3e9f24490619ebccf417979cffc" ]
[ "dpipe/batch_iter/utils.py" ]
[ "from typing import Callable, Iterable, Sequence\n\nimport numpy as np\n\nfrom dpipe.im.axes import AxesLike, AxesParams\nfrom dpipe.itertools import lmap, squeeze_first\nfrom dpipe.im import pad_to_shape\n\n\ndef pad_batch_equal(batch, padding_values: AxesParams = 0, ratio: AxesParams = 0.5):\n \"\"\"\n Pad each element of ``batch`` to obtain a correctly shaped array.\n\n References\n ----------\n `pad_to_shape`\n \"\"\"\n max_shapes = np.max(lmap(np.shape, batch), axis=0)\n # if not scalars\n if max_shapes.size != 0:\n batch = [pad_to_shape(x, max_shapes, padding_values=padding_values, ratio=ratio) for x in batch]\n return np.array(batch)\n\n\ndef unpack_args(func: Callable, *args, **kwargs):\n \"\"\"\n Returns a function that takes an iterable and unpacks it while calling ``func``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> def add(x, y):\n >>> return x + y\n >>>\n >>> add_ = unpack_args(add)\n >>> add(1, 2) == add_([1, 2])\n >>> True\n \"\"\"\n\n def wrapper(xs, *args_, **kwargs_):\n return func(*xs, *args_, *args, **kwargs_, **kwargs)\n\n return wrapper\n\n\ndef multiply(func: Callable, *args, **kwargs):\n \"\"\"\n Returns a function that takes an iterable and maps ``func`` over it.\n Useful when multiple batches require the same function.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n \"\"\"\n\n def wrapped(xs: Iterable, *args_, **kwargs_) -> tuple:\n return tuple(func(x, *args_, *args, **kwargs_, **kwargs) for x in xs)\n\n return wrapped\n\n\ndef apply_at(index: AxesLike, func: Callable, *args, **kwargs):\n \"\"\"\n Returns a function that takes an iterable and applies ``func`` to the values at the corresponding ``index``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n\n Examples\n --------\n >>> first_sqr = apply_at(0, np.square)\n >>> first_sqr([3, 2, 1])\n >>> (9, 2, 1)\n \"\"\"\n index = set(np.atleast_1d(index).tolist())\n\n def wrapped(xs: Sequence, *args_, **kwargs_) -> tuple:\n index_ = {i + len(xs) if i < 0 else i for i in index}\n for idx in index_:\n if idx < 0 or idx >= len(xs):\n raise IndexError(f'Index {idx} out of bounds.')\n\n return tuple(func(x, *args_, *args, **kwargs_, **kwargs) if i in index_ else x for i, x in enumerate(xs))\n\n return wrapped\n\n\ndef zip_apply(*functions: Callable, **kwargs):\n \"\"\"\n Returns a function that takes an iterable and zips ``functions`` over it.\n\n ``kwargs`` are passed to each function as additional arguments.\n\n Examples\n --------\n >>> zipper = zip_apply(np.square, np.sqrt)\n >>> zipper([4, 9])\n >>> (16, 3)\n \"\"\"\n\n def wrapped(xs: Sequence, *args, **kwargs_) -> tuple:\n return tuple(func(x, *args, **kwargs_, **kwargs) for func, x in zip(functions, xs))\n\n return wrapped\n\n\ndef random_apply(p: float, func: Callable, *args, **kwargs):\n \"\"\"\n Returns a function that applies ``func`` with a given probability ``p``.\n\n ``args`` and ``kwargs`` are passed to ``func`` as additional arguments.\n \"\"\"\n\n def wrapped(*args_, **kwargs_):\n if np.random.binomial(1, p):\n return func(*args_, *args, **kwargs_, **kwargs)\n return squeeze_first(args_)\n\n return wrapped\n\n\ndef sample_args(func: Callable, *args: Callable, **kwargs: Callable):\n \"\"\"\n Returns a function that samples arguments for ``func`` from ``args`` and ``kwargs``.\n\n Each argument in ``args`` and ``kwargs`` must be a callable that samples a random value.\n\n Examples\n --------\n >>> from scipy.ndimage import rotate\n 
>>>\n >>> random_rotate = sample_args(rotate, angle=np.random.normal)\n >>> random_rotate(x)\n >>> # same as\n >>> rotate(x, angle=np.random.normal())\n \"\"\"\n\n def wrapped(*args_, **kwargs_):\n return func(*args_, *([arg() for arg in args]), **kwargs_, **{name: arg() for name, arg in kwargs.items()})\n\n return wrapped\n" ]
[ [ "numpy.array", "numpy.random.binomial", "numpy.atleast_1d" ] ]
zeuseyera/baselines-kr
[ "c9926418d2d8efee21ef20d548366eaaaa193011" ]
[ "baselines/a2c/utils.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\n\ndef sample(logits):\n noise = tf.random_uniform(tf.shape(logits))\n return tf.argmax(logits - tf.log(-tf.log(noise)), 1)\n\ndef cat_entropy(logits):\n a0 = logits - tf.reduce_max(logits, 1, keepdims=True)\n ea0 = tf.exp(a0)\n z0 = tf.reduce_sum(ea0, 1, keepdims=True)\n p0 = ea0 / z0\n\n return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)\n\ndef cat_entropy_softmax(p0):\n return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)\n\ndef ortho_init(scale=1.0):\n def _ortho_init(shape, dtype, partition_info=None):\n #lasagne ortho init for tf\n shape = tuple(shape)\n if len(shape) == 2:\n flat_shape = shape\n elif len(shape) == 4: # assumes NHWC\n flat_shape = (np.prod(shape[:-1]), shape[-1])\n else:\n raise NotImplementedError\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return (scale * q[:shape[0], :shape[1]]).astype(np.float32)\n return _ortho_init\n\ndef conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):\n if data_format == 'NHWC':\n channel_ax = 3\n strides = [1, stride, stride, 1]\n bshape = [1, 1, 1, nf]\n elif data_format == 'NCHW':\n channel_ax = 1\n strides = [1, 1, stride, stride]\n bshape = [1, nf, 1, 1]\n else:\n raise NotImplementedError\n bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]\n nin = x.get_shape()[channel_ax].value\n wshape = [rf, rf, nin, nf]\n with tf.variable_scope(scope):\n w = tf.get_variable(\"w\", wshape, initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", bias_var_shape, initializer=tf.constant_initializer(0.0))\n if not one_dim_bias and data_format == 'NHWC':\n b = tf.reshape(b, bshape)\n return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b\n\ndef fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):\n with tf.variable_scope(scope):\n nin = x.get_shape()[1].value\n w = tf.get_variable(\"w\", [nin, nh], initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", [nh], initializer=tf.constant_initializer(init_bias))\n return tf.matmul(x, w)+b\n\ndef batch_to_seq(h, nbatch, nsteps, flat=False):\n if flat:\n h = tf.reshape(h, [nbatch, nsteps])\n else:\n h = tf.reshape(h, [nbatch, nsteps, -1])\n return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]\n\ndef seq_to_batch(h, flat = False):\n shape = h[0].get_shape().as_list()\n if not flat:\n assert(len(shape) > 1)\n nh = h[0].get_shape()[-1].value\n return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])\n else:\n return tf.reshape(tf.stack(values=h, axis=1), [-1])\n\ndef lstm(xs, ms, s, scope, nh, init_scale=1.0):\n nbatch, nin = [v.value for v in xs[0].get_shape()]\n with tf.variable_scope(scope):\n wx = tf.get_variable(\"wx\", [nin, nh*4], initializer=ortho_init(init_scale))\n wh = tf.get_variable(\"wh\", [nh, nh*4], initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", [nh*4], initializer=tf.constant_initializer(0.0))\n\n c, h = tf.split(axis=1, num_or_size_splits=2, value=s)\n for idx, (x, m) in enumerate(zip(xs, ms)):\n c = c*(1-m)\n h = h*(1-m)\n z = tf.matmul(x, wx) + tf.matmul(h, wh) + b\n i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)\n i = tf.nn.sigmoid(i)\n f = tf.nn.sigmoid(f)\n o = tf.nn.sigmoid(o)\n u = tf.tanh(u)\n c = f*c + i*u\n h = o*tf.tanh(c)\n xs[idx] = h\n s = tf.concat(axis=1, values=[c, 
h])\n return xs, s\n\ndef _ln(x, g, b, e=1e-5, axes=[1]):\n u, s = tf.nn.moments(x, axes=axes, keep_dims=True)\n x = (x-u)/tf.sqrt(s+e)\n x = x*g+b\n return x\n\ndef lnlstm(xs, ms, s, scope, nh, init_scale=1.0):\n nbatch, nin = [v.value for v in xs[0].get_shape()]\n with tf.variable_scope(scope):\n wx = tf.get_variable(\"wx\", [nin, nh*4], initializer=ortho_init(init_scale))\n gx = tf.get_variable(\"gx\", [nh*4], initializer=tf.constant_initializer(1.0))\n bx = tf.get_variable(\"bx\", [nh*4], initializer=tf.constant_initializer(0.0))\n\n wh = tf.get_variable(\"wh\", [nh, nh*4], initializer=ortho_init(init_scale))\n gh = tf.get_variable(\"gh\", [nh*4], initializer=tf.constant_initializer(1.0))\n bh = tf.get_variable(\"bh\", [nh*4], initializer=tf.constant_initializer(0.0))\n\n b = tf.get_variable(\"b\", [nh*4], initializer=tf.constant_initializer(0.0))\n\n gc = tf.get_variable(\"gc\", [nh], initializer=tf.constant_initializer(1.0))\n bc = tf.get_variable(\"bc\", [nh], initializer=tf.constant_initializer(0.0))\n\n c, h = tf.split(axis=1, num_or_size_splits=2, value=s)\n\n for idx, (x, m) in enumerate(zip(xs, ms)):\n c = c*(1-m)\n h = h*(1-m)\n z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b\n i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)\n i = tf.nn.sigmoid(i)\n f = tf.nn.sigmoid(f)\n o = tf.nn.sigmoid(o)\n u = tf.tanh(u)\n c = f*c + i*u\n h = o*tf.tanh(_ln(c, gc, bc))\n xs[idx] = h\n\n s = tf.concat(axis=1, values=[c, h])\n \n return xs, s\n\ndef conv_to_fc(x):\n nh = np.prod([v.value for v in x.get_shape()[1:]])\n x = tf.reshape(x, [-1, nh])\n return x\n\ndef discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r*(1.-done) # fixed off by one bug\n discounted.append(r)\n return discounted[::-1]\n\ndef find_trainable_variables(key):\n return tf.trainable_variables(key)\n\ndef make_path(f):\n return os.makedirs(f, exist_ok=True)\n\ndef constant(p):\n return 1\n\ndef linear(p):\n return 1-p\n\ndef middle_drop(p):\n eps = 0.75\n if 1-p<eps:\n return eps*0.1\n return 1-p\n\ndef double_linear_con(p):\n p *= 2\n eps = 0.125\n if 1-p<eps:\n return eps\n return 1-p\n\ndef double_middle_drop(p):\n eps1 = 0.75\n eps2 = 0.25\n if 1-p<eps1:\n if 1-p<eps2:\n return eps2*0.5\n return eps1*0.1\n return 1-p\n\nschedules = {\n 'linear':linear,\n 'constant':constant,\n 'double_linear_con': double_linear_con,\n 'middle_drop': middle_drop,\n 'double_middle_drop': double_middle_drop\n}\n\nclass Scheduler(object):\n\n def __init__(self, v, nvalues, schedule):\n self.n = 0.\n self.v = v\n self.nvalues = nvalues\n self.schedule = schedules[schedule]\n\n def value(self):\n current_value = self.v*self.schedule(self.n/self.nvalues)\n self.n += 1.\n return current_value\n\n def value_steps(self, steps):\n return self.v*self.schedule(steps/self.nvalues)\n\n\nclass EpisodeStats:\n def __init__(self, nsteps, nenvs):\n self.episode_rewards = []\n for i in range(nenvs):\n self.episode_rewards.append([])\n self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths\n self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards\n self.nsteps = nsteps\n self.nenvs = nenvs\n\n def feed(self, rewards, masks):\n rewards = np.reshape(rewards, [self.nenvs, self.nsteps])\n masks = np.reshape(masks, [self.nenvs, self.nsteps])\n for i in range(0, self.nenvs):\n for j in range(0, self.nsteps):\n self.episode_rewards[i].append(rewards[i][j])\n if masks[i][j]:\n l = 
len(self.episode_rewards[i])\n s = sum(self.episode_rewards[i])\n self.lenbuffer.append(l)\n self.rewbuffer.append(s)\n self.episode_rewards[i] = []\n\n def mean_length(self):\n if self.lenbuffer:\n return np.mean(self.lenbuffer)\n else:\n return 0 # on the first params dump, no episodes are finished\n\n def mean_reward(self):\n if self.rewbuffer:\n return np.mean(self.rewbuffer)\n else:\n return 0\n\n\n# For ACER\ndef get_by_index(x, idx):\n assert(len(x.get_shape()) == 2)\n assert(len(idx.get_shape()) == 1)\n idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx\n y = tf.gather(tf.reshape(x, [-1]), # flatten input\n idx_flattened) # use flattened indices\n return y\n\ndef check_shape(ts,shapes):\n i = 0\n for (t,shape) in zip(ts,shapes):\n assert t.get_shape().as_list()==shape, \"id \" + str(i) + \" shape \" + str(t.get_shape()) + str(shape)\n i += 1\n\ndef avg_norm(t):\n return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))\n\ndef gradient_add(g1, g2, param):\n print([g1, g2, param.name])\n assert (not (g1 is None and g2 is None)), param.name\n if g1 is None:\n return g2\n elif g2 is None:\n return g1\n else:\n return g1 + g2\n\ndef q_explained_variance(qpred, q):\n _, vary = tf.nn.moments(q, axes=[0, 1])\n _, varpred = tf.nn.moments(q - qpred, axes=[0, 1])\n check_shape([vary, varpred], [[]] * 2)\n return 1.0 - (varpred / vary)\n" ]
[ [ "tensorflow.exp", "tensorflow.constant_initializer", "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.nn.moments", "tensorflow.reshape", "numpy.mean", "tensorflow.sqrt", "tensorflow.stack", "tensorflow.tanh", "tensorflow.trainable_variables", "numpy.random.normal", "tensorflow.shape", "tensorflow.concat", "tensorflow.variable_scope", "numpy.prod", "tensorflow.squeeze", "tensorflow.split", "tensorflow.nn.sigmoid", "tensorflow.range", "numpy.reshape", "tensorflow.log", "tensorflow.reduce_sum", "numpy.linalg.svd", "tensorflow.reduce_max", "tensorflow.square" ] ]
samiulshuvo/se_relativisticgan
[ "5501c4d96faa03eb3c1fd776b232b68940183f4d" ]
[ "data_ops.py" ]
[ "\"\"\"\nData processing routines\nDeepak Baby, UGent, June 2018\ndeepak.baby@ugent.be\n\"\"\"\n\nimport numpy as np\n\ndef reconstruct_wav(wavmat, stride_factor=0.5):\n \"\"\"\n Reconstructs the audiofile from sliced matrix wavmat\n \"\"\"\n window_length = wavmat.shape[1]\n window_stride = int(stride_factor * window_length)\n wav_length = (wavmat.shape[0] -1 ) * window_stride + window_length\n wav_recon = np.zeros((1,wav_length))\n #print (\"wav recon shape \" + str(wav_recon.shape))\n for k in range (wavmat.shape[0]):\n wav_beg = k * window_stride\n wav_end = wav_beg + window_length\n wav_recon[0, wav_beg:wav_end] += wavmat[k, :]\n\n # now compute the scaling factor for multiple instances\n noverlap = int(np.ceil(1/stride_factor))\n scale_ = (1/float(noverlap)) * np.ones((1, wav_length))\n for s in range(noverlap-1):\n s_beg = s * window_stride\n s_end = s_beg + window_stride\n scale_[0, s_beg:s_end] = 1/ (s+1)\n scale_[0, -s_beg - 1 : -s_end:-1] = 1/ (s+1)\n\n return wav_recon * scale_\n\ndef pre_emph(x, coeff=0.95):\n \"\"\"\n Apply pre_emph on 2d data (batch_size x window_length)\n \"\"\"\n #print (\"x shape: \" + str(x.shape))\n x0 = x[:, 0]\n x0 = np.expand_dims(x0, axis=1)\n diff = x[:, 1:] - coeff * x[:, :-1]\n x_preemph = np.concatenate((x0, diff), axis=1)\n if not x.shape == x_preemph.shape:\n print (\"ERROR: Pre-emphasis is wrong\")\n #print (\"x_preemph shape: \" + str(x_preemph.shape))\n return x_preemph\n\ndef de_emph(y, coeff=0.95):\n \"\"\"\n Apply de_emphasis on test data: works only on 1d data\n \"\"\"\n if coeff <= 0:\n return y\n x = np.zeros((y.shape[0],), dtype=np.float32)\n #print(\"in_shape\" + str(y.shape))\n x[0] = y[0]\n for n in range(1, y.shape[0], 1):\n x[n] = coeff * x[n - 1] + y[n]\n return x\n\ndef data_preprocess(wav, preemph=0.95):\n wav = (2./65535.) * (wav.astype('float32') - 32767) + 1.\n if preemph > 0:\n wav = pre_emph(wav, coeff=preemph)\n return wav.astype('float32')\n" ]
[ [ "numpy.concatenate", "numpy.ceil", "numpy.zeros", "numpy.ones", "numpy.expand_dims" ] ]
icc2115/dl-selection
[ "e39ef0e73bf631e413bac48db791aed617dd7e32" ]
[ "selection/layers/utils.py" ]
[ "import torch\nimport torch.nn.functional as F\n\n\ndef clamp_probs(probs):\n eps = torch.finfo(probs.dtype).eps\n return torch.clamp(probs, min=eps, max=1-eps)\n\ndef concrete_sample(logits, temperature, shape=torch.Size([])):\n '''\n Sampling for Concrete distribution.\n\n See Eq. 10 of Maddison et al., 2017.\n '''\n uniform_shape = torch.Size(shape) + logits.shape\n u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,\n device=logits.device))\n gumbels = - torch.log(- torch.log(u))\n scores = (logits + gumbels) / temperature\n return scores.softmax(dim=-1)\n\ndef bernoulli_concrete_sample(logits, temperature, shape=torch.Size([])):\n '''\n Sampling for BinConcrete distribution.\n\n See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.\n '''\n uniform_shape = torch.Size(shape) + logits.shape\n u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,\n device=logits.device))\n return torch.sigmoid((F.logsigmoid(logits) - F.logsigmoid(-logits)\n + torch.log(u) - torch.log(1 - u)) / temperature)\n" ]
[ [ "torch.Size", "torch.rand", "torch.finfo", "torch.nn.functional.logsigmoid", "torch.clamp", "torch.log" ] ]
willidert/aux_est_micro
[ "6882794efa609f4171d86a0de1599bb97ac1ff2b" ]
[ "data-clean/clean.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndef gloriosafuncao(df):\n\n df = pd.DataFrame([df])\n\n numerico = [\n 11, \"email\", 1, 2, 3, 7,\n 8, 9, 12, 10, 13, 14,\n 15, 16, 17, 18, 19, 20, 21, 4, 5, 6\n ]\n\n df.columns = numerico\n\n labels = [\n 'email',\n 'PPI',\n 'ProgramasSociais',\n 'ModalidadeEnsino',\n # 'Beneficiario',\n 'QtdDependentes',\n 'EscolaridadePai',\n 'EscolaridadeMae',\n 'RendaPerCapita',\n 'AtividadeRemunerada',\n 'SituacaoFinanceira',\n 'QtdResponsaveisFinanceiros',\n 'CondicaoTrabalho',\n 'CondicaoRenda',\n 'MoraCidadeCampus',\n 'CondMoradia',\n 'TipoTransporte',\n 'NConducoes',\n 'DoencaCronica',\n 'Medicacao',\n 'Deficiencia',\n 'FDoencaCronica',\n 'FMedicacao',\n ]\n\n nomes_ordenados = [df.columns.to_list()[0]] + df.columns.to_list()[2:]\n nomes_ordenados.sort()\n nomes_ordenados = [df.columns.to_list()[1]] + nomes_ordenados\n\n df = df[nomes_ordenados]\n df.columns = labels\n\n condicoes = [\n 'Desempregado',\n 'Trabalhador Informal',\n 'Trabalhador Autônomo',\n 'Aposentado',\n 'Empregado CLT',\n # 'Pescador/agricultor familiar',\n 'Beneficiário INSS',\n 'Funcionário Público'\n ]\n\n rotulos = [\n 'Desempregado',\n 'Informal',\n 'Autonomo',\n 'Aposentado',\n 'CLT',\n # 'PescAgriF',\n 'INSS',\n 'FuncionarioPublico'\n ]\n\n for rotulo, cond in zip(rotulos, condicoes):\n df[rotulo] = df['CondicaoTrabalho'].map(\n lambda x: 'sim' if cond in x else 'nao')\n\n df['MoraCidadeCampus'] = df['MoraCidadeCampus'].apply(\n lambda x: x.split(',')[0].lower())\n\n df['TipoTransporte'] = df['TipoTransporte'].apply(\n lambda x: ''.join(x.split()[1]).capitalize())\n\n df['AteDois'] = df['QtdResponsaveisFinanceiros']\\\n .apply(lambda x: 'sim' if ' '\n .join(x.split()[:-1]) == '1' or ' '\n .join(x.split()[:-1]) == '2' else 'nao')\n\n df[['TipoTransporte', 'QtdResponsaveisFinanceiros',\n 'MoraCidadeCampus', 'AteDois']].head()\n\n binario = [\n 'PPI',\n 'ProgramasSociais',\n # 'Beneficiario',\n 'AtividadeRemunerada',\n 'MoraCidadeCampus',\n 'DoencaCronica',\n 'Medicacao',\n 'Deficiencia',\n 'FDoencaCronica',\n 'FMedicacao',\n 'AteDois',\n 'Desempregado',\n 'Informal',\n 'Autonomo',\n 'Aposentado',\n 'CLT',\n # 'PescAgriF',\n 'INSS',\n 'FuncionarioPublico'\n ]\n\n df_binario = pd.DataFrame()\n\n for elemento in binario:\n df_binario[elemento] = df[elemento].replace(\n ['sim', 'nao'], [1, 0]).astype(int)\n\n modalidade_map = {\n 'Graduação': 1,\n 'Médio Integrado EJA': 2,\n 'Médio Técnico Integrado': 4,\n 'Técnico Subsequente': 3,\n }\n\n transporte_map = {\n 'Pé': 1,\n 'Próprio': 1,\n 'Público': 2,\n 'Alternativo': 3\n }\n\n escolaridade_map = {\n 'Desconheço': 4,\n 'Não se aplica': 4,\n 'Sem escolaridade': 4,\n 'Ensino fundamental': 3,\n 'Ensino médio': 2,\n 'Ensino superior': 1,\n }\n\n moradia_map = {\n 'Própria': 1,\n 'Cedida': 2,\n 'Financiada': 3,\n 'Alugada': 4,\n 'Outros': 4\n }\n\n categorias = df['RendaPerCapita'].astype(\n 'category').cat.categories.tolist()\n valores = [3, 2, 9, 8, 7, 6, 5, 4, 10, 1]\n renda_percapita_map = {k: v for k, v in zip(categorias, valores)}\n\n categorias = df['SituacaoFinanceira'].astype(\n 'category').cat.categories.tolist()\n valores = [4, 2, 2, 1, 4, 5, 1]\n situacao_fin_map = {k: v for k, v in zip(categorias, valores)}\n\n categorias = df['QtdDependentes'].astype(\n 'category').cat.categories.tolist()\n valores = [2, 3, 4, 5, 1]\n dependentes_map = {k: v for k, v in zip(categorias, valores)}\n\n categorias = df['NConducoes'].astype('category').cat.categories.tolist()\n valores = [2, 3, 1]\n conducoes_map = {k: v for k, v in 
zip(categorias, valores)}\n\n categorias = df['CondicaoRenda'].astype('category').cat.categories.tolist()\n valores = [1, 2, 3]\n cond_renda_map = {k: v for k, v in zip(categorias, valores)}\n\n labels = [\n 'CondMoradia',\n 'TipoTransporte',\n 'RendaPerCapita',\n 'SituacaoFinanceira',\n 'NConducoes',\n 'CondicaoRenda',\n \"ModalidadeEnsino\",\n \"EscolaridadeMae\",\n \"EscolaridadePai\",\n \"QtdDependentes\"\n ]\n label_encode = df[labels].copy()\n\n label_encode['CondMoradia'].replace(moradia_map, inplace=True)\n label_encode['TipoTransporte'].replace(transporte_map, inplace=True)\n label_encode['EscolaridadePai'].replace(escolaridade_map, inplace=True)\n label_encode['EscolaridadeMae'].replace(escolaridade_map, inplace=True)\n label_encode['SituacaoFinanceira'].replace(situacao_fin_map, inplace=True)\n label_encode['RendaPerCapita'].replace(renda_percapita_map, inplace=True)\n label_encode['QtdDependentes'].replace(dependentes_map, inplace=True)\n label_encode['NConducoes'].replace(conducoes_map, inplace=True)\n label_encode['CondicaoRenda'].replace(cond_renda_map, inplace=True)\n label_encode['ModalidadeEnsino'].replace(modalidade_map, inplace=True)\n\n qtd = pd.DataFrame()\n qtd_res = ['ResFin_1', 'ResFin_2', 'ResFin_3', 'ResFin_4ouMais']\n opcs = [\n '1 membro',\n '2 membros',\n '3 membros',\n '4 ou mais membros'\n ]\n\n df['QtdResponsaveisFinanceiros'].replace(opcs, qtd_res)\n\n for iqtd in qtd_res:\n qtd[iqtd] = df['QtdResponsaveisFinanceiros'].map(\n lambda x: int(1) if iqtd in x else int(0))\n\n dados_limpos = pd.concat([df_binario, label_encode, qtd], axis=1)\n\n ordem = ['PPI',\n 'ProgramasSociais',\n 'AtividadeRemunerada',\n 'MoraCidadeCampus',\n 'DoencaCronica',\n 'Medicacao',\n 'Deficiencia',\n 'FDoencaCronica',\n 'FMedicacao',\n 'AteDois',\n 'Desempregado',\n 'Informal',\n 'Autonomo',\n 'Aposentado',\n 'CLT',\n 'INSS',\n 'FuncionarioPublico',\n 'ModalidadeEnsino',\n 'CondMoradia',\n 'TipoTransporte',\n 'EscolaridadeMae',\n 'EscolaridadePai',\n 'RendaPerCapita',\n 'SituacaoFinanceira',\n 'QtdDependentes',\n 'NConducoes',\n 'CondicaoRenda',\n 'ResFin_1',\n 'ResFin_2',\n 'ResFin_3',\n 'ResFin_4ouMais']\n\n dados_limpos = dados_limpos[ordem]\n dados_limpos['email'] = df['email']\n\n return np.array(dados_limpos.loc[0]).reshape(1, -1)\n" ]
[ [ "pandas.DataFrame", "numpy.array", "pandas.concat" ] ]
ZXTFINAL/deeplearning
[ "52208b43fc8f9a1ea8508b1c07140c70e1529459", "52208b43fc8f9a1ea8508b1c07140c70e1529459" ]
[ "1_boston.py", "1_gradient.py" ]
[ "import numpy as np\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\ndataset = load_boston()\nX = dataset.data\ny = dataset.target\nmean = X.mean(axis=0)\nstd = X.std(axis=0)\nX = (X-mean)/std\n# print(X)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nn_train = X_train.shape[0]\nn_features = X_train.shape[1]\n\n# 权重初始化\nw = np.random.rand(n_features)\nb = 1.1\nlr = 0.001\nepoches = 3000\n\n\ndef model(x):\n y_hat = w.dot(x)+b\n\n return y_hat\n\n\ndef loss_funtion(X, y):\n total_loss = 0\n n_samples = len(X)\n for i in range(n_samples):\n xi = X[i]\n yi = y[i]\n yi_hat = model(xi)\n total_loss += abs(yi_hat-yi)**2\n avg_loss = (1/n_samples)*total_loss\n return avg_loss\n\n\nreg = 0.5\nfor epoch in range(epoches):\n sum_w = 0.0\n sum_b = 0.0\n for i in range(n_train):\n xi = X_train[i]\n yi = y_train[i]\n yi_hat = model(xi)\n sum_w += (yi_hat-yi)*xi\n sum_b += (yi_hat - yi)\n grad_w = (2/n_train)*sum_w+(2.0*reg*w)\n grad_b = (2/n_train)*sum_b # 偏置项不做正则化处理\n w = w-lr*grad_w\n b = b-lr*grad_b\n\ntrain_loss = loss_funtion(X_train, y_train)\ntest_loss = loss_funtion(X_test, y_test)\nprint(train_loss)\nprint(test_loss)\n", "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef function(x):\n y = x**2+1\n return y\n\n\nepoches = 50\nlr = 0.1\nxi = -18\n\n\ndef get_gradient(x):\n gradient = 2*x\n return gradient\n\n\ntrajectory = []\n\n\ndef get_x_star(xi):\n for i in range(epoches):\n trajectory.append(xi)\n xi = xi-lr*get_gradient(xi)\n x_star = xi\n return x_star\n\n\nget_x_star(xi)\nx = np.arange(-20, 20, 0.1)\ny = function(x)\n\nplt.plot(x, y)\n\nx_trajectory = np.array(trajectory)\ny_trajectory = function(np.array(trajectory))\n\nplt.scatter(x_trajectory, y_trajectory)\nplt.show()\n" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.random.rand", "sklearn.datasets.load_boston" ], [ "numpy.array", "matplotlib.pyplot.plot", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
Isaac-Flath/lightning-flash
[ "320f87707587d92a13c8831778864b33af4fe421" ]
[ "flash/image/embedding/vissl/transforms/utilities.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\n\nfrom flash.core.data.data_source import DefaultDataKeys\n\n\ndef vissl_collate_helper(samples):\n result = []\n\n for batch_ele in samples:\n _batch_ele_dict = {}\n _batch_ele_dict.update(batch_ele)\n _batch_ele_dict[DefaultDataKeys.INPUT] = -1\n\n result.append(_batch_ele_dict)\n\n return torch.utils.data._utils.collate.default_collate(result)\n\n\ndef multicrop_collate_fn(samples):\n \"\"\"Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n \"\"\"\n result = vissl_collate_helper(samples)\n\n inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))]\n for batch_ele in samples:\n multi_crop_imgs = batch_ele[DefaultDataKeys.INPUT]\n\n for idx, crop in enumerate(multi_crop_imgs):\n inputs[idx].append(crop)\n\n for idx, ele in enumerate(inputs):\n inputs[idx] = torch.stack(ele)\n\n result[DefaultDataKeys.INPUT] = inputs\n\n return result\n\n\ndef simclr_collate_fn(samples):\n \"\"\"Multi-crop collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n \"\"\"\n result = vissl_collate_helper(samples)\n\n inputs = []\n num_views = len(samples[0][DefaultDataKeys.INPUT])\n view_idx = 0\n while view_idx < num_views:\n for batch_ele in samples:\n imgs = batch_ele[DefaultDataKeys.INPUT]\n inputs.append(imgs[view_idx])\n\n view_idx += 1\n\n result[DefaultDataKeys.INPUT] = torch.stack(inputs)\n\n return result\n\n\ndef moco_collate_fn(samples):\n \"\"\"MOCO collate function for VISSL integration.\n\n Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT\n \"\"\"\n result = vissl_collate_helper(samples)\n\n inputs = []\n for batch_ele in samples:\n inputs.append(torch.stack(batch_ele[DefaultDataKeys.INPUT]))\n\n result[DefaultDataKeys.INPUT] = torch.stack(inputs).squeeze()[:, 0, :, :, :].squeeze()\n result[\"data_momentum\"] = torch.stack(inputs).squeeze()[:, 1, :, :, :].squeeze()\n\n return result\n" ]
[ [ "torch.utils.data._utils.collate.default_collate", "torch.stack" ] ]
eddy-ilg/iviz
[ "8c392dcc75a6563c5d076bbbd84152273a3c0f71" ]
[ "python/iviz/Util.py" ]
[ "#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\nfrom PyQt5.QtGui import QImage, qRgb, QPixmap\nimport numpy as np\nimport numpy as np\n\ngray_color_table = [qRgb(i, i, i) for i in range(256)]\n\ndef toQImage(data, copy=True):\n if data is None:\n return QImage()\n\n data = data.copy()\n\n data[data>255] = 255\n data[data<0] = 0\n data = data.astype(np.uint8)\n\n if data.dtype == np.uint8:\n if len(data.shape) == 2:\n qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_Indexed8)\n qim.setColorTable(gray_color_table)\n return qim.copy() if copy else qim\n\n elif len(data.shape) == 3:\n if data.shape[2] == 1:\n qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_Grayscale8)\n return qim.copy() if copy else qim\n if data.shape[2] == 3:\n qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_RGB888)\n return qim.copy() if copy else qim\n elif data.shape[2] == 4:\n qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_ARGB32)\n return qim.copy() if copy else qim\n else:\n raise Exception(\"Conversion of %d channel array to QImage not implemented\" % data.shape[2])\n\n raise Exception(\"Conversion of %d dimension array to QImage not implemented\" % len(data.shape))\n\ndef toQPixmap(data):\n if data is None: return QPixmap()\n elif isinstance(data, QPixmap): return data\n elif isinstance(data, QImage): QPixmap.fromImage(data)\n elif hasattr(data, 'pixmap'): return data.pixmap()\n else: return QPixmap.fromImage(toQImage(data))\n\ndef qPixmapToNumpy(pixmap):\n image = pixmap.toImage()\n image = image.convertToFormat(QImage.Format.Format_RGB32)\n\n width = image.width()\n height = image.height()\n\n ptr = image.bits()\n ptr.setsize(height * width * 4)\n arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))\n return arr[:, :, 0:3].copy()\n" ]
[ [ "numpy.frombuffer" ] ]
dalpengholic/Udacity_Recommendations_with_IBM
[ "8c620b733bf91b7b97b607373d0e6ff86934d03d" ]
[ "model/recommendation_functions.py" ]
[ "import pandas as pd\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics.pairwise import linear_kernel\n\ndef email_mapper(df):\n coded_dict = dict()\n cter = 1\n email_encoded = []\n \n for val in df['email']:\n if val not in coded_dict:\n coded_dict[val] = cter\n cter+=1\n \n email_encoded.append(coded_dict[val])\n return email_encoded\n\ndef create_user_item_matrix(df):\n '''\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '''\n # Fill in the function here\n user_item = df.groupby('user_id')['article_id'].value_counts().unstack()\n user_item[user_item.isna() == False] = 1\n \n return user_item # return the user_item matrix \n\n\ndef get_top_articles(n, df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n article_id_grouped_df = df.groupby(['title'])\n top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()\n \n return top_articles # Return the top article titles from df (not df_content)\n\ndef get_top_article_ids(n, df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n article_id_grouped_df = df.groupby(['article_id'])\n top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist()\n\n return top_articles_ids # Return the top article ids\n\n\n\ndef user_user_recs(user_id, user_item, df, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n * Choose the users that have the most total article interactions \n before choosing those with fewer article interactions.\n\n * Choose articles with the articles with the most total interactions \n before choosing those with fewer total interactions. 
\n \n '''\n def get_user_articles_names_ids(user_id):\n '''\n INPUT:\n user_id\n\n\n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '''\n # Your code here\n article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist()\n article_names = []\n for i in article_ids:\n try:\n title = df[df['article_id'] == i]['title'].unique()[0]\n except IndexError:\n title =\"None\"\n \n article_names.append(title)\n article_ids = list(map(str, article_ids))\n \n return article_ids, article_names # return the ids and names\n\n def find_similar_users():\n ''' \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '''\n # compute similarity of each user to the provided user\n user_item_tmp = user_item.copy()\n user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0\n row = user_item_tmp.loc[user_id] # 2. Select a row\n result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix \n result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id\n most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids\n \n return most_similar_users # return a list of the users in order from most to least similar\n\n def get_top_sorted_users(most_similar_users):\n \n '''\n INPUT:\n most_similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '''\n # Make neighbor_id column\n df_user_id_grouped =df.groupby(\"user_id\")\n df_user_id_grouped['article_id'].count().sort_values(ascending=False)\n neighbors_df = pd.DataFrame()\n neighbors_df['neighbor_id'] = most_similar_users\n \n # make similarity column\n user_item_tmp = user_item.copy()\n user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. 
Make Nan to 0\n row = user_item_tmp.loc[user_id] # Select a row\n result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix \n result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id\n similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10] \n neighbors_df['similarity'] = similarity\n \n # Make num_interactions column\n num_interactions = []\n for i in neighbors_df['neighbor_id']:\n counted_interaction = df_user_id_grouped['article_id'].count().loc[i]\n num_interactions.append(counted_interaction)\n neighbors_df['num_interactions'] = num_interactions\n neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)\n \n return neighbors_df # Return the dataframe specified in the doc_string\n \n recs = []\n rec_names =[]\n counter = 0\n # Get seen article ids and names from selected user id\n article_ids, article_names = get_user_articles_names_ids(user_id)\n # Make set to find unseen articles\n seen_ids_set = set(article_ids)\n most_similar_users = find_similar_users()[0:10]\n neighbors_df = get_top_sorted_users(most_similar_users)\n # Find similar users of the selected user\n similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df\n\n\n # Make recommendation list\n for sim_user in similar_users_list:\n if counter < m: \n # Get seen article ids and names from similar users\n sim_article_ids, sim_article_names = get_user_articles_names_ids(sim_user)\n # Make dict (key: article_ids, value:article_names)\n sim_user_dict = dict(zip(sim_article_ids, sim_article_names)) \n # Make set to find unseen articles\n sim_seen_ids_set = set(sim_article_ids)\n # Create set of unseen articles_ids\n unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set)\n\n for i in unseen_ids_set: \n if counter < m: \n recs.append(i)\n rec_names.append(sim_user_dict[i])\n counter += 1\n \n \n return recs, rec_names\n\n\n###\n\n\ndef make_Tfidf_array(df_content):\n def tokenize(text):\n '''\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. 
Clean_tokens : list of tokenized clean words\n '''\n # Get rid of other sepcial characters \n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n # Tokenize\n tokens = word_tokenize(text)\n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()\n clean_tokens.append(clean_tok)\n\n # Remove stop words \n stopwords = nltk.corpus.stopwords.words('english')\n clean_tokens = [token for token in clean_tokens if token not in stopwords]\n\n return clean_tokens\n\n corpus = df_content['doc_description']\n df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True)\n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n # Text Processing, Feature Extraction\n vect = TfidfVectorizer(tokenizer=tokenize)\n # get counts of each token (word) in text data\n X = vect.fit_transform(corpus)\n X = X.toarray()\n\n return vect, X\n\n\ndef make_content_recs(article_id, df_content, df, m=10):\n '''\n INPUT:\n article_id = (int) a article id in df_content\n m - (int) the number of recommendations you want for the user\n df_content - (pandas dataframe) df_content as defined at the top of the notebook \n df - (pandas dataframe) df as defined at the top of the notebook \n\n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n '''\n def tokenize(text):\n '''\n Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. \n The funtions also cleans irrelevant stopwords.\n Input:\n 1. text: text message\n Output:\n 1. Clean_tokens : list of tokenized clean words\n '''\n # Get rid of other sepcial characters \n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n # Tokenize\n tokens = word_tokenize(text)\n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip()\n clean_tokens.append(clean_tok)\n\n # Remove stop words \n stopwords = nltk.corpus.stopwords.words('english')\n clean_tokens = [token for token in clean_tokens if token not in stopwords]\n\n return clean_tokens\n \n vect, X = make_Tfidf_array(df_content)\n \n if article_id in df_content.article_id:\n cosine_similarity = linear_kernel(X, X)\n df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity'])\n df_similarity_modified = df_similarity.drop(article_id)\n recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist()\n rec_names = []\n\n for i in recs:\n name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]\n rec_names.append(name)\n\n else:\n tfidf_feature_name = vect.get_feature_names()\n # Get title of the document of interest\n booktitle = df[df['article_id'] == article_id]['title'].values[0]\n # Tokenize the title\n booktitle_tokenized = tokenize(booktitle)\n\n X_slice_list = []\n for i in booktitle_tokenized:\n if i in tfidf_feature_name:\n X_slice_list.append(tfidf_feature_name.index(i))\n\n X_slice_list.sort()\n X_sliced = X[:,X_slice_list]\n check_df = pd.DataFrame(X_sliced, columns=X_slice_list)\n check_df['sum'] = check_df.sum(axis=1)\n recs = check_df.sort_values(\"sum\", ascending=False)[0:10].index.tolist()\n rec_names = []\n for i in recs:\n name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0]\n rec_names.append(name)\n\n return recs, rec_names\n\n" ]
[ [ "pandas.DataFrame", "sklearn.metrics.pairwise.linear_kernel", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
hurwitzlab/viral-learning
[ "8d7aebc0d58fa32a429f4a47593452ee2722ba82" ]
[ "vl/model/training.py" ]
[ "\"\"\"\nTraining and validation method for arbitrary models.\n\"\"\"\nimport io\nimport os\nimport sys\nimport time\n\nfrom keras import Sequential\nfrom keras.layers import Dense, Dropout, BatchNormalization\n\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nimport numpy as np\nimport pandas as pd\n\n\nplt.switch_backend('agg')\n\n\ndef train_and_evaluate(model, model_name, training_epochs, the_data):\n\n print('model.metrics_names: {}'.format(model.metrics_names))\n\n total_steps = training_epochs * the_data.get_training_mini_batches_per_epoch()\n training_index = pd.RangeIndex(start=0, stop=total_steps, name='Training Step')\n training_metrics_df = pd.DataFrame(\n data=np.zeros((total_steps, len(model.metrics_names))),\n columns=model.metrics_names,\n index=training_index)\n\n # evaluate the model on the dev set(s) after each epoch\n dev_index = pd.RangeIndex(start=0, stop=training_epochs, name='Epoch')\n dev_columns = pd.MultiIndex.from_product(\n iterables=(the_data.get_dev_set_names(), model.metrics_names),\n names=('dev set', 'metric'))\n dev_metrics_df = pd.DataFrame(\n data=np.zeros((training_epochs, len(the_data.get_dev_set_names()) * len(model.metrics_names))),\n columns=dev_columns,\n index=dev_index)\n\n print(dev_metrics_df.head())\n\n steps_per_epoch = the_data.get_training_mini_batches_per_epoch()\n\n # n counts number of training iterations\n n = 0\n t0 = time.time()\n ##with h5py.File(the_data.fp, 'r', libver='latest', swmr=True) as train_test_file:\n # train for all epochs\n t00 = time.time()\n ##for train_X, train_y, step, epoch in the_data.get_training_mini_batches(data_file=train_test_file, yield_state=True):\n for train_X, train_y, step, epoch in the_data.get_training_data_generator()(yield_state=True):\n if epoch > training_epochs:\n print('completed {} training epochs in {:5.2f}s'.format(training_epochs, time.time()-t0))\n break\n else:\n # train on one mini batch\n print('training on batch {} ({})'.format(step, steps_per_epoch))\n training_metrics = model.train_on_batch(train_X, train_y)\n training_metrics_df.loc[n, model.metrics_names] = training_metrics\n n += 1\n\n # look at performance on dev data after each epoch\n # re-plot the training and dev metrics after each epoch\n if step == steps_per_epoch:\n print('completed training epoch {} in {:5.2f}s'.format(epoch, time.time()-t00))\n print('{} steps per epoch'.format(steps_per_epoch))\n print('{:5.2f}s per step'.format((time.time()-t00)/steps_per_epoch))\n print(training_metrics_df.loc[n-2:n])\n t00 = time.time()\n print('evaluate the model on the dev set(s)')\n\n #evaluate_dev_sets(epoch=epoch, model=model, the_data=the_data, train_test_file=train_test_file, dev_metrics_df=dev_metrics_df)\n evaluate_dev_sets(epoch=epoch, model=model, the_data=the_data, dev_metrics_df=dev_metrics_df)\n\n plot_training_and_dev_metrics(\n training_metrics_df,\n dev_metrics_df,\n model_name=model_name,\n steps_per_epoch=steps_per_epoch,\n epoch_count=training_epochs,\n output_fp=model_name + '.pdf')\n\n return training_metrics_df, dev_metrics_df\n\n\ndef evaluate_dev_sets(epoch, model, the_data, dev_metrics_df):\n\n for dev_steps, dev_set_name, dev_generator in the_data.get_dev_generators():\n sys.stdout.write('.')\n # print('dev set: \"{}\"'.format(dev_set_name))\n # print(' dev steps: {}'.format(dev_steps))\n dev_metrics = model.evaluate_generator(generator=dev_generator, steps=dev_steps)\n dev_metrics_df.loc[epoch - 1, (dev_set_name, 
model.metrics_names)] = dev_metrics\n sys.stdout.write('\\n')\n print('dev metrics:\\n{}'.format(dev_metrics_df.loc[epoch - 1]))\n\n\ndef build_layer(model_name, layer_type, kwargs):\n if layer_type == 'Dense':\n model_name.write('_dns_{}'.format(kwargs['units']))\n if 'kernel_regularizer' in kwargs:\n # the l2 field is a ndarray with shape ()\n # indexing with [] gives error 'too many indices'\n # the item() method is the first way I found to extract the float value from l2\n model_name.write('_l2_{:6.4f}'.format(kwargs['kernel_regularizer'].l2.item()))\n layer = Dense(**kwargs)\n elif layer_type == 'Dropout':\n model_name.write('_drp_{:3.2f}'.format(kwargs['rate']))\n layer = Dropout(**kwargs)\n elif layer_type == 'BatchNormalization':\n model_name.write('_bn')\n layer = BatchNormalization(**kwargs)\n else:\n raise Exception()\n\n return layer\n\n\ndef build_model(layers, model=None, input_dim=None):\n \"\"\"\n Build and return a Sequential model with Dense layers given by the layers argument.\n\n Arguments\n model (keras.Sequential) model to which layers will be added\n input_dim (int) dimension of input\n layers (tuple) sequence of 2-ples, one per layer, such as ((64, 'relu'), (64, 'relu'), (1, 'sigmoid'))\n\n Return\n model_name (str) a name for the model\n model (Model) a compiled model\n \"\"\"\n if model is None:\n model = Sequential()\n\n model_name = io.StringIO()\n layer_type, kwargs = layers[0]\n if input_dim is None:\n pass\n else:\n kwargs['input_dim'] = input_dim\n\n for layer_type, kwargs in layers:\n layer = build_layer(model_name, layer_type, kwargs)\n model.add(layer)\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n # trim the leading '_' from the model name - lazy!\n return model_name.getvalue()[1:], model\n\n\ndef plot_training_and_dev_metrics(training_metrics_df, dev_metrics_df, model_name, steps_per_epoch, epoch_count, output_fp):\n # generate network-specific accuracy and loss keys\n output_dp, output_filename = os.path.split(output_fp)\n output_basename, output_ext = os.path.splitext(output_filename)\n\n ##separate_plots_fp = os.path.join(output_dp, output_basename + '_separate' + output_ext)\n\n ##sorted_training_history_list = sorted(training_history_list, key=lambda h: h[2]['val_acc'][-1], reverse=True)\n\n with PdfPages(output_fp) as pdfpages:\n #for model_name, layers, history, t in sorted_training_history_list:\n #training_accuracy_loss = {}\n #validation_accuracy_loss = {}\n\n #training_accuracy_loss['acc ' + model_name] = history['acc']\n #training_accuracy_loss['loss ' + model_name] = history['loss']\n #validation_accuracy_loss['val_acc ' + model_name] = history['val_acc']\n #validation_accuracy_loss['val_loss ' + model_name] = history['val_loss']\n\n #training_df = pd.DataFrame(\n # data=training_accuracy_loss,\n # index=[b + 1 for b in range(epoch_count * batches_per_epoch)])\n #training_df.index.name = 'batch'\n\n #validation_df = pd.DataFrame(\n # data=validation_accuracy_loss,\n # index=[(e + 1) * batches_per_epoch for e in range(epoch_count)])\n #validation_df.index.name = 'batch'\n\n fig, ax1 = plt.subplots()\n legend = []\n #for loss_column in [column for column in training_df.columns if 'loss' in column and model_name in column]:\n #for training_metric_column in training_metrics_df.columns:\n #print('training metric column: {}'.format(training_metric_column))\n ax1.plot(training_metrics_df.index, training_metrics_df.loc[:, 'loss'], color='tab:blue', alpha=0.8)\n legend.append('training loss')\n #for 
loss_column in [column for column in validation_df.columns if\n # 'loss' in column and model_name in column]:\n # print('validation loss column: {}'.format(loss_column))\n # ax1.plot(validation_df.index, validation_df.loc[:, loss_column], color='tab:orange', alpha=0.8)\n # legend.append('val_loss')\n ax1.set_xlabel('epoch')\n tick_spacing = steps_per_epoch\n ax1.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\n ax1.set_xticklabels([0] + list(range(epoch_count+1)))\n ax1.set_ylabel('loss')\n ax1.legend(legend, loc='lower left')\n\n ax2 = ax1.twinx()\n legend = []\n #for acc_column in [column for column in training_metrics_df.columns if 'acc' in column]:\n #print('training acc column: {}'.format(acc_column))\n ax2.plot(training_metrics_df.index, training_metrics_df.loc[:, 'acc'], color='tab:purple', alpha=0.8)\n legend.append('training acc')\n for dev_acc_column in [column for column in dev_metrics_df.columns if 'acc' in column]:\n print('validation acc column: {}'.format(dev_acc_column))\n ax2.plot([steps_per_epoch * (n + 1) for n in dev_metrics_df.index], dev_metrics_df.loc[:, dev_acc_column], alpha=0.8)\n legend.append(dev_acc_column)\n ax2.set_title('Training and Development Metrics\\n{}'.format(model_name))\n ax2.set_ylim(0.0, 1.0)\n ax2.set_ylabel('accuracy')\n print(legend)\n ax2.legend(legend, loc='lower right')\n\n pdfpages.savefig()\n\n\n" ]
[ [ "matplotlib.pyplot.switch_backend", "matplotlib.ticker.MultipleLocator", "matplotlib.backends.backend_pdf.PdfPages", "pandas.RangeIndex", "matplotlib.pyplot.subplots" ] ]
lerry-lee/similarity-model
[ "74ea7f4fc97382d87e6ab71531e66182ca1ba3f4" ]
[ "ernie/classification/service/client.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nfrom propeller.service.client import InferenceClient\nfrom propeller import log\nimport six\nfrom tmp import util\nfrom time import time\nimport numpy as np\n\nclass ErnieClient(InferenceClient):\n def __init__(self, \n vocab_file, \n host='localhost', \n port=8888, \n batch_size=32, \n num_coroutine=1, \n timeout=10., \n max_seqlen=128):\n host_port = 'tcp://%s:%d' % (host, port) \n client = super(ErnieClient, self).__init__(host_port, batch_size=batch_size, num_coroutine=num_coroutine, timeout=timeout)\n self.vocab = {j.strip().split(b'\\t')[0].decode('utf8'): i for i, j in enumerate(open(vocab_file, 'rb'))}\n self.tokenizer = util.data.CharTokenizer(self.vocab.keys())\n self.max_seqlen = max_seqlen\n self.cls_id = self.vocab['[CLS]']\n self.sep_id = self.vocab['[SEP]']\n\n def txt_2_id(self, text):\n ids = np.array([self.vocab[i] for i in self.tokenizer(text)])\n return ids\n\n def pad_and_batch(self, ids):\n max_len = max(map(len, ids))\n padded = np.stack([np.pad(i, [[0, max_len - len(i)]], mode='constant')for i in ids])\n padded = np.expand_dims(padded, axis=-1)\n return padded\n\n def __call__(self, text_a, text_b=None):\n if text_b is not None and len(text_a) != len(text_b):\n raise ValueError('text_b %d has different size than text_a %d' % (text_b, text_a))\n text_a = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_a]\n if text_b is not None:\n text_b = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_b]\n\n ids_a = map(self.txt_2_id, text_a)\n if text_b is not None:\n ids_b = map(self.txt_2_id, text_b)\n ret = [util.data.build_2_pair(a, b, self.max_seqlen, self.cls_id, self.sep_id) for a, b in zip(ids_a, ids_b)]\n else:\n ret = [util.data.build_1_pair(a, self.max_seqlen, self.cls_id, self.sep_id) for a in ids_a]\n sen_ids, token_type_ids = zip(*ret)\n sen_ids = self.pad_and_batch(sen_ids)\n token_type_ids = self.pad_and_batch(token_type_ids)\n ret, = super(ErnieClient, self).__call__(sen_ids, token_type_ids)\n return ret\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='ernie_encoder_client')\n parser.add_argument('--host', type=str, default='localhost')\n parser.add_argument('-i', '--input', type=str, required=True)\n parser.add_argument('-o', '--output', type=str, required=True)\n parser.add_argument('-p', '--port', type=int, default=8888)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_coroutine', type=int, default=1)\n parser.add_argument('--vocab', type=str, required=True)\n args = parser.parse_args()\n\n client = ErnieClient(args.vocab, args.host, args.port, batch_size=args.batch_size, num_coroutine=args.num_coroutine)\n inputs = [i.strip().split(b'\\t') for i in 
open(args.input, 'rb').readlines()]\n if len(inputs) == 0:\n raise ValueError('empty input')\n send_batch = args.num_coroutine * args.batch_size\n send_num = len(inputs) // send_batch + 1\n rets = []\n start = time()\n for i in range(send_num):\n slice = inputs[i * send_batch: (i + 1) * send_batch]\n if len(slice) == 0:\n continue\n columns = list(zip(*slice))\n if len(columns) > 2:\n raise ValueError('inputs file has more than 2 columns')\n ret = client(*columns)\n if len(ret.shape) == 3:\n ret = ret[:, 0, :] # take cls\n rets.append(ret)\n end = time()\n with open(args.output, 'wb') as outf:\n arr = np.concatenate(rets, 0)\n np.save(outf, arr)\n log.info('query num: %d average latency %.5f' % (len(inputs), (end - start)/len(inputs)))\n\n" ]
[ [ "numpy.concatenate", "numpy.save", "numpy.expand_dims" ] ]
mczhuge/Kaleido-BERT
[ "1b14073e3ad3490c50bbd1e7e94846830671b332" ]
[ "easytransfer/losses/kd_loss.py" ]
[ "# coding=utf-8\n# Copyright (c) 2019 Alibaba PAI team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport tensorflow as tf\n\n\ndef build_kd_loss(teacher_logits,\n student_logits,\n task_balance=0.3,\n distill_tempreture=2.0,\n labels=None,\n loss_type='mse'):\n if loss_type == 'mse':\n # mean square error\n return mse_loss(teacher_logits, student_logits)\n elif loss_type == 'xent':\n # cross entropy\n return xent_loss(teacher_logits, student_logits, labels,\n distill_tempreture, task_balance)\n else:\n # kl divergence\n return kld_loss(teacher_logits, student_logits, labels,\n distill_tempreture, task_balance)\n\n\ndef mse_loss(teacher_logits, student_logits):\n loss = tf.reduce_mean(tf.nn.l2_loss(teacher_logits - student_logits))\n return loss\n\n\ndef xent_loss(teacher_logits, student_logits, labels, distill_tempreture,\n task_balance):\n student_task_xent = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(labels),\n logits=student_logits))\n teacher_targets = tf.nn.softmax(teacher_logits / distill_tempreture)\n student_distill_xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n labels=tf.stop_gradient(teacher_targets), logits=student_logits))\n losses = task_balance * student_task_xent\n losses += (1 - task_balance) * student_distill_xent\n\n return losses\n\n\ndef kld_loss(teacher_logits, student_logits, labels, distill_temperature,\n task_balance):\n student_task_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.squeeze(labels), logits=student_logits)\n student_distill = tf.reduce_sum(tf.nn.softmax(student_logits / distill_temperature) * (\n tf.log(tf.nn.softmax(student_logits / distill_temperature + 1e-5) -\n tf.log(tf.nn.softmax(teacher_logits / distill_temperature + 1e-5)))))\n losses = task_balance * tf.reduce_mean(student_task_xent)\n losses += (1 - task_balance) * tf.reduce_mean(student_distill)\n\n return losses\n\n\ndef build_kd_probes_loss(teacher_logits,\n student_logits,\n task_balance=0.3,\n distill_tempreture=2.0,\n labels=None,\n loss_type='mse'):\n teacher_n_layers = len(teacher_logits) - 1\n student_n_layers = len(student_logits) - 1\n probes_kd_loss = 0.0\n for i in range(student_n_layers):\n proportional_layer_idx = int(math.ceil(i * teacher_n_layers / student_n_layers))\n\n student_layer_logits = student_logits[i]\n teacher_layer_logits = teacher_logits[proportional_layer_idx]\n probes_kd_loss += build_kd_loss(teacher_logits=teacher_layer_logits,\n student_logits=student_layer_logits,\n task_balance=task_balance,\n distill_tempreture=distill_tempreture,\n labels=labels,\n loss_type=loss_type)\n return probes_kd_loss\n" ]
[ [ "tensorflow.nn.l2_loss", "tensorflow.squeeze", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
UNIZAR-30226-2022-09/back-end
[ "7f20e141e34bf0ae7cce70515a1e4bb0cd85b173" ]
[ ".vscode-server/data/User/History/-1f47d17c/IWlp.py" ]
[ "# from flask import Flask, Blueprint\n# from flask_sqlalchemy import SQLAlchemy\n# from flask_login import LoginManager\n# import os\n\nfrom flask import Flask, jsonify, request, make_response, redirect, url_for\nimport jwt\nimport datetime\nimport os\nfrom functools import wraps\nfrom flask_sqlalchemy import SQLAlchemy\nimport uuid\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom werkzeug.utils import secure_filename\nfrom sqlalchemy import select\nfrom flask_migrate import Migrate, migrate\nfrom flask_cors import CORS\nfrom sqlalchemy import inspect\nfrom sqlalchemy import Table, Column, MetaData, Integer, Computed\nfrom numpy import array\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secretollave'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'\nABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'\nABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'\nCORS(app)\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\n# Models\nclass Usuario(db.Model):\n nick = db.Column(db.String(20), primary_key=True)\n Nombre_de_usuario = db.Column(db.String(50))\n password = db.Column(db.String(50))\n e_mail = db.Column(db.String(50), unique=True, nullable=False)\n descripcion = db.Column(db.String(1000))\n link = db.Column(db.String(200))\n foto_de_perfil = db.Column(db.String(400))\n\nclass Sigue(db.Model):\n #id = db.Column(db.Integer, primary_key=True )\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\nclass Chat(db.Model):\n\n #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())\n timestamp = db.Column(db.TIMESTAMP, nullable=False,\n server_default=db.func.now(),\n onupdate=db.func.now())\n\n mensaje = db.Column(db.String(1000))\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\n\nclass Publicacion(db.Model):\n\n id = db.Column(Integer,primary_key=True)\n #id = db.Sequence('id', start=1, increment=1)\n descripcion = db.Column(db.String(1000))\n #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())\n timestamp = db.Column(db.TIMESTAMP, nullable=False,\n server_default=db.func.now(),\n onupdate=db.func.now())\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))\n\nclass Propia(db.Model):\n\n pdf = db.Column(db.String(400))\n id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)\n\n\nclass Recomendacion(db.Model):\n\n link = db.Column(db.String(200),nullable=False)\n titulo = db.Column(db.String(200),nullable=False)\n autor = db.Column(db.String(200),nullable=False)\n id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)\n\nclass Tematica(db.Model):\n\n tema = db.Column(db.String(50), primary_key=True )\n\n\nclass Notificaciones(db.Model):\n\n id = db.Column(db.Integer, primary_key=True )\n fecha = db.Column(db.Date)\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\n\nclass Prefiere(db.Model):\n\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)\n\n\nclass Trata_pub_del_tema(db.Model):\n\n id = db.Column(db.Integer, 
db.ForeignKey('publicacion.id'),primary_key=True)\n tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)\n\nclass Gusta(db.Model):\n\n id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\n\nclass Comenta(db.Model):\n\n id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n comentario = db.Column(db.String(1000))\n\nclass Guarda(db.Model):\n\n id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\nclass Trata(db.Model):\n\n id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)\n id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)\n\n\nclass Genera(db.Model):\n\n id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)\n Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)\n\n\n\n\ndef token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n #token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn\n #data = request.get_json()\n token = request.headers['token']\n #token = data['token']\n if not token:\n return jsonify({'error': 'Token no existe'}), 403\n\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = Usuario.query.filter_by(nick=data['nick']).first()\n current_user = data['nick']\n except:\n return jsonify({'error': 'Token no valido'}), 403\n\n return f(current_user,*args, **kwargs)\n return decorated\n\n\n@app.route('/unprotected')\ndef unprotected():\n return jsonify({'message': 'Puede entrar tol mundo'})\n\n@app.route('/protected')\n@token_required\ndef protected(current_user):\n print(current_user)\n return jsonify({'message': 'Puedes entrar si puedes'})\n\n# Ruta para el login\n\n\n\n@app.route('/register', methods=['POST'])\ndef add_data():\n data= request.get_json()\n #nick = request.form.get(\"nick\")\n #password = request.form.get(\"password\")\n #e_mail = request.form.get(\"e_mail\")\n\n\n user = Usuario.query.filter_by(e_mail=data['e_mail']).first()\n nick = Usuario.query.filter_by(nick=data['nick']).first()\n if user: # si esto devuelve algo entonces el email existe\n return jsonify({'error': 'Existe correo'}) #json diciendo error existe email\n if nick:\n return jsonify({'error': 'Existe nick'})\n #if (check_email(e_mail) == True and check_password(data['password']) == True ):\n register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil=\"platon.jpg\")\n db.session.add(register)\n db.session.commit()\n\n\n token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])\n return jsonify({'token' : token.decode('UTF-8')})\n\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n # auth = request.authorization #new ESTO SI LO HACES CON AUTH\n\n data= request.get_json()\n\n if '@' in data['nickOcorreo']:\n user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()\n else:\n user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()\n\n if not user:\n return jsonify({'error': 'No existe ese usuario'})#error mal user\n if not check_password_hash(user.password, 
data['password']):\n return jsonify({'error': 'Mal contraseña'}) #error mala contraseña\n\n\n token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])\n return jsonify({'token' : token.decode('UTF-8')})\n\n\n\n\n@app.route('/editarPerfil', methods=['GET'])\n@token_required\ndef editarPerfilget(current_user):\n s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))\n result = db.session.execute(s)\n\n seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()\n seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()\n nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()\n\n tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))\n temas = db.session.execute(tema)\n vector = []\n for row in temas:\n vector += row\n for row in result:\n fila = {\n \"nick\": current_user,\n \"nombre_de_usuario\":row[0],\n \"descripcion\":row[1],\n \"link\":row[2],\n \"foto_de_perfil\": 'http://51.255.50.207:5000/display/' + row[3],\n \"nsiguiendo\": seguidos,\n \"nseguidores\": seguidores,\n \"nposts\": nposts,\n \"tematicas\": vector\n #\"foto_de_perfil\" :url_for('static', filename='fotosPerfil/' + row[3])\n }\n return fila\n\n@app.route('/display/<filename>')\ndef foto(filename):\n return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)\n\n\n@app.route('/editarPerfil', methods=['POST'])\n@token_required\ndef editarPerfilpost(current_user):\n\n data= request.get_json()\n user = Usuario.query.filter_by(nick=current_user).first()\n user.Nombre_de_usuario = data['nombre_de_usuario']\n print(data['nombre_de_usuario'])\n print(data['descripcion'])\n print(data['link'])\n print(data['tematicas'])\n user.descripcion = data['descripcion']\n user.link = data['link']\n tematicas = data['tematicas']\n for temas in tematicas:\n tema = Prefiere.query.filter_by(tema=temas).first()\n if not tema:\n tema = Prefiere(Usuario_Nicka=current_user, tema = temas)\n db.session.add(tema)\n #db.session.commit()\n #cambia_foto\n\n db.session.commit()\n\n token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])\n return jsonify({'token' : token.decode('UTF-8')})\n\n\n@app.route('/actualizarImagen', methods=['POST'])\n@token_required\ndef actualizarImagen(current_user):\n user = Usuario.query.filter_by(nick=current_user).first()\n\n if request.files['nueva_foto'] is not None: #data['cambia_foto']:\n\n \tfile = request.files['nueva_foto']\n \tprint(request.files['nueva_foto'])\n \tfilename = secure_filename(file.filename)\n \tfile.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))\n \tuser.foto_de_perfil = filename\n \tdb.session.commit()\n\n token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])\n return jsonify({'token' : token.decode('UTF-8')})\n\n@app.route('/subirPost', methods=['POST'])\n@token_required\ndef subirPost(current_user):\n\n data= request.get_json()\n\n publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user) #coger id\n db.session.add(publicacion)\n db.session.commit()\n\n tematicas = data['tematicas']\n for temas in tematicas:\n temita = Tematica.query.filter_by(tema=temas).first()\n if temita:\n 
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)\n db.session.add(nuevo)\n db.session.commit()\n if (data['tipo']==\"1\"): # articulo\n print(\"xd\")\n guardarPDF(request.files['pdf'], publicacion.id)\n elif(data['tipo']==\"2\"): # recomendacion\n recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)\n db.session.add(recomendacion)\n \n \n db.session.commit()\n token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])\n return jsonify({'token' : token.decode('UTF-8')})\n\n\ndef guardarPDF(pdf,_id):\n propia = Propia.query.filter_by(id=_id).first()\n if pdf is not None:\n \tfile = pdf\n \tprint(pdf)\n \tfilename = secure_filename(file.filename)\n \tfile.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))\n \tpropia.pdf = filename\n \tdb.session.add(propia)\n\n\n@app.route('/getPostsPropios', methods=['GET'])\n@token_required\ndef getPostsPropios(current_user):\n\n data= request.get_json()\n\n a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))\n resulta = db.session.execute(a)\n #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)\n \n s=select(Publicacion).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())\n results = db.session.execute(s)\n\n \n for r in results:\n for i in range(data['id']-8,data['id']):\n a = select([Propia.id, Propia.pdf]).where((Propia.id == r.id))\n resulta = db.session.execute(a)\n\n Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == row[1] ).count()\n Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == row[1] ).count()\n Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == row[1] ).count()\n\n fila = {\n \"id\": r.id,\n \"nick\": current_user,\n \"descripcion\":r.descripcion,\n \"timestamp\":r.timestamp,\n \"pdf\": 'http://51.255.50.207:5000/display2/' + a.pdf,\n \"nlikes\": Gustas,\n \"ncomentarios\": Comentarios,\n \"nguardados\": Guardados,\n \"usuario\": resulta.nombre_de_usuario\n }\n \n return fila\n\n\n@app.route('/display2/<filename>')\ndef pdf(filename):\n return redirect(url_for('static', filename='pdf/' + filename),code = 301)\n\n@app.route('/getPostsRecomendados', methods=['GET'])\n@token_required\ndef getPostsRecomendados(current_user):\n\n #data= request.get_json()\n\n a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))\n resultb = db.session.execute(a)\n Nombre_de_usuario = \"\"\n for b in resultb: \n Nombre_de_usuario=b.Nombre_de_usuario\n #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)\n \n s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())\n\n results = db.session.execute(s)\n \n # for record in results:\n # print(\"\\n\", record)\n\n vector0 = array([])\n\n vector1 = []\n vector2 = []\n \n for r in results:\n print(str(r.id))\n vector0 += r.id\n vector1 += str(r.descripcion)\n vector2 += str(r.timestamp)\n \n # for r in results:\n # for b in resultb: \n # a = 
select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))\n # resulta = db.session.execute(a)\n # for a in resultaa:\n # Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()\n # Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()\n # Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()\n\n \n print(vector0)\n fila = {\n \"id\": vector0,\n #\"link\": a.link,\n #\"titulo\": a.titulo,\n #\"autor\": a.autor,\n \"nick\": current_user,\n \"descripcion\": vector1,\n \"timestamp\": vector2,\n #\"nlikes\": Gustas,\n #\"ncomentarios\": Comentarios,\n #\"nguardados\": Guardados,\n \"usuario\": Nombre_de_usuario\n }\n \n return fila\n\ndef check_email(email):\n\n regex = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n\n if(re.search(regex,email)):\n return True\n else:\n return False\n\n# Contraseñas de entre 8 y 32 carácteres.\n\ndef check_password(password):\n\n regex = '^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}[]:;<>,.?/~_+-=|\\]).{8,32}$'\n\n if(re.search(regex,password)):\n return True\n else:\n return False\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
sushmaakoju/parser
[ "e40e3f818921141044b499e231ae75e6bf4141c2" ]
[ "src/equation_parser/equations.py" ]
[ "from __future__ import absolute_import\n\n#SymPy is a non-commercial alternative to Mathematica and Maple\n# SymPy can map variable to a value or a matrix.\n# SymPy's Symbolic Statistical Modelling uses scintific computing.\nimport sys\nimport numpy as np\nimport sympy as sp\nimport pandas as pd\nfrom pathlib import Path\nfrom .tokens import *\nfrom .equation import *\n\nclass Equations(Equation):\n def __init__(self):\n path = Path(__file__).parent\n self.filepath = path.joinpath(\"fixtures\",\"equations.xlsx\")\n self.equations_sheet = \"equations\"\n self.column_mapping_sheet = \"col_var_mapping\" \n self.data_sheet = \"values\" \n self.mappings = None\n self.df = None\n self.equations_df = pd.DataFrame()\n self.equations = dict()\n self.lhs = None\n self.values = dict()\n \n def upload_data_equations(self, filepath, equations_sheet, data_sheet, column_mapping_sheet=\"\"):\n if not self.validate_file_inputs(filepath, equations_sheet, data_sheet):\n return False\n self.filepath = filepath\n self.equations_df = pd.read_excel(self.filepath, sheet_name=equations_sheet, mangle_dupe_cols=True)\n self.df = pd.read_excel(self.filepath, sheet_name=data_sheet, mangle_dupe_cols=True)\n if column_mapping_sheet:\n self.mappings = pd.read_excel(self.filepath, sheet_name=column_mapping_sheet, mangle_dupe_cols=True)\n\n def validate_file_inputs(self, filepath, equations_sheet, data_sheet):\n if not filepath or not equations_sheet or not data_sheet:\n raise Exception(\"Empty upload data inputs. Please provide valid inputs to file upload.\")\n else:\n return True\n return False\n\n def process_equations(self):\n self.lhs = self.equations_df['name']\n eq_list = self.equations_df['equation']\n self.equations = dict()\n for variable, equation in zip(self.lhs, eq_list):\n self.equations[variable] = Equation(equation, self.df)\n self.equations[variable].set_symbols(self.mappings)\n self.values[variable] = self.equations[variable].evaluate(self.values)\n result_df = pd.DataFrame.from_dict(self.values)\n result_df.to_csv(\"results.csv\", index=False)\n return self.values\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.DataFrame", "pandas.read_excel" ] ]
rejux/rklearn-lib
[ "56bc4f087a8c971cb545d65b0c1f9bafaaec3d67", "56bc4f087a8c971cb545d65b0c1f9bafaaec3d67" ]
[ "rklearn/tests/it/cifar10_cnn.py", "rklearn/tests/it/test_rklearn_perceptron_binary_mnist.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#############\n## Imports ##\n#############\n\nimport os\nimport sys ; sys.path.append(\"/home/developer/workspace/rklearn-lib\")\nimport tensorflow as tf\n\nfrom rklearn.tfoo_v1 import BaseModel\n\n#################\n## CIFAR10CNN ##\n#################\n\nclass CIFAR10CNN(BaseModel):\n\n ################\n ## __init__() ##\n ################\n\n def __init__(self, config, logger = None):\n super().__init__(config, logger)\n\n try:\n\n # these parameters are sent to the trainer through the model because it is easier\n self.num_epochs = self.config.cifar10_cnn[\"num_epochs\"] \n self.learning_rate = self.config.cifar10_cnn[\"learning_rate\"]\n \n self.max_to_keep = self.config.cifar10_cnn[\"max_to_keep\"]\n self.checkpoint_dir = self.config.cifar10_cnn[\"checkpoint_dir\"]\n self.model_dir = self.config.cifar10_cnn[\"model_dir\"]\n\n os.makedirs(self.checkpoint_dir, exist_ok = True)\n os.makedirs(self.model_dir, exist_ok = True)\n \n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"error msg = {}, error type = {}, error file = {}, error line = {}\".format(e, exc_type, fname, exc_tb.tb_lineno))\n\n raise RuntimeError(\"Error in CIFAR10CNN construction regarding the checkpoints and model directories!\")\n\n ###################\n ## build_model() ##\n ###################\n\n def build_model(self):\n \"\"\"\n Build the custom CNN for the CIFAR-10 dataset.\n \"\"\"\n\n # The input data holders (cf. shapes after prepa) \n self.X = tf.compat.v1.placeholder(tf.float32, shape = (None, \n self.config.data[\"image_size\"], \n self.config.data[\"image_size\"], \n self.config.data[\"num_channels\"]), name=\"X\") # ex. (50000, 32, 32, 3)\n self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data[\"num_categories\"]), name=\"y\") # ex. (50000, 10) \n self.train = tf.compat.v1.placeholder(tf.bool)\n\n # The CNN architecture = conv/poo layers + flatten layer + connected layers\n with tf.name_scope(\"cnn\"):\n\n # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop \n self.conv1 = tf.layers.conv2d(self.X, \n self.config.cifar10_cnn[\"num_filters\"], \n self.config.cifar10_cnn[\"filter_size\"], \n padding='same', activation=tf.nn.relu)\n self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn[\"keep_prob\"], training=self.train)\n self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)\n\n self.conv2 = tf.layers.conv2d(self.pool1, \n self.config.cifar10_cnn[\"num_filters\"], \n self.config.cifar10_cnn[\"filter_size\"],\n padding='same', activation=tf.nn.relu)\n self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn[\"keep_prob\"], training=self.train)\n self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)\n \n self.conv3 = tf.layers.conv2d(self.pool2, \n self.config.cifar10_cnn[\"num_filters\"], \n self.config.cifar10_cnn[\"filter_size\"],\n padding='same', activation=tf.nn.relu)\n self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)\n \n self.conv4 = tf.layers.conv2d(self.pool3, \n self.config.cifar10_cnn[\"num_filters\"], \n self.config.cifar10_cnn[\"filter_size\"],\n padding='same', activation=tf.nn.relu)\n self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn[\"keep_prob\"], training=self.train)\n \n # b. 
Flatten input data\n self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn[\"fc1_nb_units\"]])\n\n # Create connected layers: fc1, fc2\n with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], \n normalizer_fn=tf.contrib.layers.batch_norm, \n normalizer_params={\"is_training\": self.train}):\n self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn[\"fc1_nb_units\"])\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data[\"num_categories\"], activation_fn=None)\n\n # Compute loss\n with tf.name_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))\n\n # Optimizer\n with tf.name_scope(\"training_op\"):\n self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n\n # Perf metrics\n with tf.name_scope(\"accuracy\"):\n prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))\n\n\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n##############################################\n## test_rklearn_perceptron_binary_mnist.py ##\n##############################################\n\n\"\"\"\nThis program is an application of the Perceptron algorithm to the MNIST dataset.\nSince the Perceptron is only capable of doing binary classification, we'll just use it to \nidentify one digit 5. Thus, it will predict if a digit is 5 (Y) or not 5 (N).\n\nUsage:\n$ cd <top_dir>\n$ python rklearn/tests/test_rklearn_perceptron_binary_mnist.py --conf=rklearn/tests/config/config-mnist.yaml\n\n\"\"\"\n\n\n\n#############\n## Imports ##\n#############\n\nimport unittest\n\nimport os\nimport sys\nimport time\nimport argparse\nimport yaml\nimport numpy as np ; np.random.seed(1) # ; np.set_printoptions(threshold=np.inf)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nfrom rktools.loggers import init_logger\n\nfrom rklearn.perceptron import Perceptron\nfrom rklearn.open_data_loaders import load_mnist_sklearn\nfrom rklearn.plotters import mnist_digit_pretty_printer, plot_simple_sequence\n\n#############################\n## binarize_mnist_labels() ##\n#############################\n\ndef binarize_mnist_labels(y, pos_class):\n \"\"\"\n Create the 2 classes in the whole set: pos_class set to 1 and others set to 0\n \"\"\"\n return np.where(y == pos_class, 1, 0) # pos_class => 1, all others => 0\n\n\n\n#############\n## main() ##\n#############\n\ndef main(config):\n\n try:\n\n start = time.time()\n\n logger.info(\"\")\n logger.info(\"===========================\")\n logger.info(\"Load the MNIST data...\")\n logger.info(\"===========================\")\n logger.info(\"\")\n\n os.makedirs(config[\"mnist_data\"][\"data_home\"], exist_ok=True)\n \n start_prep = time.time()\n X,y = load_mnist_sklearn(data_home=config[\"mnist_data\"][\"data_home\"], shuffle = True, logger = logger)\n end_prep = time.time()\n logger.info(\"Data loaded in {} seconds\".format(end_prep - start_prep))\n\n logger.info(\"X.shape = {}, y.shape = {}\".format(X.shape, y.shape))\n logger.debug(\"X.sample = \\n{}, \\ny.sample = {}\".format(X[:5,], y[:5]))\n\n\n logger.info(\"\")\n logger.info(\"===========================\")\n logger.info(\"Labels binarization...\")\n logger.info(\"===========================\")\n logger.info(\"\")\n \n # get the index of the first element = pos_class sample\n pos_class = config[\"mnist_data\"][\"pos_class\"]\n 
logger.info(\"Positive class = {} for this use case. All over are negatives.\".format(pos_class))\n \n # get the indices of one sample of pos_class, and another of a negative class\n\n try:\n index_pos_sample = y.tolist().index(pos_class)\n except:\n raise Exception(\"No element equal to {} (e.g. pos_class) was found in the target vector. Abort!\".format(pos_class))\n\n # we can safely assume that the index before is a neg class\n\n if index_pos_sample > 0:\n index_neg_sample = index_pos_sample - 1 \n else: \n index_neg_sample = index_pos_sample + 1\n\n logger.info(\"index_pos_sample = {} => label = {}\".format(index_pos_sample, y[index_pos_sample]))\n logger.info(\"index_neg_sample = {} => label = {}\".format(index_neg_sample, y[index_neg_sample]))\n\n # Labels binarization\n y_bin = binarize_mnist_labels(y, pos_class)\n\n logger.info(\"(after binarization) Labels samples, y_bin[:10] = {}\".format(y_bin[:10]))\n\n logger.info(\"\")\n logger.info(\"=============================\")\n logger.info(\"Train-test split the data\")\n logger.info(\"=============================\")\n logger.info(\"\")\n\n # train: 0 -> 60K - 1 / test: 60K -> 70K => ratio test vs train = 15% \n X_train, X_test, y_train, y_test = train_test_split(X, y_bin, train_size=60000, random_state=1, stratify=y_bin)\n\n logger.info(\"X_train.shape = {}, y_train.shape = {}\".format(X_train.shape, y_train.shape))\n logger.info(\"X_test.shape = {}, y_test.shape = {}\".format(X_test.shape, y_test.shape))\n\n logger.info(\"\")\n logger.info(\"Check the stratifications in both train and test sets:\")\n \n bin_count = np.bincount(y_bin)\n logger.info(\"Labels counts in y_bin = {} => percentage neg/pos = {:.2f}%\".format(bin_count, ((bin_count[1]/bin_count[0])*100)))\n\n bin_count = np.bincount(y_train)\n logger.info(\"Labels counts in y_train = {} => percentage neg/pos = {:.2f}%\".format(bin_count, ((bin_count[1]/bin_count[0])*100)))\n\n bin_count = np.bincount(y_test)\n logger.info(\"Labels counts in y_test = {} => percentage neg/pos = {:.2f}%\".format(bin_count, ((bin_count[1]/bin_count[0])*100)))\n\n logger.info(\"\")\n logger.info(\"=========================================\")\n logger.info(\"Data viz: display some samples data\")\n logger.info(\"=========================================\")\n logger.info(\"\")\n\n # recalculate the index_pos_sample, and index_neg_sample, because \n # split hasmodified the indexing\n\n try:\n index_pos_sample = y_train.tolist().index(1) # pos_class is now 1\n except:\n raise Exception(\"No element equal to 1 (e.g. pos_class) was found in the target vector. 
Abort!\")\n\n if index_pos_sample > 0:\n index_neg_sample = index_pos_sample - 1 \n else: \n index_neg_sample = index_pos_sample + 1\n\n logger.info(\"A pos_class sample at index {}: \".format(index_pos_sample))\n logger.info(\"new label = {}\".format(y_train[index_pos_sample]))\n logger.info(\"raw pixels values = \\n{}\".format(np.reshape(X_train[index_pos_sample], (-1, 28))))\n logger.info(\"pretty print version = \\n{}\".format(mnist_digit_pretty_printer(np.reshape(X_train[index_pos_sample], (-1, 28)))))\n \n logger.info(\"A neg_class sample ayt index {}: \".format(index_neg_sample))\n logger.info(\"new label = {}\".format(y_train[index_neg_sample]))\n logger.info(\"raw pixels values = \\n{}\".format(np.reshape(X_train[index_neg_sample], (-1, 28))))\n logger.info(\"pretty print version = \\n{}\".format(mnist_digit_pretty_printer(np.reshape(X_train[index_neg_sample], (-1, 28)))))\n\n logger.info(\"\")\n logger.info(\"=============================\")\n logger.info(\"Data preparation\")\n logger.info(\"=============================\")\n logger.info(\"\")\n\n # simple normalization (e.g. scale values between [0,1] \n X_prep = X_train/255\n y_prep = y_train\n\n logger.info(\"Some data viz to check quickly the (X_prep, y_prep) matrices...\")\n logger.info(\"\\tX_prep.shape = {}, y_prep.shape = {}\".format(X_prep.shape, y_prep.shape))\n logger.info(\"\")\n logger.info(\"The pos_class sample at index {} (after data prep phase): \".format(index_pos_sample))\n logger.info(\"\\tnew label = {}\".format(y_prep[index_pos_sample]))\n logger.info(\"\\traw pixels values = \\n{}\".format(np.reshape(X_prep[index_pos_sample], (-1, 28))))\n logger.info(\"\\tpretty print version = \\n{}\".format(mnist_digit_pretty_printer(np.reshape(X_prep[index_pos_sample], (-1, 28)))))\n\n logger.info(\"\")\n logger.info(\"===================================\")\n logger.info(\"Fitting the rklearn perceptron\")\n logger.info(\"===================================\")\n logger.info(\"\")\n\n # training plot\n os.makedirs(config[\"mnist_data\"][\"plots_dir\"], exist_ok=True)\n train_error_fig = config[\"perceptron_hyper\"][\"train_error_fig\"]\n\n # hyperparams\n lr = config[\"perceptron_hyper\"][\"lr\"]\n n_epochs = config[\"perceptron_hyper\"][\"n_epochs\"]\n\n logger.info(\"Hyperparameters:\")\n logger.info(\"\\tlr = {}\".format(lr))\n logger.info(\"\\tnb epochs = {}\".format(n_epochs))\n\n start_fit = time.time()\n ppn = Perceptron(lr=lr, n_epochs=n_epochs)\n ppn.fit(X_prep, y_prep)\n end_fit = time.time()\n logger.info(\"Fit done in {} seconds\".format(end_fit - start_fit))\n logger.info(\"Training errors for all epochs = {}\".format(ppn.errors_))\n\n suffix = \"perceptron-lr-{}-epochs-{}\".format(lr, n_epochs)\n fig = train_error_fig.format(suffix)\n plot_simple_sequence(ppn.errors_,\n xlabel=\"Epochs\", ylabel=\"Errors\",\n title=\"Training errors = f(Epochs) - lr = {}\".format(lr)).savefig(fig, dpi=300)\n logger.info(\"Plotted trainig errors in {}\".format(fig))\n\n logger.info(\"\")\n logger.info(\"===========================\")\n logger.info(\"Testing Accuracy\")\n logger.info(\"===========================\")\n logger.info(\"\")\n\n # Simple accuracy\n y_pred = ppn.predict(X_test)\n logger.info(\"Misclassified examples: {} on {} samples \".format((y_test != y_pred).sum(), len(y_test)))\n logger.info(\"Classification Accuracy: {:.3f}\".format(accuracy_score(y_test, y_pred)))\n\n end = time.time()\n logger.info(\"Total duration = {} secs.\".format(end - start))\n\n except Exception as e:\n exc_type, exc_obj, exc_tb = 
sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logger.error(\"error msg = {}, error type = {}, error file = {}, error line = {}\".format(e, exc_type, fname, exc_tb.tb_lineno))\n\n\n###################\n## parse_args() ##\n###################\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--conf\", help=\"Path to the YAML configuration file\", required=True,)\n ns, args = parser.parse_known_args(namespace=unittest)\n return ns, sys.argv[:1] + args\n\n###############\n## __main__ ##\n###############\n\nif __name__ == '__main__':\n\n logger = None\n FLAGS = None\n config = None\n\n # Parse cmd line arguments\n FLAGS, argv = parse_args()\n sys.argv[:] = argv\n\n with open(FLAGS.conf, 'r') as ymlfile:\n config = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n assert(config is not None)\n\n logger = init_logger(name=\"test_mnist_perceptron\", config = config)\n\n logger.info(\"\")\n logger.info(\"#############################################\")\n logger.info(\"## test_rklearn_perceptron_binary_mnist.py ##\")\n logger.info(\"#############################################\")\n logger.info(\"\")\n\n main(config)\n\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.layers.dropout", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.layers.max_pooling2d", "tensorflow.contrib.layers.fully_connected", "tensorflow.argmax", "tensorflow.reshape", "tensorflow.cast", "tensorflow.layers.conv2d", "tensorflow.name_scope", "tensorflow.contrib.framework.arg_scope" ], [ "numpy.bincount", "numpy.reshape", "numpy.random.seed", "sklearn.metrics.accuracy_score", "numpy.where", "sklearn.model_selection.train_test_split" ] ]
gmweir/QuasiOptics
[ "0974178984f845597c5209217613c26edf931ed0", "0974178984f845597c5209217613c26edf931ed0" ]
[ "colorpy/colorpy-0.1.0/blackbody.py", "colorpy/ColorPy/ColorPy-master/plots.py" ]
[ "'''\nblackbody.py - Color of thermal blackbodies.\n\nDescription:\n\nCalculate the spectrum of a thermal blackbody at an arbitrary temperature.\n\nConstants:\n\nPLANCK_CONSTANT - Planck's constant, in J-sec\nSPEED_OF_LIGHT - Speed of light, in m/sec\nBOLTZMAN_CONSTANT - Boltzman's constant, in J/K\nSUN_TEMPERATURE - Surface temperature of the Sun, in K\n\nFunctions:\n\nblackbody_specific_intensity (wl_nm, T_K) - \n Get the monochromatic specific intensity for a blackbody -\n wl_nm = wavelength [nm]\n T_K = temperature [K]\n This is the energy radiated per second per unit wavelength per unit solid angle.\n Reference - Shu, eq. 4.6, p. 78.\n\nblackbody_spectrum (T_K) - \n Get the spectrum of a blackbody, as a numpy array.\n\nblackbody_color (T_K) - \n Given a temperature (K), return the xyz color of a thermal blackbody.\n\nPlots:\n\nblackbody_patch_plot (T_list, title, filename) -\n Draw a patch plot of blackbody colors for the given temperature range.\n\nblackbody_color_vs_temperature_plot (T_list, title, filename) -\n Draw a color vs temperature plot for the given temperature range.\n\nblackbody_spectrum_plot (T_K) -\n Draw the spectrum of a blackbody at the given temperature.\n \nReferences:\n\nFrank H. Shu, The Physical Universe. An Introduction to Astronomy,\nUniversity Science Books, Mill Valley, California. 1982. ISBN 0-935702-05-9.\n\nCharles Kittel and Herbert Kroemer, Thermal Physics, 2nd edition,\nW. H. Freeman, New York, 1980. ISBN 0-7167-1088-9.\n\nLicense:\n\nCopyright (C) 2008 Mark Kness\n\nAuthor - Mark Kness - mkness@alumni.utexas.net\n\nThis file is part of ColorPy.\n\nColorPy is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as\npublished by the Free Software Foundation, either version 3 of\nthe License, or (at your option) any later version.\n\nColorPy is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Lesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public License\nalong with ColorPy. If not, see <http://www.gnu.org/licenses/>.\n'''\nimport math, numpy, pylab\n\nimport colormodels\nimport ciexyz\nimport plots\n\n# Physical constants in mks units\nPLANCK_CONSTANT = 6.6237e-34 # J-sec\nSPEED_OF_LIGHT = 2.997925e+08 # m/sec\nBOLTZMAN_CONSTANT = 1.3802e-23 # J/K\nSUN_TEMPERATURE = 5778.0 # K\n\ndef blackbody_specific_intensity (wl_nm, T_K):\n '''Get the monochromatic specific intensity for a blackbody -\n wl_nm = wavelength [nm]\n T_K = temperature [K]\n This is the energy radiated per second per unit wavelength per unit solid angle.\n Reference - Shu, eq. 4.6, p. 
78.'''\n # precalculations that could be made global\n a = (PLANCK_CONSTANT * SPEED_OF_LIGHT) / (BOLTZMAN_CONSTANT)\n b = (2.0 * PLANCK_CONSTANT * SPEED_OF_LIGHT * SPEED_OF_LIGHT)\n wl_m = wl_nm * 1.0e-9\n try:\n exponent = a / (wl_m * T_K)\n except ZeroDivisionError:\n # treat same as large exponent\n return 0.0\n if exponent > 500.0:\n # so large that the final result is nearly zero - avoid the giant intermediate\n return 0.0\n specific_intensity = b / (math.pow (wl_m, 5) * (math.exp (exponent) - 1.0))\n return specific_intensity\n\ndef blackbody_spectrum (T_K):\n '''Get the spectrum of a blackbody, as a numpy array.'''\n spectrum = ciexyz.empty_spectrum()\n (num_rows, num_cols) = spectrum.shape\n for i in xrange (0, num_rows):\n specific_intensity = blackbody_specific_intensity (spectrum [i][0], T_K)\n # scale by size of wavelength interval\n spectrum [i][1] = specific_intensity * ciexyz.delta_wl_nm * 1.0e-9\n return spectrum\n \ndef blackbody_color (T_K):\n '''Given a temperature (K), return the xyz color of a thermal blackbody.'''\n spectrum = blackbody_spectrum (T_K)\n xyz = ciexyz.xyz_from_spectrum (spectrum)\n return xyz\n\n#\n# Figures\n#\n\ndef blackbody_patch_plot (T_list, title, filename):\n '''Draw a patch plot of blackbody colors for the given temperature range.'''\n xyz_colors = []\n color_names = []\n for Ti in T_list:\n xyz = blackbody_color (Ti)\n xyz_colors.append (xyz)\n name = '%g K' % (Ti)\n color_names.append (name)\n plots.xyz_patch_plot (xyz_colors, color_names, title, filename)\n\ndef blackbody_color_vs_temperature_plot (T_list, title, filename):\n '''Draw a color vs temperature plot for the given temperature range.'''\n num_T = len (T_list)\n rgb_list = numpy.empty ((num_T, 3))\n for i in xrange (0, num_T):\n T_i = T_list [i]\n xyz = blackbody_color (T_i)\n rgb_list [i] = colormodels.rgb_from_xyz (xyz)\n # note that b and g become negative for low T - MatPlotLib skips those on the semilog plot.\n plots.color_vs_param_plot (\n T_list,\n rgb_list,\n title,\n filename,\n plotfunc = pylab.semilogy,\n tight = True,\n xlabel = r'Temperature (K)',\n ylabel = r'RGB Color')\n\ndef blackbody_spectrum_plot (T_K):\n '''Draw the spectrum of a blackbody at the given temperature.'''\n spectrum = blackbody_spectrum (T_K)\n title = 'Blackbody Spectrum - T %d K' % (int (T_K))\n filename = 'BlackbodySpectrum-%dK' % (int (T_K))\n plots.spectrum_plot (\n spectrum,\n title,\n filename,\n xlabel = 'Wavelength (nm)',\n ylabel = 'Specific Intensity')\n #ylabel = 'Intensity ($W/m^2$)') # with LaTex symbols, the axis text gets too big...\n\n# Create sample figures\n\ndef figures ():\n '''Create some blackbody plots.'''\n # patch plots\n T_list_0 = plots.log_interpolate ( 1200.0, 20000.0, 48)\n T_list_hot = plots.log_interpolate (10000.0, 40000.0, 24)\n T_list_cool = plots.log_interpolate ( 950.0, 1200.0, 24)\n blackbody_patch_plot (T_list_0, 'Blackbody Colors', 'Blackbody-Patch')\n blackbody_patch_plot (T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch')\n blackbody_patch_plot (T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch')\n\n # color vs temperature\n blackbody_color_vs_temperature_plot (range (1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors')\n blackbody_color_vs_temperature_plot (range (10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors')\n blackbody_color_vs_temperature_plot (range (950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors')\n\n # spectrum of specific temperatures\n blackbody_spectrum_plot (2000.0)\n blackbody_spectrum_plot 
(3000.0) # Proxima Centauri\n blackbody_spectrum_plot (SUN_TEMPERATURE) # Sun\n blackbody_spectrum_plot (11000.0) # Rigel\n blackbody_spectrum_plot (15000.0)\n \n", "'''\nplots.py - Various types of plots.\n\nDescription:\n\nFunctions to draw various types of plots for light spectra.\n\nFunctions:\n\nlog_interpolate (y0, y1, num_values) -\n Return a list of values, num_values in size, logarithmically interpolated\n between y0 and y1. The first value will be y0, the last y1.\n\ntighten_x_axis (x_list) -\n Tighten the x axis (only) of the current plot to match the given range of x values.\n The y axis limits are not affected.\n\nGeneral plots:\n\nrgb_patch_plot (\n rgb_colors,\n color_names,\n title,\n filename = None,\n patch_gap = 0.05,\n num_across = 6) -\n Draw a set of color patches, specified as linear rgb colors.\n\nxyz_patch_plot (\n xyz_colors,\n color_names,\n title,\n filename = None,\n patch_gap = 0.05,\n num_across = 6) -\n Draw a set of color patches specified as xyz colors.\n\nspectrum_subplot (spectrum) -\n Plot a spectrum, with x-axis the wavelength, and y-axis the intensity.\n The curve is colored at that wavelength by the (approximate) color of a\n pure spectral color at that wavelength, with intensity constant over wavelength.\n (This means that dark looking colors here mean that wavelength is poorly viewed by the eye.\n This is not a complete plotting function, e.g. no file is saved, etc.\n It is assumed that this function is being called by one that handles those things.\n\nspectrum_plot (\n spectrum,\n title,\n filename = None,\n xlabel = 'Wavelength ($nm$)',\n ylabel = 'Intensity ($W/m^2$)') -\n \n Plot for a single spectrum -\n In a two part graph, plot:\n top: color of the spectrum, as a large patch.\n low: graph of spectrum intensity vs wavelength (x axis).\n The graph is colored by the (approximated) color of each wavelength.\n Each wavelength has equal physical intensity, so the variation in\n apparent intensity (e.g. 400, 800 nm are very dark, 550 nm is bright),\n is due to perceptual factors in the eye. 
This helps show how much\n each wavelength contributes to the percieved color.\n\n spectrum - spectrum to plot\n title - title for plot\n filename - filename to save plot to (or None to not save it)\n xlabel - label for x axis\n ylabel - label for y axis\n\ncolor_vs_param_plot (\n param_list,\n rgb_colors,\n title,\n filename,\n tight = False,\n plotfunc = pylab.plot,\n xlabel = 'param',\n ylabel = 'RGB Color') -\n \n Plot for a color that varies with a parameter -\n In a two part figure, draw:\n top: color as it varies with parameter (x axis)\n low: r,g,b values, as linear 0.0-1.0 values, of the attempted color.\n\n param_list - list of parameters (x axis)\n rgb_colors - numpy array, one row for each param in param_list\n title - title for plot\n filename - filename to save plot to (or None to not save it)\n plotfunc - optional plot function to use (default pylab.plot)\n xlabel - label for x axis\n ylabel - label for y axis (default 'RGB Color')\n\nSpecialized plots:\n\nvisible_spectrum_plot () -\n Plot the visible spectrum, as a plot vs wavelength.\n\ncie_matching_functions_plot () -\n Plot the CIE XYZ matching functions, as three spectral subplots.\n\nshark_fin_plot () -\n Draw the 'shark fin' CIE chromaticity diagram of the pure spectral lines (plus purples) in xy space.\n\nLicense:\n\nCopyright (C) 2008 Mark Kness\n\nAuthor - Mark Kness - mkness@alumni.utexas.net\n\nThis file is part of ColorPy.\n\nColorPy is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as\npublished by the Free Software Foundation, either version 3 of\nthe License, or (at your option) any later version.\n\nColorPy is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Lesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public License\nalong with ColorPy. If not, see <http://www.gnu.org/licenses/>.\n'''\nfrom __future__ import division, absolute_import, print_function\n\nimport math, random\nimport numpy, pylab\n\nfrom . import colormodels, ciexyz\n\n# Miscellaneous utilities for plots\n\ndef log_interpolate (y0, y1, num_values):\n '''Return a list of values, num_values in size, logarithmically interpolated\n between y0 and y1. 
The first value will be y0, the last y1.'''\n rtn = []\n if num_values <= 0:\n raise ValueError('Invalid number of divisions %s in log_interpolate' % (str (num_values)))\n if num_values == 1:\n # can't use both endpoints, too constrained\n yi = math.sqrt (y0 * y1)\n rtn.append (yi)\n else:\n # normal case\n beta = math.log (y1 / y0) / float (num_values - 1)\n for i in range (0, num_values):\n yi = y0 * math.exp (beta * float (i))\n rtn.append (yi)\n return rtn\n\ndef tighten_x_axis (x_list):\n '''Tighten the x axis (only) of the current plot to match the given range of x values.\n The y axis limits are not affected.'''\n x_min = min (x_list)\n x_max = max (x_list)\n pylab.xlim ((x_min, x_max))\n\n#\n# Patch plots - Plots with each color value as a solid patch, with optional labels.\n#\n\ndef rgb_patch_plot (\n rgb_colors,\n color_names,\n title,\n filename = None,\n patch_gap = 0.05,\n num_across = 6):\n '''Draw a set of color patches, specified as linear rgb colors.'''\n \n def draw_patch (x0, y0, color, name, patch_gap):\n '''Draw a patch of color.'''\n # patch relative vertices\n m = patch_gap\n omm = 1.0 - m\n poly_dx = [m, m, omm, omm]\n poly_dy = [m, omm, omm, m]\n # construct vertices\n poly_x = [ x0 + dx_i for dx_i in poly_dx ]\n poly_y = [ y0 + dy_i for dy_i in poly_dy ]\n pylab.fill (poly_x, poly_y, color)\n if name != None:\n dtext = 0.1\n pylab.text (x0+dtext, y0+dtext, name, size=8.0)\n\n # make plot with each color with one patch\n pylab.clf()\n num_colors = len (rgb_colors)\n for i in range (0, num_colors):\n (iy, ix) = divmod (i, num_across)\n # get color as a displayable string\n colorstring = colormodels.irgb_string_from_rgb (rgb_colors [i])\n if color_names != None:\n name = color_names [i]\n else:\n name = None\n draw_patch (float (ix), float (-iy), colorstring, name, patch_gap)\n pylab.axis ('off')\n pylab.title (title)\n if filename is not None:\n print('Saving plot %s' % str (filename))\n pylab.savefig (filename)\n\ndef xyz_patch_plot (\n xyz_colors,\n color_names,\n title,\n filename = None,\n patch_gap = 0.05,\n num_across = 6):\n '''Draw a set of color patches specified as xyz colors.'''\n rgb_colors = []\n for xyz in xyz_colors:\n rgb = colormodels.rgb_from_xyz (xyz)\n rgb_colors.append (rgb)\n rgb_patch_plot (rgb_colors, color_names, title, filename, patch_gap=patch_gap, num_across=num_across)\n\n#\n# Spectrum plots\n#\n\ndef spectrum_subplot (spectrum):\n '''Plot a spectrum, with x-axis the wavelength, and y-axis the intensity.\n The curve is colored at that wavelength by the (approximate) color of a\n pure spectral color at that wavelength, with intensity constant over wavelength.\n (This means that dark looking colors here mean that wavelength is poorly viewed by the eye.\n\n This is not a complete plotting function, e.g. 
no file is saved, etc.\n It is assumed that this function is being called by one that handles those things.'''\n (num_wl, num_cols) = spectrum.shape\n # get rgb colors for each wavelength\n rgb_colors = numpy.empty ((num_wl, 3))\n for i in range (0, num_wl):\n wl_nm = spectrum [i][0]\n xyz = ciexyz.xyz_from_wavelength (wl_nm)\n rgb_colors [i] = colormodels.rgb_from_xyz (xyz)\n # scale to make brightest rgb value = 1.0\n rgb_max = numpy.max (rgb_colors)\n scaling = 1.0 / rgb_max\n rgb_colors *= scaling \n # draw color patches (thin vertical lines matching the spectrum curve) in color\n for i in range (0, num_wl-1): # skipping the last one here to stay in range\n x0 = spectrum [i][0]\n x1 = spectrum [i+1][0]\n y0 = spectrum [i][1]\n y1 = spectrum [i+1][1]\n poly_x = [x0, x1, x1, x0]\n poly_y = [0.0, 0.0, y1, y0]\n color_string = colormodels.irgb_string_from_rgb (rgb_colors [i])\n pylab.fill (poly_x, poly_y, color_string, edgecolor=color_string)\n # plot intensity as a curve\n pylab.plot (\n spectrum [:,0], spectrum [:,1],\n color='k', linewidth=2.0, antialiased=True)\n\ndef spectrum_plot (\n spectrum,\n title,\n filename = None,\n xlabel = 'Wavelength ($nm$)',\n ylabel = 'Intensity ($W/m^2$)'):\n '''Plot for a single spectrum -\n In a two part graph, plot:\n top: color of the spectrum, as a large patch.\n low: graph of spectrum intensity vs wavelength (x axis).\n The graph is colored by the (approximated) color of each wavelength.\n Each wavelength has equal physical intensity, so the variation in\n apparent intensity (e.g. 400, 800 nm are very dark, 550 nm is bright),\n is due to perceptual factors in the eye. This helps show how much\n each wavelength contributes to the percieved color.\n\n spectrum - spectrum to plot\n title - title for plot\n filename - filename to save plot to\n xlabel - label for x axis\n ylabel - label for y axis\n '''\n pylab.clf ()\n # upper plot - solid patch of color that matches the spectrum color\n pylab.subplot (2,1,1)\n pylab.title (title)\n color_string = colormodels.irgb_string_from_rgb (\n colormodels.rgb_from_xyz (ciexyz.xyz_from_spectrum (spectrum)))\n poly_x = [0.0, 1.0, 1.0, 0.0]\n poly_y = [0.0, 0.0, 1.0, 1.0]\n pylab.fill (poly_x, poly_y, color_string)\n # draw a solid line around the patch to look nicer\n pylab.plot (poly_x, poly_y, color='k', linewidth=2.0)\n pylab.axis ('off')\n # lower plot - spectrum vs wavelength, with colors of the associated spectral lines below\n pylab.subplot (2,1,2)\n spectrum_subplot (spectrum)\n tighten_x_axis (spectrum [:,0])\n pylab.xlabel (xlabel)\n pylab.ylabel (ylabel)\n # done\n if filename is not None:\n print('Saving plot %s' % str (filename))\n pylab.savefig (filename)\n\n#\n# Color vs param plot\n#\n\ndef color_vs_param_plot (\n param_list,\n rgb_colors,\n title,\n filename = None,\n tight = False,\n plotfunc = pylab.plot,\n xlabel = 'param',\n ylabel = 'RGB Color'):\n '''Plot for a color that varies with a parameter -\n In a two part figure, draw:\n top: color as it varies with parameter (x axis)\n low: r,g,b values, as linear 0.0-1.0 values, of the attempted color.\n\n param_list - list of parameters (x axis)\n rgb_colors - numpy array, one row for each param in param_list\n title - title for plot\n filename - filename to save plot to\n plotfunc - optional plot function to use (default pylab.plot)\n xlabel - label for x axis\n ylabel - label for y axis (default 'RGB Color')\n '''\n pylab.clf ()\n # draw color bars in upper plot\n pylab.subplot (2,1,1)\n pylab.title (title)\n # no xlabel, ylabel in upper plot\n 
num_points = len (param_list)\n for i in range (0, num_points-1):\n x0 = param_list [i]\n x1 = param_list [i+1]\n y0 = 0.0\n y1 = 1.0\n poly_x = [x0, x1, x1, x0]\n poly_y = [y0, y0, y1, y1]\n color_string = colormodels.irgb_string_from_rgb (rgb_colors [i])\n pylab.fill (poly_x, poly_y, color_string, edgecolor=color_string)\n if tight:\n tighten_x_axis (param_list)\n # draw rgb curves in lower plot\n pylab.subplot (2,1,2)\n # no title in lower plot\n plotfunc (param_list, rgb_colors [:,0], color='r', label='Red')\n plotfunc (param_list, rgb_colors [:,1], color='g', label='Green')\n plotfunc (param_list, rgb_colors [:,2], color='b', label='Blue')\n if tight:\n tighten_x_axis (param_list)\n pylab.xlabel (xlabel)\n pylab.ylabel (ylabel)\n if filename is not None:\n print('Saving plot %s' % str (filename))\n pylab.savefig (filename)\n\n#\n# Some specialized plots\n#\n\ndef visible_spectrum_plot ():\n '''Plot the visible spectrum, as a plot vs wavelength.'''\n spectrum = ciexyz.empty_spectrum()\n (num_wl, num_cols) = spectrum.shape\n # get rgb colors for each wavelength\n rgb_colors = numpy.empty ((num_wl, 3))\n for i in range (0, num_wl):\n xyz = ciexyz.xyz_from_wavelength (spectrum [i][0])\n rgb = colormodels.rgb_from_xyz (xyz)\n rgb_colors [i] = rgb\n # scale to make brightest rgb value = 1.0\n rgb_max = numpy.max (rgb_colors)\n scaling = 1.0 / rgb_max\n rgb_colors *= scaling \n # plot colors and rgb values vs wavelength\n color_vs_param_plot (\n spectrum [:,0],\n rgb_colors,\n 'The Visible Spectrum',\n 'VisibleSpectrum',\n tight = True,\n xlabel = r'Wavelength (nm)',\n ylabel = r'RGB Color')\n \ndef cie_matching_functions_plot ():\n '''Plot the CIE XYZ matching functions, as three spectral subplots.'''\n # get 'spectra' for x,y,z matching functions\n spectrum_x = ciexyz.empty_spectrum()\n spectrum_y = ciexyz.empty_spectrum()\n spectrum_z = ciexyz.empty_spectrum()\n (num_wl, num_cols) = spectrum_x.shape\n for i in range (0, num_wl):\n wl_nm = spectrum_x [i][0]\n xyz = ciexyz.xyz_from_wavelength (wl_nm)\n spectrum_x [i][1] = xyz [0]\n spectrum_y [i][1] = xyz [1]\n spectrum_z [i][1] = xyz [2]\n # Plot three separate subplots, with CIE X in the first, CIE Y in the second, and CIE Z in the third.\n # Label appropriately for the whole plot.\n pylab.clf ()\n # X\n pylab.subplot (3,1,1)\n pylab.title ('1931 CIE XYZ Matching Functions')\n pylab.ylabel ('CIE $X$')\n spectrum_subplot (spectrum_x)\n tighten_x_axis (spectrum_x [:,0])\n # Y\n pylab.subplot (3,1,2)\n pylab.ylabel ('CIE $Y$')\n spectrum_subplot (spectrum_y)\n tighten_x_axis (spectrum_x [:,0])\n # Z\n pylab.subplot (3,1,3)\n pylab.xlabel ('Wavelength (nm)')\n pylab.ylabel ('CIE $Z$')\n spectrum_subplot (spectrum_z)\n tighten_x_axis (spectrum_x [:,0])\n # done\n filename = 'CIEXYZ_Matching'\n print('Saving plot %s' % str (filename))\n pylab.savefig (filename)\n\ndef shark_fin_plot ():\n '''Draw the 'shark fin' CIE chromaticity diagram of the pure spectral lines (plus purples) in xy space.'''\n # get array of (approximate) colors for the boundary of the fin\n xyz_list = ciexyz.get_normalized_spectral_line_colors (brightness=1.0, num_purples=200, dwl_angstroms=2)\n # get normalized colors\n xy_list = xyz_list.copy()\n (num_colors, num_cols) = xy_list.shape\n for i in range (0, num_colors):\n colormodels.xyz_normalize (xy_list [i])\n # get phosphor colors and normalize\n red = colormodels.PhosphorRed\n green = colormodels.PhosphorGreen\n blue = colormodels.PhosphorBlue\n white = colormodels.PhosphorWhite\n colormodels.xyz_normalize (red)\n 
colormodels.xyz_normalize (green)\n colormodels.xyz_normalize (blue)\n colormodels.xyz_normalize (white)\n\n def get_direc_to_white (xyz):\n '''Get unit vector (xy plane) in direction of the white point.'''\n direc = white - xyz\n mag = math.hypot (direc [0], direc [1])\n if mag != 0.0:\n direc /= mag\n return (direc[0], direc[1])\n\n # plot\n pylab.clf ()\n # draw color patches for point in xy_list\n s = 0.025 # distance in xy plane towards white point\n for i in range (0, len (xy_list)-1):\n x0 = xy_list [i][0]\n y0 = xy_list [i][1]\n x1 = xy_list [i+1][0]\n y1 = xy_list [i+1][1]\n # get unit vectors in direction of white point\n (dir_x0, dir_y0) = get_direc_to_white (xy_list [i])\n (dir_x1, dir_y1) = get_direc_to_white (xy_list [i+1])\n # polygon vertices\n poly_x = [x0, x1, x1 + s*dir_x1, x0 + s*dir_x0]\n poly_y = [y0, y1, y1 + s*dir_y1, y0 + s*dir_y0]\n # draw (using full color, not normalized value)\n color_string = colormodels.irgb_string_from_rgb (\n colormodels.rgb_from_xyz (xyz_list [i]))\n pylab.fill (poly_x, poly_y, color_string, edgecolor=color_string)\n # draw the curve of the xy values of the spectral lines and purples\n pylab.plot (xy_list [:,0], xy_list [:,1], color='#808080', linewidth=3.0)\n # draw monitor gamut and white point\n pylab.plot ([red [0], green[0]], [red [1], green[1]], 'o-', color='k')\n pylab.plot ([green[0], blue [0]], [green[1], blue [1]], 'o-', color='k')\n pylab.plot ([blue [0], red [0]], [blue [1], red [1]], 'o-', color='k')\n pylab.plot ([white[0], white[0]], [white[1], white[1]], 'o-', color='k')\n # label phosphors\n dx = 0.01\n dy = 0.01\n pylab.text (red [0] + dx, red [1], 'Red', ha='left', va='center')\n pylab.text (green [0], green [1] + dy, 'Green', ha='center', va='bottom')\n pylab.text (blue [0] - dx, blue [1], 'Blue', ha='right', va='center')\n pylab.text (white [0], white [1] + dy, 'White', ha='center', va='bottom')\n # titles etc\n pylab.axis ([0.0, 0.85, 0.0, 0.85])\n pylab.xlabel (r'CIE $x$')\n pylab.ylabel (r'CIE $y$')\n pylab.title (r'CIE Chromaticity Diagram')\n filename = 'ChromaticityDiagram'\n print('Saving plot %s' % (str (filename)))\n pylab.savefig (filename)\n\n# Special figures\n\ndef figures ():\n '''Draw specific figures not used anywhere else.'''\n visible_spectrum_plot()\n cie_matching_functions_plot()\n shark_fin_plot()\n \n \n" ]
[ [ "numpy.empty" ], [ "numpy.max", "numpy.empty" ] ]
AtsushiHashimoto/SpectralClusteringFD
[ "dd150a08898ce354a1b59457dea2f5185d145ed7" ]
[ "spectral_clustering_fd/laplacian_sketch.py" ]
[ "# coding: utf-8\nimport numpy as np\nfrom frequent_direction import FrequentDirection\nfrom sklearn.preprocessing import normalize\n\nfrom sklearn.metrics.pairwise import pairwise_kernels\n\n\ndef laplacian_sketch(X,ell,k,do_normalize_feature,normed,callback,**args):\n fd = FrequentDirection(ell,k)\n\n D = np.array([np.sum(callback(X,i,**args)) for i in range(len(X))])\n if normed:\n D = np.sqrt(D)\n isolation_mask = D==0\n\n if do_normalize_feature:\n # normalize original feature (for cosine distance)\n X[-isolation_mask] = normalize(X[-isolation_mask],norm='l2', axis=1, copy=False)\n D[:] = 1 # set 1 even to D==0 samples to avoid 0 division.\n\n for i,isolation in enumerate(isolation_mask):\n A_i = -1 * callback(X,i,**args)\n if normed:\n A_i /= D[i]\n A_i /= D\n A_i[i] = 1 - isolation # set 0 to isolated node.\n else:\n A_i[i] = D[i]\n fd.add_sample(-A_i)\n return fd.get_result().T, D\n\ndef laplacian_sketch_rbf_kernel(X,ell,k,normed=True,gamma=None):\n return laplacian_sketch(X,ell,k,False,normed,one_row_rbf_kernel,gamma=None)\n\ndef laplacian_sketch_cosine_similarity(X,ell,k,normed=True):\n return laplacian_sketch(X,ell,k,True,normed,one_row_cosine_similarity)\n\ndef one_row_rbf_kernel(X,i,gamma=None):\n \"\"\"\n X : array of shape (n_samples_X, n_features)\n i : target sample in X (X[i])\n gamma : float, default None\n If None, defaults to 1.0 / n_samples_X\n K(x, y) = exp(-gamma ||x-xi||^2)\n Returns\n -------\n kernel_matrix : array of shape (n_samples_X, n_samples_Y)\n \"\"\"\n if gamma is None:\n gamma = 1.0 / X.shape[0]\n d = np.sum(np.power(X-X[i],2),axis=1)\n return np.array(np.exp(-gamma * d))\n\n\ndef one_row_cosine_similarity(X,i):\n \"\"\"\n X : normalized matrix\n i : target sample in X\n \"\"\"\n a = (np.dot(X,X[i].T)+1)/2\n a[a<0]=0\n return a\n\ndef debug_one_row_rbf_kernel(X,gamma=None):\n W = np.zeros((X.shape[0],X.shape[0]))\n W_gt = pairwise_kernels(X, metric='rbf',\n filter_params=True,\n gamma=gamma)\n for i,row in enumerate(X):\n W[i] = one_row_rbf_kernel(X,i,gamma=gamma)\n #print(W)\n #print(W_gt)\n #print(np.sum(W-W_gt))\n\ndef debug_one_row_cosine_similarity(X):\n W = np.zeros((X.shape[0],X.shape[0]))\n W_gt = pairwise_kernels(X, metric='cosine',\n filter_params=True)\n for i,row in enumerate(X):\n W[i] = one_row_cosine_similarity(X,i)\n print(W)\n print(W_gt)\n print(np.sum(W-W_gt))\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.sum", "numpy.exp", "sklearn.metrics.pairwise.pairwise_kernels", "sklearn.preprocessing.normalize", "numpy.power", "numpy.sqrt" ] ]
acproject/GNNs
[ "953d175f672f0bb1b7cd25f371878728f3d27f09" ]
[ "NAACL/ensemble.py" ]
[ "'''Ensemble some predictions. '''\r\nimport argparse\r\nimport collections\r\nimport math\r\nfrom scipy.special import logsumexp\r\nimport sys\r\n\r\nMODES = ['mean', 'max', 'logsumexp', 'noisy_or', 'log_noisy_or', 'odds_ratio']\r\n\r\ndef parse_args(args):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('mode', choices=MODES)\r\n parser.add_argument('files', nargs='+')\r\n parser.add_argument('--weights', '-w', type=lambda x:[float(t) for t in x.split(',')],\r\n help='Comma-separated lit of multiplizer per file')\r\n parser.add_argument('--out-file', '-o', default=None, help='Where to write all output')\r\n\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n sys.exit(1)\r\n return parser.parse_args(args)\r\n\r\ndef read_preds(fn):\r\n preds = []\r\n with open(fn) as f:\r\n for line in f:\r\n idx, pmid, drug, gene, variant, prob = line.strip().split('\\t')\r\n prob = float(prob)\r\n preds.append((pmid, drug, gene, variant, prob))\r\n\r\n return preds\r\n\r\ndef main(OPTS):\r\n preds_all = [read_preds(fn) for fn in OPTS.files]\r\n groups = collections.defaultdict(list)\r\n for i, preds in enumerate(preds_all):\r\n if OPTS.weights:\r\n weight = OPTS.weights[i]\r\n else:\r\n weight = 1.0\r\n for pmid, drug, gene, variant, prob in preds:\r\n groups[(pmid, drug, gene, variant)].append(weight * prob)\r\n\r\n results = []\r\n for i , ((pmid, drug, gene, variant), prob_list) in enumerate(groups.items()):\r\n if OPTS.mode == 'mean':\r\n prob = sum(prob_list) / len(prob_list)\r\n elif OPTS.mode == 'max':\r\n prob = max(prob_list)\r\n elif OPTS.mode == 'logsumexp':\r\n prob = logsumexp(prob_list)\r\n elif OPTS.mode == 'noisy_or':\r\n prob_no_rel = 1.0\r\n for p in prob_list:\r\n prob_no_rel *= 1.0 - p\r\n prob =1.0 - prob_no_rel\r\n elif OPTS.mode == 'log_noisy_or':\r\n log_prob_no_rel = 0.0\r\n for p in prob_list:\r\n if p < 1.0:\r\n log_prob_no_rel += math.log(1.0 - p)\r\n else:\r\n log_prob_no_rel -= 1000000\r\n prob = -log_prob_no_rel\r\n elif OPTS.mode == 'odds_ratio':\r\n cur_log_odds = 0.0\r\n for p in prob_list:\r\n cur_log_odds += 10 + 0.001 * p #math.log(p / (1.0 - p) * 100000000)\r\n prob = cur_log_odds\r\n else:\r\n raise ValueError(OPTS.mode)\r\n results.append((i, pmid, drug, gene, variant, prob))\r\n\r\n with open(OPTS.out_file, 'w') as f:\r\n for item in results:\r\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(*item))\r\n\r\nif __name__ == '__main__':\r\n OPTS = parse_args(sys.argv[1:])\r\n main(OPTS)\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "scipy.special.logsumexp" ] ]
csepreghy/spectral_analysis
[ "1cbd9770347a71721164a7daf7b133ad0eeba8e4" ]
[ "spectral_analysis/unsupervised_learning/autoencoder/autoencoder_bestmodel.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport time\n\nfrom tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.callbacks import TensorBoard, History, EarlyStopping, ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam, Nadam, RMSprop\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nfrom kerastuner.engine.hyperparameters import HyperParameters\nfrom kerastuner.tuners import RandomSearch\n\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\nimport seaborn as sns\n\nfrom spectral_analysis.classifiers.neural_network.helper_functions import train_test_split\nfrom spectral_analysis.plotify import Plotify\n\nclass AutoEncoder():\n def __init__(self, df_source_info, df_fluxes, df_wavelengths, load_model, weights_path=''):\n self.load_model = load_model\n self.weights_path = weights_path\n X = self._prepare_data(df_source_info, df_fluxes, df_wavelengths)\n indeces = list(range(len(X)))\n X_train, X_test, self.i_train, self.i_test = train_test_split(X, 0.2, indeces=indeces)\n X_train, X_val, self.i_train, self.i_val = train_test_split(X_train, 0.2, indeces=indeces)\n \n self.scaler = StandardScaler()\n X_train = self.scaler.fit_transform(X_train)\n X_test = self.scaler.transform(X_test)\n X_val = self.scaler.transform(X_val)\n\n self.X_train = np.expand_dims(X_train, axis=2)\n self.X_test = np.expand_dims(X_test, axis=2)\n self.X_val = np.expand_dims(X_val, axis=2)\n\n def _prepare_data(self, df_source_info, df_fluxes, df_wavelengths): \n # self.df_source_info = df_source_info.loc[df_source_info['class'] == 'QSO']\n self.df_source_info = df_source_info\n self.objids = self.df_source_info['objid'].to_numpy()\n fluxes = df_fluxes.loc[df_fluxes['objid'].isin(self.objids)]\n \n X = np.delete(fluxes.values, 0, axis=1)\n X = X[:, 0::2]\n print(f'X.shape = {X.shape}')\n X = X[:, np.mod(np.arange(X[0].size),25)!=0]\n X = X[:,:1792]\n print(f'X.shape = {X.shape}')\n\n\n wavelengths = df_wavelengths.to_numpy()\n wavelengths = wavelengths[::2]\n self.wavelengths = wavelengths[0:1792]\n # plot_spectrum(X[0], wavelengths)\n return X\n \n def build_model(self):\n # ================================================================================== #\n # ==================================== ENCODER ===================================== #\n # ================================================================================== #\n \n input_layer = Input(shape=(self.X_train.shape[1], 1))\n\n # encoder\n x = Conv1D(filters=256,\n kernel_size=7,\n activation='relu', \n padding='same')(input_layer)\n x = MaxPooling1D(4)(x)\n\n x = Conv1D(filters=128,\n kernel_size=5,\n activation='relu',\n padding='same')(x)\n \n x = MaxPooling1D(4)(x)\n x = Conv1D(filters=64,\n kernel_size=5,\n activation='relu',\n padding='same')(x)\n x = MaxPooling1D(2)(x)\n\n x = Conv1D(filters=32,\n kernel_size=3,\n activation='relu',\n padding='same')(x)\n x = MaxPooling1D(2)(x)\n\n x = Conv1D(filters=32,\n kernel_size=3,\n activation='relu',\n padding='same')(x)\n x = MaxPooling1D(2)(x)\n\n x = Conv1D(filters=1,\n kernel_size=3,\n activation='relu',\n padding='same')(x)\n\n encoded = MaxPooling1D(2, padding='same')(x)\n\n # ================================================================================== #\n # ==================================== DECODER ===================================== #\n # 
================================================================================== #\n\n x = Conv1D(filters=1,\n kernel_size=3,\n activation='relu',\n padding='same')(encoded)\n \n x = UpSampling1D(2)(x)\n\n x = Conv1D(filters=32,\n kernel_size=3,\n activation='relu',\n padding='same')(x)\n\n x = UpSampling1D(2)(x)\n\n x = Conv1D(filters=32,\n kernel_size=3,\n activation='relu',\n padding='same')(x)\n\n x = UpSampling1D(2)(x)\n\n x = Conv1D(filters=64,\n kernel_size=5,\n activation='relu',\n padding='same')(x)\n\n x = UpSampling1D(2)(x)\n\n x = Conv1D(filters=128,\n kernel_size=5,\n activation='relu',\n padding='same')(x)\n\n x = UpSampling1D(4)(x)\n\n x = Conv1D(filters=256,\n kernel_size=7,\n activation='relu',\n padding='same')(x)\n x = UpSampling1D(4)(x)\n \n decoded = Conv1D(1, 1, activation='tanh', padding='same')(x)\n \n self.autoencoder = Model(input_layer, decoded)\n self.autoencoder.summary()\n self.autoencoder.compile(loss='mse', optimizer='adam')\n\n return self.autoencoder\n \n def train_model(self, epochs, batch_size=32):\n model = self.build_model()\n \n if self.load_model == False:\n modelcheckpoint = ModelCheckpoint(filepath='logs/1-14_autoencoder.epoch{epoch:02d}.h5',\n monitor='val_loss',\n save_best_only=True)\n \n history = model.fit(x=self.X_train,\n y=self.X_train,\n epochs=epochs,\n batch_size=32,\n validation_data=(self.X_val, self.X_val),\n callbacks=[EarlyStopping('val_loss', patience=8), modelcheckpoint])\n\n self.evaluate_model(model)\n\n else:\n model.load_weights(self.weights_path)\n print(f'model = {model}')\n # self.evaluate_model(model)\n self.get_bottleneck_values(model)\n\n return model\n \n def get_bottleneck_values(self, model):\n bottleneck = model.get_layer('conv1d_5')\n\n extractor = Model(inputs=model.inputs, outputs=[bottleneck.output])\n features = extractor(self.X_test)\n features = np.squeeze(features, axis=2)\n\n df_source_info_test = pd.DataFrame({'class': self.df_source_info.iloc[self.i_test]['class'].values})\n\n print(f'df_source_info_test = {df_source_info_test}')\n\n df = pd.DataFrame(features)\n df = df.join(df_source_info_test)\n\n print(f'df = {df}')\n\n sns.set(style=\"ticks\", color_codes=True)\n sns.pairplot(df, hue='class')\n plt.savefig('plots/autoencoder_pairplot', dpi=100)\n\n def evaluate_model(self, model):\n preds = model.predict(self.X_test)\n \n print(self.X_test.shape)\n self.X_test = np.squeeze(self.X_test, axis=2)\n preds = np.squeeze(preds, axis=2)\n print(self.X_test.shape)\n\n self.X_test = self.scaler.inverse_transform(self.X_test)\n preds = self.scaler.inverse_transform(preds)\n \n for i in range(100):\n qso_ra = self.df_source_info.iloc[self.i_test[i]]['ra']\n qso_dec = self.df_source_info.iloc[self.i_test[i]]['dec']\n qso_plate = self.df_source_info.iloc[self.i_test[i]]['plate']\n qso_z = self.df_source_info.iloc[self.i_test[i]]['z']\n qso_class = self.df_source_info.iloc[self.i_test[i]]['class']\n\n plotify = Plotify(theme='ugly')\n _, axs = plotify.get_figax(nrows=2, figsize=(5.8, 8))\n axs[0].plot(self.wavelengths, self.X_test[i], color=plotify.c_orange)\n axs[1].plot(self.wavelengths, preds[i], color=plotify.c_orange)\n axs[0].set_title(f'ra = {qso_ra}, dec = {qso_dec}, \\n z = {qso_z}, plate = {qso_plate}, class = {qso_class} \\n', fontsize=14)\n axs[1].set_title(f'Autoencoder recreation \\n')\n axs[0].set_ylabel(r'$F_{\\lambda[10^{-17} erg \\: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)\n axs[1].set_ylabel(r'$F_{\\lambda[10^{-17} erg \\: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)\n axs[1].set_xlabel('Wavelength 
(Å)')\n\n plt.subplots_adjust(hspace=0.4)\n plt.savefig(f'plots/autoencoder/__all_sources/_autoencoder_{i}', dpi=160)\n\n return preds\n\ndef main():\n df_fluxes = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='fluxes').head(5000)\n df_source_info = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='source_info').head(5000)\n df_wavelengths = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='wavelengths')\n\n ae = AutoEncoder(df_source_info, df_fluxes, df_wavelengths, load_model=False, weights_path='logs/colab-logs/_all_sources1-14_autoencoder.epoch30.h5')\n ae.train_model(epochs=12, batch_size=64)\n \n\nif __name__ == \"__main__\":\n main()" ]
[ [ "tensorflow.keras.layers.Conv1D", "numpy.delete", "tensorflow.keras.layers.Input", "sklearn.preprocessing.StandardScaler", "pandas.DataFrame", "matplotlib.pyplot.savefig", "tensorflow.keras.models.Model", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.layers.UpSampling1D", "tensorflow.keras.callbacks.ModelCheckpoint", "matplotlib.pyplot.subplots_adjust", "pandas.read_hdf", "numpy.arange", "numpy.squeeze", "numpy.expand_dims", "tensorflow.keras.callbacks.EarlyStopping" ] ]
ved432/test
[ "779914d1bc876414d1149161ec0d838d7bb16601" ]
[ "utils/heatmap-coverage.py" ]
[ "from pandas import DataFrame\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv,sys\n\nExperimentName=sys.argv[1]\n\nwith open(ExperimentName+'.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\nif ExperimentName == \"pod-delete\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Stress: 3600/1s','Memory/CPU footprint for the stress run','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','With Force','Without Force','Different base image(alpine/nginx/centos)']\n Cols = ['Is the test added?']\n plt.title(\"Pod Delete Experiment\", fontsize =20)\nelif ExperimentName == \"container-kill\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Large Duration and Interval']\n Cols = ['Is the test added?']\n plt.title(\"Container Kill Experiment\", fontsize =20)\nelif ExperimentName == \"disk-fill\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Application image(nginx/centos/alpine)']\n Cols = ['Is the test added?']\n plt.title(\"Disk Fill Experiment\", fontsize =20)\nelif ExperimentName == \"pod-cpu-hog\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']\n Cols = ['Is the test added?']\n plt.title(\"Pod CPU Hog Experiment\", fontsize =20) \nelif ExperimentName == \"pod-memory-hog\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation 
Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']\n Cols = ['Is the test added?']\n plt.title(\"Pod Memory Hog Experiment\", fontsize =20)\nelif ExperimentName == \"pod-network-corruption\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']\n Cols = ['Is the test added?']\n plt.title(\"Pod Network Corruption Experiment\", fontsize =20)\nelif ExperimentName == \"pod-network-duplication\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']\n Cols = ['Is the test added?']\n plt.title(\"Pod Network Duplication Experiment\", fontsize =20)\nelif ExperimentName == \"pod-network-latency\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host'] \n Cols = ['Is the test added?']\n plt.title(\"Pod Network Latency Experiment\", fontsize =20) \nelif ExperimentName == \"pod-network-loss\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target 
Host'] \n Cols = ['Is the test added?']\n plt.title(\"Pod Network Loss Experiment\", fontsize =20)\nelif ExperimentName == \"pod-autoscaler\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With less replicas(say 5)','with more replicas(say 20)'] \n Cols = ['Is the test added?']\n plt.title(\"Pod Autoscaler Experiment\", fontsize =20)\nelif ExperimentName == \"kubelet-service-kill\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Target Node Specified','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different lib image(ubuntu/centos)','Without appinfo'] \n Cols = ['Is the test added?']\n plt.title(\"Kubelet Service Kill\", fontsize =20)\nelif ExperimentName == \"node-cpu-hog\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']\n Cols = ['Is the test added?']\n plt.title(\"Node CPU Hog\", fontsize =20)\nelif ExperimentName == \"node-memory-hog\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']\n Cols = ['Is the test added?']\n plt.title(\"Node Memory Hog\", fontsize =20)\nelif ExperimentName == \"node-drain\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo'] \n Cols = ['Is the test added?']\n plt.title(\"Node Drain Experiment\", fontsize =20) \nelif ExperimentName == \"node-taint\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM 
Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo'] \n Cols = ['Is the test added?']\n plt.title(\"Node Taint Experiment\", fontsize =20) \nelif ExperimentName == \"node-io-stress\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified']\n Cols = ['Is the test added?']\n plt.title(\"Node IO Stress\", fontsize =20) \nelif ExperimentName == \"pod-io-stress\":\n Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodAffectedPercentage is 0','PodAffectedPercentage is 100','PodAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified','w/ Volume mouth path specified']\n Cols = ['Is the test added?']\n plt.title(\"Pod IO Stress\", fontsize =20) \nelse:\n print(\"Experiment %s not supported\",ExperimentName)\n\ndf = DataFrame(data, index=Index, columns=Cols)\ndf = df[df.columns].astype(float)\n\nprint(df)\nsvm = sns.heatmap(df, cmap=\"Reds\")\nfigure = svm.get_figure()\n\nplt.subplots_adjust(left=0.218,bottom=0.095,right=0.9,top=0.88,wspace=0.2,hspace=0.2)\nfigure.set_figheight(10)\nfigure.set_figwidth(15)\nplt.savefig(ExperimentName+'-heatmap.png', dpi=250)\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.title" ] ]
minwook-shin/aws-data-wrangler
[ "304e734db5e96cc5e11ff54b4f3a1cf7c4e5736b" ]
[ "awswrangler/neptune/neptune.py" ]
[ "\"\"\"Amazon Neptune Module.\"\"\"\n\nimport logging\nimport re\nfrom typing import Any\n\nimport pandas as pd\nfrom gremlin_python.process.graph_traversal import GraphTraversalSource, __\nfrom gremlin_python.process.translator import Translator\nfrom gremlin_python.process.traversal import Cardinality, T\nfrom gremlin_python.structure.graph import Graph\n\nfrom awswrangler import exceptions\nfrom awswrangler.neptune.client import NeptuneClient\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:\n \"\"\"Return results of a Gremlin traversal as pandas dataframe.\n\n Parameters\n ----------\n client : neptune.Client\n instance of the neptune client to use\n traversal : str\n The gremlin traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a Gremlin Query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_gremlin(client, \"g.V().limit(1)\")\n \"\"\"\n results = client.read_gremlin(query)\n df = pd.DataFrame.from_records(results)\n return df\n\n\ndef execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:\n \"\"\"Return results of a openCypher traversal as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The openCypher query to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run an openCypher query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> resp = wr.neptune.execute_opencypher(client, \"MATCH (n) RETURN n LIMIT 1\")\n \"\"\"\n resp = client.read_opencypher(query)\n df = pd.DataFrame.from_dict(resp)\n return df\n\n\ndef execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:\n \"\"\"Return results of a SPARQL query as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The SPARQL traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a SPARQL query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_sparql(client, \"PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n SELECT ?name\n WHERE {\n ?person foaf:name ?name .\n \"\"\"\n data = client.read_sparql(query)\n df = None\n if \"results\" in data and \"bindings\" in data[\"results\"]:\n df = pd.DataFrame(data[\"results\"][\"bindings\"])\n df.applymap(lambda x: x[\"value\"])\n else:\n df = pd.DataFrame(data)\n\n return df\n\n\ndef to_property_graph(\n client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True\n) -> bool:\n \"\"\"Write records stored in a DataFrame into Amazon Neptune.\n\n If writing to a property graph then DataFrames for vertices and edges must be written separately.\n DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.\n If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.\n If no ~label column exists an exception will be thrown.\n DataFrames for edges must have a ~id, 
~label, ~to, and ~from column. If the ~id column does not exist\n the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column\n exists an exception will be thrown.\n\n If you would like to save data using `single` cardinality then you can postfix (single) to the column header and\n set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property\n as single\n cardinality. You can disable this by setting by setting `use_header_cardinality=False`.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n df : pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n batch_size: int\n The number of rows to save at a time. Default 50\n use_header_cardinality: bool\n If True, then the header cardinality will be used to save the data. Default True\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_property_graph(\n ... df=df\n ... )\n \"\"\"\n # check if ~id and ~label column exist and if not throw error\n g = Graph().traversal()\n is_edge_df = False\n is_update_df = True\n if \"~id\" in df.columns:\n if \"~label\" in df.columns:\n is_update_df = False\n if \"~to\" in df.columns and \"~from\" in df.columns:\n is_edge_df = True\n else:\n raise exceptions.InvalidArgumentValue(\n \"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune\"\n )\n\n # Loop through items in the DF\n for (index, row) in df.iterrows():\n # build up a query\n if is_update_df:\n g = _build_gremlin_update(g, row, use_header_cardinality)\n elif is_edge_df:\n g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)\n else:\n g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)\n # run the query\n if index > 0 and index % batch_size == 0:\n res = _run_gremlin_insert(client, g)\n if res:\n g = Graph().traversal()\n\n return _run_gremlin_insert(client, g)\n\n\ndef to_rdf_graph(\n client: NeptuneClient,\n df: pd.DataFrame,\n batch_size: int = 50,\n subject_column: str = \"s\",\n predicate_column: str = \"p\",\n object_column: str = \"o\",\n graph_column: str = \"g\",\n) -> bool:\n \"\"\"Write records stored in a DataFrame into Amazon Neptune.\n\n The DataFrame must consist of triples with column names for the subject, predicate, and object specified.\n If you want to add data into a named graph then you will also need the graph column.\n\n Parameters\n ----------\n client (NeptuneClient) :\n instance of the neptune client to use\n df (pandas.DataFrame) :\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n subject_column (str, optional) :\n The column name in the dataframe for the subject. Defaults to 's'\n predicate_column (str, optional) :\n The column name in the dataframe for the predicate. Defaults to 'p'\n object_column (str, optional) :\n The column name in the dataframe for the object. Defaults to 'o'\n graph_column (str, optional) :\n The column name in the dataframe for the graph if sending across quads. 
Defaults to 'g'\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_rdf_graph(\n ... df=df\n ... )\n \"\"\"\n is_quads = False\n if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():\n if graph_column in df.columns:\n is_quads = True\n else:\n raise exceptions.InvalidArgumentValue(\n \"\"\"Dataframe must contain at least the subject, predicate, and object columns defined or the defaults\n (s, p, o) to be saved to Amazon Neptune\"\"\"\n )\n\n query = \"\"\n # Loop through items in the DF\n for (index, row) in df.iterrows():\n # build up a query\n if is_quads:\n insert = f\"\"\"INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>\n <{str(row[predicate_column])}> <{row[object_column]}> . }} }}; \"\"\"\n query = query + insert\n else:\n insert = f\"\"\"INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>\n <{row[object_column]}> . }}; \"\"\"\n query = query + insert\n # run the query\n if index > 0 and index % batch_size == 0:\n res = client.write_sparql(query)\n if res:\n query = \"\"\n return client.write_sparql(query)\n\n\ndef connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:\n \"\"\"Create a connection to a Neptune cluster.\n\n Parameters\n ----------\n host : str\n The host endpoint to connect to\n port : int\n The port endpoint to connect to\n iam_enabled : bool, optional\n True if IAM is enabled on the cluster. Defaults to False.\n\n Returns\n -------\n NeptuneClient\n [description]\n \"\"\"\n return NeptuneClient(host, port, iam_enabled, **kwargs)\n\n\ndef _get_column_name(column: str) -> str:\n if \"(single)\" in column.lower():\n return re.compile(r\"\\(single\\)\", re.IGNORECASE).sub(\"\", column)\n return column\n\n\ndef _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:\n for (column, value) in row.items():\n if column not in [\"~id\", \"~label\", \"~to\", \"~from\"]:\n # If the column header is specifying the cardinality then use it\n if use_header_cardinality:\n if column.lower().find(\"(single)\") > 0 and pd.notna(value):\n g = g.property(Cardinality.single, _get_column_name(column), value)\n else:\n g = _expand_properties(g, _get_column_name(column), value)\n else:\n # If not using header cardinality then use the default of set\n g = _expand_properties(g, column, value)\n return g\n\n\ndef _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:\n # If this is a list then expand it out into multiple property calls\n if isinstance(value, list) and len(value) > 0:\n for item in value:\n g = g.property(Cardinality.set_, column, item)\n elif pd.notna(value):\n g = g.property(Cardinality.set_, column, value)\n return g\n\n\ndef _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:\n g = g.V(str(row[\"~id\"]))\n g = _set_properties(g, use_header_cardinality, row)\n return g\n\n\ndef _build_gremlin_insert_vertices(\n g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False\n) -> GraphTraversalSource:\n g = g.V(str(row[\"~id\"])).fold().coalesce(__.unfold(), __.addV(row[\"~label\"]).property(T.id, str(row[\"~id\"])))\n g = _set_properties(g, use_header_cardinality, row)\n return g\n\n\ndef 
_build_gremlin_insert_edges(\n g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool\n) -> GraphTraversalSource:\n g = (\n g.V(str(row[\"~from\"]))\n .fold()\n .coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {\"~id\": row[\"~from\"], \"~label\": \"Vertex\"}))\n .addE(row[\"~label\"])\n .property(T.id, str(row[\"~id\"]))\n .to(\n __.V(str(row[\"~to\"]))\n .fold()\n .coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {\"~id\": row[\"~to\"], \"~label\": \"Vertex\"}))\n )\n )\n g = _set_properties(g, use_header_cardinality, row)\n\n return g\n\n\ndef _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:\n translator = Translator(\"g\")\n s = translator.translate(g.bytecode)\n s = s.replace(\"Cardinality.\", \"\") # hack to fix parser error for set cardinality\n _logger.debug(s)\n res = client.write_gremlin(s)\n return res\n\n\ndef flatten_nested_df(\n df: pd.DataFrame, include_prefix: bool = True, seperator: str = \"_\", recursive: bool = True\n) -> pd.DataFrame:\n \"\"\"Flatten the lists and dictionaries of the input data frame.\n\n Parameters\n ----------\n df : pd.DataFrame\n The input data frame\n include_prefix : bool, optional\n If True, then it will prefix the new column name with the original column name.\n Defaults to True.\n seperator : str, optional\n The seperator to use between field names when a dictionary is exploded.\n Defaults to \"_\".\n recursive : bool, optional\n If True, then this will recurse the fields in the data frame. Defaults to True.\n\n Returns\n -------\n pd.DataFrame: The flattened data frame\n \"\"\"\n if seperator is None:\n seperator = \"_\"\n df = df.reset_index()\n\n # search for list and map\n s = (df.applymap(type) == list).all()\n list_columns = s[s].index.tolist()\n\n s = (df.applymap(type) == dict).all()\n dict_columns = s[s].index.tolist()\n\n if len(list_columns) > 0 or len(dict_columns) > 0:\n new_columns = []\n\n for col in dict_columns:\n # expand dictionaries horizontally\n expanded = None\n if include_prefix:\n expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f\"{col}{seperator}\")\n else:\n expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f\"{seperator}\")\n expanded.index = df.index\n df = pd.concat([df, expanded], axis=1).drop(columns=[col])\n new_columns.extend(expanded.columns)\n\n for col in list_columns:\n df = df.drop(columns=[col]).join(df[col].explode().to_frame())\n new_columns.append(col)\n\n # check if there are still dict o list fields to flatten\n s = (df[new_columns].applymap(type) == list).all()\n list_columns = s[s].index.tolist()\n\n s = (df[new_columns].applymap(type) == dict).all()\n dict_columns = s[s].index.tolist()\n if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):\n df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)\n\n return df\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.json_normalize", "pandas.DataFrame.from_dict", "pandas.DataFrame", "pandas.concat", "pandas.notna", "pandas.Series" ] ]
MediaBrain-SJTU/GroupNet
[ "607541c8843f8b6206b1ffefd2d27fb07dcca073" ]
[ "datasets/nba/Game.py" ]
[ "import pandas as pd\nfrom Event import Event\nfrom Team import Team\nfrom Constant import Constant\nimport numpy as np\n\n\nclass Game:\n \"\"\"A class for keeping info about the games\"\"\"\n def __init__(self, path_to_json):\n # self.events = None\n self.home_team = None\n self.guest_team = None\n self.event = None\n self.path_to_json = path_to_json\n\n def read_json(self):\n data_frame = pd.read_json(self.path_to_json)\n last_default_index = len(data_frame) - 1\n all_trajs = []\n for i in range(last_default_index):\n event = data_frame['events'][i]\n self.event = Event(event)\n trajs = self.event.get_traj() # (N,15,11,2)\n if len(trajs) > 0:\n all_trajs.append(trajs)\n # print(i,len(trajs))\n all_trajs = np.concatenate(all_trajs,axis=0)\n return all_trajs\n\n\n\n" ]
[ [ "numpy.concatenate", "pandas.read_json" ] ]
cperales/Fourier-classifying-songs
[ "54d13e2ce2d7d05fe7126bbbd884917758188d6d" ]
[ "test/test_b_plot.py" ]
[ "import unittest\nfrom foucluster.plot import song_plot, diff_plot, heatmap_song\nimport configparser\nimport os\nimport json\nfrom scipy.io.wavfile import read\nimport numpy as np\nimport pandas as pd\n\n\nclass TestPlot(unittest.TestCase):\n\n @staticmethod\n def _get_series(i=0):\n \"\"\"\n\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n config.read('config.ini')\n fourier_folder = config['Folder']['Output']\n first_file = os.path.join(fourier_folder,\n os.listdir(fourier_folder)[i])\n with open(first_file, 'r') as b:\n j = json.load(b)\n name = list(j.keys())[0]\n song = j[name]\n return song, name\n\n @staticmethod\n def _get_song(i=0):\n \"\"\"\n\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n config.read('config.ini')\n song_folder = config['Folder']['Temp']\n first_song = os.listdir(song_folder)[i]\n rate, aud_data = read(os.path.join(song_folder,\n first_song))\n # Should be mono\n if len(aud_data) != len(aud_data.ravel()):\n aud_data = np.mean(aud_data, axis=1)\n return aud_data,first_song\n\n def test_diff(self):\n \"\"\"\n\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n config.read('config.ini')\n image_folder = config['Folder']['Image']\n song_1, name_1 = self._get_series(i=0)\n song_2, name_2 = self._get_series(i=1)\n diff_plot(song_1, song_2,\n filename=name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0],\n folder=image_folder)\n\n def test_song(self):\n \"\"\"\n\n :return:\n \"\"\"\n config = configparser.ConfigParser()\n config.read('config.ini')\n image_folder = config['Folder']['Image']\n aud_data, name = self._get_song()\n song_plot(aud_data,\n filename=name.split('.')[0],\n folder=image_folder)\n\n def test_heatmap(self):\n config = configparser.ConfigParser()\n config.read('config.ini')\n image_folder = config['Folder']['Image']\n distance_folder = config['Folder']['Distance']\n df = pd.read_csv(os.path.join(distance_folder, 'positive.csv'),\n sep=';',\n index_col=[0, 1])\n heatmap_song(df,\n image_name='heatmap_positive',\n image_folder=image_folder)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.mean" ] ]
trxw/qutip
[ "b923c973edd9a071d86eb849650661549f73585f" ]
[ "qutip/graph.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are \n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A \n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, \n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY \n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module contains a collection of graph theory routines used mainly\nto reorder matrices for iterative steady state solvers.\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom qutip.cy.graph_utils import (\n _pseudo_peripheral_node, _breadth_first_search, _node_degrees,\n _rcm, _bfs_matching, _weighted_bfs_matching)\nfrom qutip.settings import debug\nfrom warnings import warn\nif debug:\n import inspect\n\ndef graph_degree(A):\n \"\"\"\n Returns the degree for the nodes (rows) of a symmetric \n graph in sparse CSR or CSC format, or a qobj.\n \n Parameters\n ----------\n A : qobj, csr_matrix, csc_matrix\n Input quantum object or csr_matrix.\n \n Returns\n -------\n degree : array\n Array of integers giving the degree for each node (row).\n \n \"\"\"\n if A.__class__.__name__=='Qobj':\n return _node_degrees(A.data.indices, A.data.indptr, A.shape[0])\n else:\n return _node_degrees(A.indices, A.indptr, A.shape[0])\n\n\ndef breadth_first_search(A,start):\n \"\"\"\n Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting\n from a given node (row). 
Takes Qobjs and CSR or CSC matrices as inputs.\n \n This function requires a matrix with symmetric structure.\n Use A+trans(A) if original matrix is not symmetric or not sure.\n \n Parameters\n ----------\n A : qobj, csr_matrix\n Input graph in CSR matrix form\n \n start : int\n Staring node for BFS traversal.\n \n Returns\n -------\n order : array\n Order in which nodes are traversed from starting node.\n \n levels : array\n Level of the nodes in the order that they are traversed.\n \n \"\"\"\n if A.__class__.__name__=='Qobj':\n A=A.data\n num_rows=A.shape[0]\n start=int(start)\n order, levels = _breadth_first_search(A.indices,A.indptr, num_rows, start)\n #since maybe not all nodes are in search, check for unused entires in arrays\n return order[order!=-1], levels[levels!=-1]\n\n\ndef symrcm(A, sym=False):\n \"\"\"\n Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj\n in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,\n this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).\n \n It is assumed by default (*sym=False*) that the input matrix is not symmetric. This\n is because it is faster to do A+Trans(A) than it is to check for symmetry for \n a generic matrix. If you are guaranteed that the matrix is symmetric in structure\n (values of matrix element do not matter) then set *sym=True*\n \n Parameters\n ----------\n A : csr_matrix, qobj\n Input sparse csr_matrix or Qobj.\n \n sym : bool {False, True}\n Flag to set whether input matrix is symmetric.\n \n Returns\n -------\n perm : array\n Array of permuted row and column indices.\n \n Notes\n -----\n This routine is used primarily for internal reordering of Lindblad super-operators\n for use in iterative solver routines.\n \n References\n ----------\n E. Cuthill and J. McKee, \"Reducing the Bandwidth of Sparse Symmetric Matrices\",\n ACM '69 Proceedings of the 1969 24th national conference, (1969).\n \n \"\"\"\n nrows = A.shape[0]\n if A.__class__.__name__=='Qobj':\n if not sym:\n A = A.data+A.data.transpose()\n return _rcm(A.indices, A.indptr, nrows)\n else:\n return _rcm(A.data.indices, A.data.indptr, nrows)\n else:\n if not sym:\n A=A+A.transpose()\n return _rcm(A.indices, A.indptr, nrows)\n\n\ndef bfs_matching(A):\n \"\"\"\n Returns an array of row permutations that removes nonzero elements\n from the diagonal of a nonsingular square CSC sparse matrix. Such\n a permutation is always possible provided that the matrix is \n nonsingular.\n \n This function looks at the structure of the matrix only.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function relies on a maximum cardinality bipartite matching algorithm\n based on a breadth-first search (BFS) of the underlying graph[1]_.\n \n References\n ----------\n .. [1] I. S. Duff, K. Kaya, and B. Ucar, \"Design, Implementation, and \n Analysis of Maximum Transversal Algorithms\", ACM Trans. Math. Softw.\n 38, no. 
2, (2011).\n \n \"\"\"\n nrows = A.shape[0]\n if A.shape[0]!=A.shape[1]:\n raise ValueError('bfs_matching requires a square matrix.')\n if A.__class__.__name__=='Qobj':\n A = A.data.tocsc()\n elif not sp.isspmatrix_csc(A):\n A = sp.csc_matrix(A)\n warn('bfs_matching requires CSC matrix format.', \n sp.SparseEfficiencyWarning)\n \n perm = _bfs_matching(A.indices, A.indptr, nrows)\n if np.any(perm==-1):\n raise Exception('Possibly singular input matrix.')\n return perm\n\n\ndef weighted_bfs_matching(A):\n \"\"\"\n Returns an array of row permutations that attempts to maximize\n the product of the ABS values of the diagonal elements in \n a nonsingular square CSC sparse matrix. Such a permutation is \n always possible provided that the matrix is nonsingular.\n \n This function looks at both the structure and ABS values of the \n underlying matrix.\n \n Parameters\n ----------\n A : csc_matrix\n Input matrix\n \n Returns\n -------\n perm : array\n Array of row permutations.\n \n Notes\n -----\n This function uses a weighted maximum cardinality bipartite matching \n algorithm based on breadth-first search (BFS). The columns are weighted\n according to the element of max ABS value in the associated rows and \n are traversed in descending order by weight. When performing the BFS \n traversal, the row associated to a given column is the one with maximum \n weight. Unlike other techniques[1]_, this algorithm does not guarantee the \n product of the diagonal is maximized. However, this limitation is offset\n by the substantially faster runtime of this method. \n \n References\n ----------\n .. [1] I. S. Duff and J. Koster, \"The design and use of algorithms for \n permuting large entries to the diagonal of sparse matrices\", SIAM J. \n Matrix Anal. and Applics. 20, no. 4, 889 (1997).\n \n \"\"\"\n nrows = A.shape[0]\n if A.shape[0]!=A.shape[1]:\n raise ValueError('weighted_bfs_matching requires a square matrix.')\n if A.__class__.__name__=='Qobj':\n A = A.data.tocsc()\n elif not sp.isspmatrix_csc(A):\n A = sp.csc_matrix(A)\n warn('weighted_bfs_matching requires CSC matrix format', \n sp.SparseEfficiencyWarning)\n \n perm = _weighted_bfs_matching(\n np.asarray(np.abs(A.data), dtype=float),\n A.indices, A.indptr, nrows)\n if np.any(perm==-1):\n raise Exception('Possibly singular input matrix.')\n return perm\n \n" ]
[ [ "numpy.any", "numpy.abs", "scipy.sparse.csc_matrix", "scipy.sparse.isspmatrix_csc" ] ]
liangan1/fairseq
[ "31b54e8ec03824bad61a80bea376c987e2e7c721" ]
[ "fairseq_cli/eval_lm.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nEvaluate the perplexity of a trained language model.\n\"\"\"\n\nimport logging\nimport math\nimport os\n\nimport torch\n\nfrom fairseq import checkpoint_utils, options, tasks, utils\nfrom fairseq.data import LMContextWindowDataset\nfrom fairseq.logging import progress_bar\nfrom fairseq.logging.meters import StopwatchMeter, TimeMeter\nfrom fairseq.sequence_scorer import SequenceScorer\nfrom fairseq import distributed_utils\n\n\nlogging.basicConfig(\n format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n)\nlogger = logging.getLogger('fairseq_cli.eval_lm')\n\n\nclass WordStat(object):\n def __init__(self, word, is_bpe):\n self.word = word\n self.is_bpe = is_bpe\n self.log_prob = 0\n self.next_word_prob = 0\n self.count = 0\n self.missing_next_words = 0\n\n def add(self, log_prob, next_word_prob):\n \"\"\" increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n also keeps track of how many of those we have seen \"\"\"\n if next_word_prob is not None:\n self.next_word_prob += next_word_prob\n else:\n self.missing_next_words += 1\n self.log_prob += log_prob\n self.count += 1\n\n def __str__(self):\n return '{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,\n self.next_word_prob, self.count - self.missing_next_words)\n\n\ndef main(parsed_args, **unused_kwargs):\n assert parsed_args.path is not None, '--path required for evaluation!'\n\n if torch.cuda.is_available() and not parsed_args.cpu:\n torch.cuda.set_device(parsed_args.device_id)\n\n utils.import_user_module(parsed_args)\n\n logger.info(parsed_args)\n\n if parsed_args.ipex:\n import intel_pytorch_extension as ipex\n if args.dnnl:\n ipex.core.enable_auto_dnnl()\n else:\n ipex.core.disable_auto_dnnl()\n if args.mix_precision:\n ipex.core.enable_mix_bf16_fp32()\n \n use_cuda = torch.cuda.is_available() and not parsed_args.cpu\n\n task = tasks.setup_task(parsed_args)\n\n # Load ensemble\n logger.info('loading model(s) from {}'.format(parsed_args.path))\n models, args = checkpoint_utils.load_model_ensemble(\n parsed_args.path.split(os.pathsep),\n arg_overrides=eval(parsed_args.model_overrides),\n task=task,\n suffix=getattr(parsed_args, \"checkpoint_suffix\", \"\"),\n )\n\n for arg in vars(parsed_args).keys():\n if arg not in {\n 'self_target', 'future_target', 'past_target', 'tokens_per_sample',\n 'output_size_dictionary', 'add_bos_token',\n }:\n setattr(args, arg, getattr(parsed_args, arg))\n\n # reduce tokens per sample by the required context window size\n args.tokens_per_sample -= args.context_window\n task = tasks.setup_task(args)\n\n # Load dataset splits\n task.load_dataset(args.gen_subset)\n dataset = task.dataset(args.gen_subset)\n if args.context_window > 0:\n dataset = LMContextWindowDataset(\n dataset=dataset,\n tokens_per_sample=args.tokens_per_sample,\n context_window=args.context_window,\n pad_idx=task.source_dictionary.pad(),\n )\n logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))\n\n # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)\n for 
model in models:\n model.prepare_for_inference_(args)\n if args.fp16:\n model.half()\n if use_cuda:\n model.cuda()\n if args.ipex:\n model = model.to(device = ipex.DEVICE)\n\n assert len(models) > 0\n\n logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))\n\n itr = task.get_batch_iterator(\n dataset=dataset,\n max_tokens=args.max_tokens or 36000,\n max_sentences=args.max_sentences,\n max_positions=utils.resolve_max_positions(*[\n model.max_positions() for model in models\n ]),\n ignore_invalid_inputs=True,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args.log_format,\n log_interval=args.log_interval,\n default_log_format=('tqdm' if not args.no_progress_bar else 'none'),\n )\n\n gen_timer = StopwatchMeter()\n scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)\n\n score_sum = 0.\n count = 0\n\n if args.remove_bpe is not None:\n if args.remove_bpe == 'sentencepiece':\n raise NotImplementedError\n else:\n bpe_cont = args.remove_bpe.rstrip()\n bpe_toks = {\n i\n for i in range(len(task.source_dictionary))\n if task.source_dictionary[i].endswith(bpe_cont)\n }\n bpe_len = len(bpe_cont)\n else:\n bpe_toks = None\n bpe_len = 0\n\n word_stats = dict()\n\n wps_meter = TimeMeter()\n\n for sample in progress:\n if 'net_input' not in sample:\n continue\n\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n sample = utils.move_to_ipex(sample) if args.ipex else sample \n\n gen_timer.start()\n hypos = scorer.generate(models, sample)\n gen_timer.stop(sample['ntokens'])\n\n for i, hypos_i in enumerate(hypos):\n hypo = hypos_i[0]\n sample_id = sample['id'][i]\n\n tokens = hypo['tokens']\n tgt_len = tokens.numel()\n pos_scores = hypo['positional_scores'].float()\n\n if args.add_bos_token:\n assert hypo['tokens'][0].item() == task.target_dictionary.bos()\n tokens = tokens[1:]\n pos_scores = pos_scores[1:]\n\n skipped_toks = 0\n if bpe_toks is not None:\n for i in range(tgt_len - 1):\n if tokens[i].item() in bpe_toks:\n skipped_toks += 1\n pos_scores[i + 1] += pos_scores[i]\n pos_scores[i] = 0\n\n inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))\n if inf_scores.any():\n logger.info(\n 'skipping tokens with inf scores:',\n task.target_dictionary.string(tokens[inf_scores.nonzero()])\n )\n pos_scores = pos_scores[(~inf_scores).nonzero()]\n score_sum += pos_scores.sum().cpu()\n count += pos_scores.numel() - skipped_toks\n\n if args.output_word_probs or args.output_word_stats:\n w = ''\n word_prob = []\n is_bpe = False\n for i in range(len(tokens)):\n w_ind = tokens[i].item()\n w += task.source_dictionary[w_ind]\n if bpe_toks is not None and w_ind in bpe_toks:\n w = w[:-bpe_len]\n is_bpe = True\n else:\n word_prob.append((w, pos_scores[i].item()))\n\n next_prob = None\n ind = i + 1\n while ind < len(tokens):\n if pos_scores[ind].item() != 0:\n next_prob = pos_scores[ind]\n break\n ind += 1\n\n word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)\n is_bpe = False\n w = ''\n if args.output_word_probs:\n logger.info(\n str(int(sample_id)) + \" \"\n + ('\\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))\n )\n\n wps_meter.update(sample['ntokens'])\n progress.log({'wps': round(wps_meter.avg)})\n\n avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2\n logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(\n gen_timer.n, 
gen_timer.sum, 1. / gen_timer.avg\n ))\n logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(\n avg_nll_loss, 2**avg_nll_loss\n ))\n\n if args.output_word_stats:\n for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):\n logger.info(ws)\n\n\ndef cli_main():\n parser = options.get_eval_lm_parser()\n args = options.parse_args_and_arch(parser)\n distributed_utils.call_main(args, main)\n\n\nif __name__ == '__main__':\n cli_main()\n" ]
[ [ "torch.cuda.set_device", "torch.cuda.is_available" ] ]
sbrunk/flytekit
[ "0aa9cdb1be928f799170da61f1135121ccb64657" ]
[ "tests/flytekit/unit/core/test_type_hints.py" ]
[ "import datetime\nimport os\nimport typing\nfrom dataclasses import dataclass\n\nimport pandas\nimport pytest\nfrom dataclasses_json import dataclass_json\n\nimport flytekit\nfrom flytekit import ContainerTask, SQLTask, dynamic, kwtypes, maptask\nfrom flytekit.common.translator import get_serializable\nfrom flytekit.core import context_manager, launch_plan, promise\nfrom flytekit.core.condition import conditional\nfrom flytekit.core.context_manager import ExecutionState, Image, ImageConfig\nfrom flytekit.core.node import Node\nfrom flytekit.core.promise import NodeOutput, Promise, VoidPromise\nfrom flytekit.core.resources import Resources\nfrom flytekit.core.task import TaskMetadata, task\nfrom flytekit.core.testing import patch, task_mock\nfrom flytekit.core.type_engine import RestrictedTypeError, TypeEngine\nfrom flytekit.core.workflow import workflow\nfrom flytekit.interfaces.data.data_proxy import FileAccessProvider\nfrom flytekit.models.core import types as _core_types\nfrom flytekit.models.interface import Parameter\nfrom flytekit.models.task import Resources as _resource_models\nfrom flytekit.models.types import LiteralType\nfrom flytekit.types.schema import FlyteSchema, SchemaOpenMode\n\n\ndef test_default_wf_params_works():\n @task\n def my_task(a: int):\n wf_params = flytekit.current_context()\n assert wf_params.execution_id == \"ex:local:local:local\"\n\n my_task(a=3)\n\n\ndef test_simple_input_output():\n @task\n def my_task(a: int) -> typing.NamedTuple(\"OutputsBC\", b=int, c=str):\n ctx = flytekit.current_context()\n assert ctx.execution_id == \"ex:local:local:local\"\n return a + 2, \"hello world\"\n\n assert my_task(a=3) == (5, \"hello world\")\n\n\ndef test_simple_input_no_output():\n @task\n def my_task(a: int):\n pass\n\n assert my_task(a=3) is None\n\n ctx = context_manager.FlyteContext.current_context()\n with ctx.new_compilation_context() as ctx:\n outputs = my_task(a=3)\n assert isinstance(outputs, VoidPromise)\n\n\ndef test_single_output():\n @task\n def my_task() -> str:\n return \"Hello world\"\n\n assert my_task() == \"Hello world\"\n\n ctx = context_manager.FlyteContext.current_context()\n with ctx.new_compilation_context() as ctx:\n outputs = my_task()\n assert ctx.compilation_state is not None\n nodes = ctx.compilation_state.nodes\n assert len(nodes) == 1\n assert outputs.is_ready is False\n assert outputs.ref.node is nodes[0]\n\n\ndef test_engine_file_output():\n basic_blob_type = _core_types.BlobType(format=\"\", dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE,)\n\n fs = FileAccessProvider(local_sandbox_dir=\"/tmp/flytetesting\")\n with context_manager.FlyteContext.current_context().new_file_access_context(file_access_provider=fs) as ctx:\n # Write some text to a file not in that directory above\n test_file_location = \"/tmp/sample.txt\"\n with open(test_file_location, \"w\") as fh:\n fh.write(\"Hello World\\n\")\n\n lit = TypeEngine.to_literal(ctx, test_file_location, os.PathLike, LiteralType(blob=basic_blob_type))\n\n # Since we're using local as remote, we should be able to just read the file from the 'remote' location.\n with open(lit.scalar.blob.uri, \"r\") as fh:\n assert fh.readline() == \"Hello World\\n\"\n\n # We should also be able to turn the thing back into regular python native thing.\n redownloaded_local_file_location = TypeEngine.to_python_value(ctx, lit, os.PathLike)\n with open(redownloaded_local_file_location, \"r\") as fh:\n assert fh.readline() == \"Hello World\\n\"\n\n\ndef test_wf1():\n @task\n def t1(a: int) -> 
typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = t2(a=y, b=b)\n return x, d\n\n assert len(my_wf._nodes) == 2\n assert my_wf._nodes[0].id == \"n0\"\n assert my_wf._nodes[1]._upstream_nodes[0] is my_wf._nodes[0]\n\n assert len(my_wf._output_bindings) == 2\n assert my_wf._output_bindings[0].var == \"o0\"\n assert my_wf._output_bindings[0].binding.promise.var == \"t1_int_output\"\n\n nt = typing.NamedTuple(\"SingleNT\", t1_int_output=float)\n\n @task\n def t3(a: int) -> nt:\n return (a + 2,)\n\n assert t3.python_interface.output_tuple_name == \"SingleNT\"\n assert t3.interface.outputs[\"t1_int_output\"] is not None\n\n\ndef test_wf1_run():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = t2(a=y, b=b)\n return x, d\n\n x = my_wf(a=5, b=\"hello \")\n assert x == (7, \"hello world\")\n\n @workflow\n def my_wf2(a: int, b: str) -> (int, str):\n tup = t1(a=a)\n d = t2(a=tup.c, b=b)\n return tup.t1_int_output, d\n\n x = my_wf2(a=5, b=\"hello \")\n assert x == (7, \"hello world\")\n\n\ndef test_wf1_with_overrides():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a).with_overrides(name=\"x\")\n d = t2(a=y, b=b).with_overrides()\n return x, d\n\n x = my_wf(a=5, b=\"hello \")\n assert x == (7, \"hello world\")\n\n\ndef test_wf1_with_list_of_inputs():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: typing.List[str]) -> str:\n return \" \".join(a)\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n xx, yy = t1(a=a)\n d = t2(a=[b, yy])\n return xx, d\n\n x = my_wf(a=5, b=\"hello\")\n assert x == (7, \"hello world\")\n\n @workflow\n def my_wf2(a: int, b: str) -> int:\n x, y = t1(a=a)\n t2(a=[b, y])\n return x\n\n x = my_wf2(a=5, b=\"hello\")\n assert x == 7\n\n\ndef test_wf_output_mismatch():\n with pytest.raises(AssertionError):\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n return a\n\n with pytest.raises(AssertionError):\n\n @workflow\n def my_wf2(a: int, b: str) -> int:\n return a, b\n\n @workflow\n def my_wf3(a: int, b: str) -> int:\n return (a,)\n\n my_wf3(a=10, b=\"hello\")\n\n\ndef test_promise_return():\n \"\"\"\n Testing that when a workflow is local executed but a local wf execution context already exists, Promise objects\n are returned wrapping Flyte literals instead of the unpacked dict.\n \"\"\"\n\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n a = a + 2\n return a, \"world-\" + str(a)\n\n @workflow\n def mimic_sub_wf(a: int) -> (str, str):\n x, y = t1(a=a)\n u, v = t1(a=x)\n return y, v\n\n ctx = context_manager.FlyteContext.current_context()\n\n with ctx.new_execution_context(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) as ctx:\n a, b = mimic_sub_wf(a=3)\n\n assert isinstance(a, promise.Promise)\n assert isinstance(b, promise.Promise)\n assert a.val.scalar.value.string_value == \"world-5\"\n assert b.val.scalar.value.string_value == \"world-7\"\n\n\ndef test_wf1_with_sql():\n 
sql = SQLTask(\n \"my-query\",\n query_template=\"SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10\",\n inputs=kwtypes(ds=datetime.datetime),\n outputs=kwtypes(results=FlyteSchema),\n metadata=TaskMetadata(retries=2),\n )\n\n @task\n def t1() -> datetime.datetime:\n return datetime.datetime.now()\n\n @workflow\n def my_wf() -> FlyteSchema:\n dt = t1()\n return sql(ds=dt)\n\n with task_mock(sql) as mock:\n mock.return_value = pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]})\n assert (my_wf().open().all() == pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]})).all().all()\n\n\ndef test_wf1_with_sql_with_patch():\n sql = SQLTask(\n \"my-query\",\n query_template=\"SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10\",\n inputs=kwtypes(ds=datetime.datetime),\n outputs=kwtypes(results=FlyteSchema),\n metadata=TaskMetadata(retries=2),\n )\n\n @task\n def t1() -> datetime.datetime:\n return datetime.datetime.now()\n\n @workflow\n def my_wf() -> FlyteSchema:\n dt = t1()\n return sql(ds=dt)\n\n @patch(sql)\n def test_user_demo_test(mock_sql):\n mock_sql.return_value = pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]})\n assert (my_wf().open().all() == pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]})).all().all()\n\n # Have to call because tests inside tests don't run\n test_user_demo_test()\n\n\ndef test_wf1_with_map():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n a = a + 2\n return a, \"world-\" + str(a)\n\n @task\n def t2(a: typing.List[int], b: typing.List[str]) -> (int, str):\n ra = 0\n for x in a:\n ra += x\n rb = \"\"\n for x in b:\n rb += x\n return ra, rb\n\n @workflow\n def my_wf(a: typing.List[int]) -> (int, str):\n x, y = maptask(t1, metadata=TaskMetadata(retries=1))(a=a)\n return t2(a=x, b=y)\n\n x = my_wf(a=[5, 6])\n assert x == (15, \"world-7world-8\")\n\n\ndef test_wf1_compile_time_constant_vars():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = t2(a=\"This is my way\", b=b)\n return x, d\n\n x = my_wf(a=5, b=\"hello \")\n assert x == (7, \"hello This is my way\")\n\n\ndef test_wf1_with_constant_return():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n t2(a=\"This is my way\", b=b)\n return x, \"A constant output\"\n\n x = my_wf(a=5, b=\"hello \")\n assert x == (7, \"A constant output\")\n\n @workflow\n def my_wf2(a: int, b: str) -> int:\n t1(a=a)\n t2(a=\"This is my way\", b=b)\n return 10\n\n assert my_wf2(a=5, b=\"hello \") == 10\n\n\ndef test_wf1_with_dynamic():\n @task\n def t1(a: int) -> str:\n a = a + 2\n return \"world-\" + str(a)\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @dynamic\n def my_subwf(a: int) -> typing.List[str]:\n s = []\n for i in range(a):\n s.append(t1(a=i))\n return s\n\n @workflow\n def my_wf(a: int, b: str) -> (str, typing.List[str]):\n x = t2(a=b, b=b)\n v = my_subwf(a=a)\n return x, v\n\n v = 5\n x = my_wf(a=v, b=\"hello \")\n assert x == (\"hello hello \", [\"world-\" + str(i) for i in range(2, v + 2)])\n\n with 
context_manager.FlyteContext.current_context().new_serialization_settings(\n serialization_settings=context_manager.SerializationSettings(\n project=\"test_proj\",\n domain=\"test_domain\",\n version=\"abc\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"image\", tag=\"name\")),\n env={},\n )\n ) as ctx:\n with ctx.new_execution_context(mode=ExecutionState.Mode.TASK_EXECUTION) as ctx:\n dynamic_job_spec = my_subwf.compile_into_workflow(ctx, my_subwf._task_function, a=5)\n assert len(dynamic_job_spec._nodes) == 5\n\n\ndef test_list_output():\n @task\n def t1(a: int) -> str:\n a = a + 2\n return \"world-\" + str(a)\n\n @workflow\n def lister() -> typing.List[str]:\n s = []\n # FYI: For users who happen to look at this, keep in mind this is only run once at compile time.\n for i in range(10):\n s.append(t1(a=i))\n return s\n\n assert len(lister.interface.outputs) == 1\n binding_data = lister._output_bindings[0].binding # the property should be named binding_data\n assert binding_data.collection is not None\n assert len(binding_data.collection.bindings) == 10\n\n\ndef test_comparison_refs():\n def dummy_node(node_id) -> Node:\n n = Node(\n node_id,\n metadata=None,\n bindings=[],\n upstream_nodes=[],\n flyte_entity=SQLTask(name=\"x\", query_template=\"x\", inputs={}),\n )\n\n n._id = node_id\n return n\n\n px = Promise(\"x\", NodeOutput(var=\"x\", node=dummy_node(\"n1\")))\n py = Promise(\"y\", NodeOutput(var=\"y\", node=dummy_node(\"n2\")))\n\n def print_expr(expr):\n print(f\"{expr} is type {type(expr)}\")\n\n print_expr(px == py)\n print_expr(px < py)\n print_expr((px == py) & (px < py))\n print_expr(((px == py) & (px < py)) | (px > py))\n print_expr(px < 5)\n print_expr(px >= 5)\n\n\ndef test_comparison_lits():\n px = Promise(\"x\", TypeEngine.to_literal(None, 5, int, None))\n py = Promise(\"y\", TypeEngine.to_literal(None, 8, int, None))\n\n def eval_expr(expr, expected: bool):\n print(f\"{expr} evals to {expr.eval()}\")\n assert expected == expr.eval()\n\n eval_expr(px == py, False)\n eval_expr(px < py, True)\n eval_expr((px == py) & (px < py), False)\n eval_expr(((px == py) & (px < py)) | (px > py), False)\n eval_expr(px < 5, False)\n eval_expr(px >= 5, True)\n eval_expr(py >= 5, True)\n\n\ndef test_wf1_branches():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str) -> str:\n return a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = (\n conditional(\"test1\")\n .if_(x == 4)\n .then(t2(a=b))\n .elif_(x >= 5)\n .then(t2(a=y))\n .else_()\n .fail(\"Unable to choose branch\")\n )\n f = conditional(\"test2\").if_(d == \"hello \").then(t2(a=\"It is hello\")).else_().then(t2(a=\"Not Hello!\"))\n return x, f\n\n x = my_wf(a=5, b=\"hello \")\n assert x == (7, \"Not Hello!\")\n\n x = my_wf(a=2, b=\"hello \")\n assert x == (4, \"It is hello\")\n\n\ndef test_wf1_branches_no_else():\n with pytest.raises(NotImplementedError):\n\n def foo():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str) -> str:\n return a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = conditional(\"test1\").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y))\n conditional(\"test2\").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y)).else_().fail(\"blah\")\n return x, d\n\n foo()\n\n\ndef test_wf1_branches_failing():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", 
t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def t2(a: str) -> str:\n return a\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = (\n conditional(\"test1\")\n .if_(x == 4)\n .then(t2(a=b))\n .elif_(x >= 5)\n .then(t2(a=y))\n .else_()\n .fail(\"All Branches failed\")\n )\n return x, d\n\n with pytest.raises(ValueError):\n my_wf(a=1, b=\"hello \")\n\n\ndef test_cant_use_normal_tuples():\n with pytest.raises(RestrictedTypeError):\n\n @task\n def t1(a: str) -> tuple:\n return (a, 3)\n\n\ndef test_wf1_df():\n @task\n def t1(a: int) -> pandas.DataFrame:\n return pandas.DataFrame(data={\"col1\": [a, 2], \"col2\": [a, 4]})\n\n @task\n def t2(df: pandas.DataFrame) -> pandas.DataFrame:\n return df.append(pandas.DataFrame(data={\"col1\": [5, 10], \"col2\": [5, 10]}))\n\n @workflow\n def my_wf(a: int) -> pandas.DataFrame:\n df = t1(a=a)\n return t2(df=df)\n\n x = my_wf(a=20)\n assert isinstance(x, pandas.DataFrame)\n result_df = x.reset_index(drop=True) == pandas.DataFrame(\n data={\"col1\": [20, 2, 5, 10], \"col2\": [20, 4, 5, 10]}\n ).reset_index(drop=True)\n assert result_df.all().all()\n\n\ndef test_lp_serialize():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n a = a + 2\n return a, \"world-\" + str(a)\n\n @task\n def t2(a: str, b: str) -> str:\n return b + a\n\n @workflow\n def my_subwf(a: int) -> (str, str):\n x, y = t1(a=a)\n u, v = t1(a=x)\n return y, v\n\n lp = launch_plan.LaunchPlan.create(\"serialize_test1\", my_subwf)\n lp_with_defaults = launch_plan.LaunchPlan.create(\"serialize_test2\", my_subwf, default_inputs={\"a\": 3})\n\n serialization_settings = context_manager.SerializationSettings(\n project=\"proj\",\n domain=\"dom\",\n version=\"123\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"asdf/fdsa\", tag=\"123\")),\n env={},\n )\n sdk_lp = get_serializable(serialization_settings, lp)\n assert len(sdk_lp.default_inputs.parameters) == 0\n assert len(sdk_lp.fixed_inputs.literals) == 0\n\n sdk_lp = get_serializable(serialization_settings, lp_with_defaults)\n assert len(sdk_lp.default_inputs.parameters) == 1\n assert len(sdk_lp.fixed_inputs.literals) == 0\n\n # Adding a check to make sure oneof is respected. Tricky with booleans... 
if a default is specified, the\n # required field needs to be None, not False.\n parameter_a = sdk_lp.default_inputs.parameters[\"a\"]\n parameter_a = Parameter.from_flyte_idl(parameter_a.to_flyte_idl())\n assert parameter_a.default is not None\n\n\ndef test_wf_container_task():\n @task\n def t1(a: int) -> (int, str):\n return a + 2, str(a) + \"-HELLO\"\n\n t2 = ContainerTask(\n \"raw\",\n image=\"alpine\",\n inputs=kwtypes(a=int, b=str),\n input_data_dir=\"/tmp\",\n output_data_dir=\"/tmp\",\n command=[\"cat\"],\n arguments=[\"/tmp/a\"],\n )\n\n def wf(a: int):\n x, y = t1(a=a)\n t2(a=x, b=y)\n\n with task_mock(t2) as mock:\n mock.side_effect = lambda a, b: None\n assert t2(a=10, b=\"hello\") is None\n\n wf(a=10)\n\n\ndef test_wf_container_task_multiple():\n square = ContainerTask(\n name=\"square\",\n input_data_dir=\"/var/inputs\",\n output_data_dir=\"/var/outputs\",\n inputs=kwtypes(val=int),\n outputs=kwtypes(out=int),\n image=\"alpine\",\n command=[\"sh\", \"-c\", \"echo $(( {{.Inputs.val}} * {{.Inputs.val}} )) | tee /var/outputs/out\"],\n )\n\n sum = ContainerTask(\n name=\"sum\",\n input_data_dir=\"/var/flyte/inputs\",\n output_data_dir=\"/var/flyte/outputs\",\n inputs=kwtypes(x=int, y=int),\n outputs=kwtypes(out=int),\n image=\"alpine\",\n command=[\"sh\", \"-c\", \"echo $(( {{.Inputs.x}} + {{.Inputs.y}} )) | tee /var/flyte/outputs/out\"],\n )\n\n @workflow\n def raw_container_wf(val1: int, val2: int) -> int:\n return sum(x=square(val=val1), y=square(val=val2))\n\n with task_mock(square) as square_mock, task_mock(sum) as sum_mock:\n square_mock.side_effect = lambda val: val * val\n assert square(val=10) == 100\n\n sum_mock.side_effect = lambda x, y: x + y\n assert sum(x=10, y=10) == 20\n\n assert raw_container_wf(val1=10, val2=10) == 200\n\n\ndef test_wf_tuple_fails():\n with pytest.raises(RestrictedTypeError):\n\n @task\n def t1(a: tuple) -> (int, str):\n return a[0] + 2, str(a) + \"-HELLO\"\n\n\ndef test_wf_typed_schema():\n schema1 = FlyteSchema[kwtypes(x=int, y=str)]\n\n @task\n def t1() -> schema1:\n s = schema1()\n s.open().write(pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]}))\n return s\n\n @task\n def t2(s: FlyteSchema[kwtypes(x=int, y=str)]) -> FlyteSchema[kwtypes(x=int)]:\n df = s.open().all()\n return df[s.column_names()[:-1]]\n\n @workflow\n def wf() -> FlyteSchema[kwtypes(x=int)]:\n return t2(s=t1())\n\n w = t1()\n assert w is not None\n df = w.open(override_mode=SchemaOpenMode.READ).all()\n result_df = df.reset_index(drop=True) == pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]}).reset_index(\n drop=True\n )\n assert result_df.all().all()\n\n df = t2(s=w.as_readonly())\n assert df is not None\n result_df = df.reset_index(drop=True) == pandas.DataFrame(data={\"x\": [1, 2]}).reset_index(drop=True)\n assert result_df.all().all()\n\n x = wf()\n df = x.open().all()\n result_df = df.reset_index(drop=True) == pandas.DataFrame(data={\"x\": [1, 2]}).reset_index(drop=True)\n assert result_df.all().all()\n\n\ndef test_wf_schema_to_df():\n schema1 = FlyteSchema[kwtypes(x=int, y=str)]\n\n @task\n def t1() -> schema1:\n s = schema1()\n s.open().write(pandas.DataFrame(data={\"x\": [1, 2], \"y\": [\"3\", \"4\"]}))\n return s\n\n @task\n def t2(df: pandas.DataFrame) -> int:\n return len(df.columns.values)\n\n @workflow\n def wf() -> int:\n return t2(df=t1())\n\n x = wf()\n assert x == 2\n\n\ndef test_dict_wf_with_constants():\n @task\n def t1(a: int) -> typing.NamedTuple(\"OutputsBC\", t1_int_output=int, c=str):\n return a + 2, \"world\"\n\n @task\n def 
t2(a: typing.Dict[str, str]) -> str:\n return \" \".join([v for k, v in a.items()])\n\n @workflow\n def my_wf(a: int, b: str) -> (int, str):\n x, y = t1(a=a)\n d = t2(a={\"key1\": b, \"key2\": y})\n return x, d\n\n x = my_wf(a=5, b=\"hello\")\n assert x == (7, \"hello world\")\n\n\ndef test_dict_wf_with_conversion():\n @task\n def t1(a: int) -> typing.Dict[str, str]:\n return {\"a\": str(a)}\n\n @task\n def t2(a: dict) -> str:\n print(f\"HAHAH {a}\")\n return \" \".join([v for k, v in a.items()])\n\n @workflow\n def my_wf(a: int) -> str:\n return t2(a=t1(a=a))\n\n with pytest.raises(TypeError):\n my_wf(a=5)\n\n\ndef test_wf_with_empty_dict():\n @task\n def t1() -> typing.Dict:\n return {}\n\n @task\n def t2(d: typing.Dict):\n assert d == {}\n\n @workflow\n def wf():\n d = t1()\n t2(d=d)\n\n wf()\n\n\ndef test_wf_with_catching_no_return():\n @task\n def t1() -> typing.Dict:\n return {}\n\n @task\n def t2(d: typing.Dict):\n assert d == {}\n\n @task\n def t3(s: str):\n pass\n\n with pytest.raises(AssertionError):\n\n @workflow\n def wf():\n d = t1()\n # The following statement is wrong, this should not be allowed to pass to another task\n x = t2(d=d)\n # Passing x is wrong in this case\n t3(s=x)\n\n wf()\n\n\ndef test_wf_custom_types_missing_dataclass_json():\n with pytest.raises(AssertionError):\n\n @dataclass\n class MyCustomType(object):\n pass\n\n @task\n def t1(a: int) -> MyCustomType:\n return MyCustomType()\n\n\ndef test_wf_custom_types():\n @dataclass_json\n @dataclass\n class MyCustomType(object):\n x: int\n y: str\n\n @task\n def t1(a: int) -> MyCustomType:\n return MyCustomType(x=a, y=\"t1\")\n\n @task\n def t2(a: MyCustomType, b: str) -> (MyCustomType, int):\n return MyCustomType(x=a.x, y=f\"{a.y} {b}\"), 5\n\n @workflow\n def my_wf(a: int, b: str) -> (MyCustomType, int):\n return t2(a=t1(a=a), b=b)\n\n c, v = my_wf(a=10, b=\"hello\")\n assert v == 5\n assert c.x == 10\n assert c.y == \"t1 hello\"\n\n\ndef test_arbit_class():\n class Foo(object):\n pass\n\n with pytest.raises(ValueError):\n\n @task\n def t1(a: int) -> Foo:\n return Foo()\n\n\ndef test_dataclass_more():\n @dataclass_json\n @dataclass\n class Datum(object):\n x: int\n y: str\n z: typing.Dict[int, str]\n\n @task\n def stringify(x: int) -> Datum:\n return Datum(x=x, y=str(x), z={x: str(x)})\n\n @task\n def add(x: Datum, y: Datum) -> Datum:\n x.z.update(y.z)\n return Datum(x=x.x + y.x, y=x.y + y.y, z=x.z)\n\n @workflow\n def wf(x: int, y: int) -> Datum:\n return add(x=stringify(x=x), y=stringify(x=y))\n\n wf(x=10, y=20)\n\n\ndef test_environment():\n @task(environment={\"FOO\": \"foofoo\", \"BAZ\": \"baz\"})\n def t1(a: int) -> str:\n a = a + 2\n return \"now it's \" + str(a)\n\n @workflow\n def my_wf(a: int) -> str:\n x = t1(a=a)\n return x\n\n serialization_settings = context_manager.SerializationSettings(\n project=\"test_proj\",\n domain=\"test_domain\",\n version=\"abc\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"image\", tag=\"name\")),\n env={\"FOO\": \"foo\", \"BAR\": \"bar\"},\n )\n with context_manager.FlyteContext.current_context().new_compilation_context():\n sdk_task = get_serializable(serialization_settings, t1)\n assert sdk_task.container.env == {\"FOO\": \"foofoo\", \"BAR\": \"bar\", \"BAZ\": \"baz\"}\n\n\ndef test_resources():\n @task(requests=Resources(cpu=\"1\"), limits=Resources(cpu=\"2\", mem=\"400M\"))\n def t1(a: int) -> str:\n a = a + 2\n return \"now it's \" + str(a)\n\n @task(requests=Resources(cpu=\"3\"))\n def t2(a: int) -> str:\n a = a + 200\n return \"now it's \" + str(a)\n\n 
@workflow\n def my_wf(a: int) -> str:\n x = t1(a=a)\n return x\n\n serialization_settings = context_manager.SerializationSettings(\n project=\"test_proj\",\n domain=\"test_domain\",\n version=\"abc\",\n image_config=ImageConfig(Image(name=\"name\", fqn=\"image\", tag=\"name\")),\n env={},\n )\n with context_manager.FlyteContext.current_context().new_compilation_context():\n sdk_task = get_serializable(serialization_settings, t1)\n assert sdk_task.container.resources.requests == [\n _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, \"1\")\n ]\n assert sdk_task.container.resources.limits == [\n _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, \"2\"),\n _resource_models.ResourceEntry(_resource_models.ResourceName.MEMORY, \"400M\"),\n ]\n\n sdk_task2 = get_serializable(serialization_settings, t2)\n assert sdk_task2.container.resources.requests == [\n _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, \"3\")\n ]\n assert sdk_task2.container.resources.limits == []\n\n\ndef test_wf_explicitly_returning_empty_task():\n @task\n def t1():\n ...\n\n @workflow\n def my_subwf():\n return t1() # This forces the wf _local_execute to handle VoidPromises\n\n assert my_subwf() is None\n" ]
[ [ "pandas.DataFrame" ] ]
rousseab/pymatgen
[ "ecfba4a576a21f31c222be8fd20ce2ddaa77495a" ]
[ "pymatgen/io/vasp/tests/test_sets.py" ]
[ "# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport unittest\nimport os\nimport shutil\n\nimport numpy as np\nfrom monty.json import MontyDecoder\n\nfrom pymatgen.io.vasp.sets import MITVaspInputSet, MITHSEVaspInputSet, \\\n MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\\\n MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\\\n MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\\\n MPOpticsNonSCFVaspInputSet\nfrom pymatgen.io.vasp.inputs import Poscar, Incar\nfrom pymatgen import Specie, Lattice, Structure\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"..\",\n 'test_files')\n\ndec = MontyDecoder()\n\n\nclass MITMPVaspInputSetTest(unittest.TestCase):\n\n def setUp(self):\n if \"VASP_PSP_DIR\" not in os.environ:\n os.environ[\"VASP_PSP_DIR\"] = test_dir\n filepath = os.path.join(test_dir, 'POSCAR')\n poscar = Poscar.from_file(filepath)\n self.struct = poscar.structure\n\n self.mitparamset = MITVaspInputSet()\n self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)\n self.mithseparamset = MITHSEVaspInputSet()\n self.paramset = MPVaspInputSet()\n self.userparamset = MPVaspInputSet(\n user_incar_settings={'MAGMOM': {\"Fe\": 10, \"S\": -5, \"Mn3+\": 100}}\n )\n self.mitggaparam = MITGGAVaspInputSet()\n self.mpstaticparamset = MPStaticVaspInputSet()\n self.mpnscfparamsetu = MPNonSCFVaspInputSet(\n {\"NBANDS\": 50}, mode=\"Uniform\")\n self.mpnscfparamsetl = MPNonSCFVaspInputSet(\n {\"NBANDS\": 60}, mode=\"Line\")\n self.mphseparamset = MPHSEVaspInputSet()\n self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode=\"Line\")\n self.mpbshseparamsetu = MPBSHSEVaspInputSet(\n mode=\"Uniform\", added_kpoints=[[0.5, 0.5, 0.0]])\n self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()\n\n def test_get_poscar(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Fe\", \"Mn\"], coords)\n\n s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure\n s_sorted = self.mitparamset.get_poscar(struct).structure\n\n self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')\n self.assertEqual(s_sorted[0].specie.symbol, 'Mn')\n\n def test_get_potcar_symbols(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n coords.append([0.75, 0.25, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"P\", \"Fe\", \"O\"], coords)\n\n syms = self.paramset.get_potcar_symbols(struct)\n self.assertEqual(syms, ['Fe_pv', 'P', 'O'])\n\n syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)\n self.assertEqual(syms, ['P', 'Fe_pv', 'O'])\n\n def test_false_potcar_hash(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n coords.append([0.75, 0.25, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"P\", \"Fe\", \"O\"], coords)\n\n self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'\n self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)\n self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'\n\n def test_lda_potcar(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 
0.5, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"P\", \"Fe\"], coords)\n p = MITVaspInputSet(potcar_functional=\"LDA\").get_potcar(struct)\n self.assertEqual(p.functional, 'LDA')\n\n def test_get_nelect(self):\n coords = [[0]*3, [0.5]*3, [0.75]*3]\n lattice = Lattice.cubic(4)\n s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)\n self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)\n\n def test_get_incar(self):\n incar = self.paramset.get_incar(self.struct)\n\n self.assertEqual(incar['LDAUU'], [5.3, 0, 0])\n self.assertAlmostEqual(incar['EDIFF'], 0.0012)\n\n incar = self.mitparamset.get_incar(self.struct)\n self.assertEqual(incar['LDAUU'], [4.0, 0, 0])\n self.assertAlmostEqual(incar['EDIFF'], 0.0012)\n\n incar_gga = self.mitggaparam.get_incar(self.struct)\n self.assertNotIn(\"LDAU\", incar_gga)\n\n incar_static = self.mpstaticparamset.get_incar(self.struct)\n self.assertEqual(incar_static[\"NSW\"], 0)\n\n incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)\n self.assertEqual(incar_nscfl[\"NBANDS\"], 60)\n\n incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)\n self.assertEqual(incar_nscfu[\"ISYM\"], 0)\n\n incar_hse = self.mphseparamset.get_incar(self.struct)\n self.assertEqual(incar_hse['LHFCALC'], True)\n self.assertEqual(incar_hse['HFSCREEN'], 0.2)\n\n incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)\n self.assertEqual(incar_hse_bsl['LHFCALC'], True)\n self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)\n self.assertEqual(incar_hse_bsl['NSW'], 0)\n\n incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)\n self.assertEqual(incar_hse_bsu['LHFCALC'], True)\n self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)\n self.assertEqual(incar_hse_bsu['NSW'], 0)\n\n incar_diel = self.mpdielparamset.get_incar(self.struct)\n self.assertEqual(incar_diel['IBRION'], 8)\n self.assertEqual(incar_diel['LEPSILON'], True)\n\n si = 14\n coords = list()\n coords.append(np.array([0, 0, 0]))\n coords.append(np.array([0.75, 0.5, 0.75]))\n\n #Silicon structure for testing.\n latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]]))\n struct = Structure(latt, [si, si], coords)\n incar = self.paramset.get_incar(struct)\n self.assertNotIn(\"LDAU\", incar)\n\n incar = self.mithseparamset.get_incar(self.struct)\n self.assertTrue(incar['LHFCALC'])\n\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Fe\", \"Mn\"], coords)\n\n incar = self.paramset.get_incar(struct)\n self.assertNotIn('LDAU', incar)\n\n #check fluorides\n struct = Structure(lattice, [\"Fe\", \"F\"], coords)\n incar = self.paramset.get_incar(struct)\n self.assertEqual(incar['LDAUU'], [5.3, 0])\n self.assertEqual(incar['MAGMOM'], [5, 0.6])\n\n struct = Structure(lattice, [\"Fe\", \"F\"], coords)\n incar = self.mitparamset.get_incar(struct)\n self.assertEqual(incar['LDAUU'], [4.0, 0])\n\n #Make sure this works with species.\n struct = Structure(lattice, [\"Fe2+\", \"O2-\"], coords)\n incar = self.paramset.get_incar(struct)\n self.assertEqual(incar['LDAUU'], [5.3, 0])\n\n struct = Structure(lattice, [\"Fe\", \"Mn\"], coords,\n site_properties={'magmom': (5.2, -4.5)})\n incar = self.paramset.get_incar(struct)\n self.assertEqual(incar['MAGMOM'], 
[-4.5, 5.2])\n incar = self.mpstaticparamset.get_incar(struct)\n self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])\n incar = self.mitparamset_unsorted.get_incar(struct)\n self.assertEqual(incar['MAGMOM'], [5.2, -4.5])\n\n struct = Structure(lattice, [Specie(\"Fe\", 2, {'spin': 4.1}), \"Mn\"],\n coords)\n incar = self.paramset.get_incar(struct)\n self.assertEqual(incar['MAGMOM'], [5, 4.1])\n incar = self.mpnscfparamsetl.get_incar(struct)\n self.assertEqual(incar.get('MAGMOM', None), None)\n\n struct = Structure(lattice, [\"Mn3+\", \"Mn4+\"], coords)\n incar = self.mitparamset.get_incar(struct)\n self.assertEqual(incar['MAGMOM'], [4, 3])\n incar = self.mpnscfparamsetu.get_incar(struct)\n self.assertEqual(incar.get('MAGMOM', None), None)\n\n self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],\n [100, 0.6])\n\n #sulfide vs sulfate test\n\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n coords.append([0.25, 0.5, 0])\n\n struct = Structure(lattice, [\"Fe\", \"Fe\", \"S\"], coords)\n incar = self.mitparamset.get_incar(struct)\n self.assertEqual(incar['LDAUU'], [1.9, 0])\n\n #Make sure Matproject sulfides are ok.\n self.assertNotIn('LDAUU', self.paramset.get_incar(struct))\n self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))\n\n struct = Structure(lattice, [\"Fe\", \"S\", \"O\"], coords)\n incar = self.mitparamset.get_incar(struct)\n self.assertEqual(incar['LDAUU'], [4.0, 0, 0])\n\n #Make sure Matproject sulfates are ok.\n self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])\n self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],\n [5.3, 0, 0])\n\n self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],\n [10, -5, 0.6])\n\n def test_optics(self):\n self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(\n '{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',\n nedos=1145)\n self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))\n incar = Incar.from_file('optics_test_dir/INCAR')\n self.assertTrue(incar['LOPTICS'])\n self.assertEqual(incar['NEDOS'], 1145)\n\n #Remove the directory in which the inputs have been created\n shutil.rmtree('optics_test_dir')\n\n def test_get_kpoints(self):\n kpoints = self.paramset.get_kpoints(self.struct)\n self.assertEqual(kpoints.kpts, [[2, 4, 6]])\n self.assertEqual(kpoints.style, 'Monkhorst')\n\n kpoints = self.mitparamset.get_kpoints(self.struct)\n self.assertEqual(kpoints.kpts, [[2, 4, 6]])\n self.assertEqual(kpoints.style, 'Monkhorst')\n\n kpoints = self.mpstaticparamset.get_kpoints(self.struct)\n self.assertEqual(kpoints.kpts, [[6, 6, 4]])\n self.assertEqual(kpoints.style, 'Monkhorst')\n\n kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)\n self.assertEqual(kpoints.num_kpts, 140)\n self.assertEqual(kpoints.style, 'Reciprocal')\n\n kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)\n self.assertEqual(kpoints.num_kpts, 168)\n\n kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)\n self.assertAlmostEqual(kpoints.num_kpts, 164)\n self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)\n self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)\n self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)\n self.assertAlmostEqual(kpoints.kpts[26][0], 0.0714285714286)\n self.assertAlmostEqual(kpoints.kpts[26][1], 0.0)\n self.assertAlmostEqual(kpoints.kpts[26][2], 0.0)\n self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)\n self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)\n self.assertAlmostEqual(kpoints.kpts[-1][2], 0.5)\n\n kpoints = 
self.mpbshseparamsetu.get_kpoints(self.struct)\n self.assertAlmostEqual(kpoints.num_kpts, 25)\n self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)\n self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)\n self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)\n self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)\n self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)\n self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)\n\n def test_get_all_vasp_input(self):\n d = self.mitparamset.get_all_vasp_input(self.struct)\n self.assertEqual(d[\"INCAR\"][\"ISMEAR\"], -5)\n self.struct.make_supercell(4)\n d = self.mitparamset.get_all_vasp_input(self.struct)\n self.assertEqual(d[\"INCAR\"][\"ISMEAR\"], 0)\n\n def test_to_from_dict(self):\n self.mitparamset = MITVaspInputSet()\n self.mithseparamset = MITHSEVaspInputSet()\n self.paramset = MPVaspInputSet()\n self.userparamset = MPVaspInputSet(\n user_incar_settings={'MAGMOM': {\"Fe\": 10, \"S\": -5, \"Mn3+\": 100}}\n )\n\n d = self.mitparamset.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(v.incar_settings[\"LDAUU\"][\"O\"][\"Fe\"], 4)\n\n d = self.mitggaparam.as_dict()\n v = dec.process_decoded(d)\n self.assertNotIn(\"LDAUU\", v.incar_settings)\n\n d = self.mithseparamset.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(v.incar_settings[\"LHFCALC\"], True)\n\n d = self.mphseparamset.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(v.incar_settings[\"LHFCALC\"], True)\n\n d = self.paramset.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(v.incar_settings[\"LDAUU\"][\"O\"][\"Fe\"], 5.3)\n\n d = self.userparamset.as_dict()\n v = dec.process_decoded(d)\n #self.assertEqual(type(v), MPVaspInputSet)\n self.assertEqual(v.incar_settings[\"MAGMOM\"],\n {\"Fe\": 10, \"S\": -5, \"Mn3+\": 100})\n\n\nclass MITMDVaspInputSetTest(unittest.TestCase):\n\n def setUp(self):\n filepath = os.path.join(test_dir, 'POSCAR')\n poscar = Poscar.from_file(filepath)\n self.struct = poscar.structure\n self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)\n\n def test_get_potcar_symbols(self):\n syms = self.mitmdparam.get_potcar_symbols(self.struct)\n self.assertEqual(syms, ['Fe', 'P', 'O'])\n\n def test_get_incar(self):\n incar = self.mitmdparam.get_incar(self.struct)\n self.assertNotIn(\"LDAUU\", incar)\n self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)\n\n def test_get_kpoints(self):\n kpoints = self.mitmdparam.get_kpoints(self.struct)\n self.assertEqual(kpoints.kpts, [(1, 1, 1)])\n self.assertEqual(kpoints.style, 'Gamma')\n\n def test_to_from_dict(self):\n d = self.mitmdparam.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(type(v), MITMDVaspInputSet)\n self.assertEqual(v.incar_settings[\"TEBEG\"], 300)\n\n\nclass MITNEBVaspInputSetTest(unittest.TestCase):\n\n def setUp(self):\n filepath = os.path.join(test_dir, 'POSCAR')\n poscar = Poscar.from_file(filepath)\n self.struct = poscar.structure\n self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)\n\n def test_get_potcar_symbols(self):\n syms = self.vis.get_potcar_symbols(self.struct)\n self.assertEqual(syms, ['Fe', 'P', 'O'])\n\n def test_get_incar(self):\n incar = self.vis.get_incar(self.struct)\n self.assertNotIn(\"LDAUU\", incar)\n self.assertAlmostEqual(incar['EDIFF'], 0.00005)\n\n def test_get_kpoints(self):\n kpoints = self.vis.get_kpoints(self.struct)\n self.assertEqual(kpoints.kpts, [[2, 4, 6]])\n self.assertEqual(kpoints.style, 'Monkhorst')\n\n def test_to_from_dict(self):\n d = self.vis.as_dict()\n v = dec.process_decoded(d)\n self.assertEqual(v.incar_settings[\"IMAGES\"], 10)\n\n def 
test_write_inputs(self):\n c1 = [[0.5] * 3, [0.9] * 3]\n c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]\n s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)\n s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)\n structs = []\n for s in s1.interpolate(s2, 3, pbc=True):\n structs.append(Structure.from_sites(s.sites,\n to_unit_cell=True))\n\n fc = self.vis._process_structures(structs)[2].frac_coords\n self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.allclose", "numpy.array" ] ]
Rougnt/ArkNightAutoClick
[ "693ba25227bdfbf228a3d5b3a04d86ea8135b78b" ]
[ "Photo.py" ]
[ "import cv2\nimport os,shutil\nimport numpy as np\nfrom Adb import Adb\nimport time\n\nclass Photo():\n '''\n Extract image information and compare images\n '''\n def __init__(self,img_path) -> None:\n '''\n Read the image\n '''\n self.img = cv2.imread(img_path)\n\n\n \n\nclass sourceData():\n '''\n Collect test data\n '''\n def __init__(self) -> None:\n pass\n\n @staticmethod\n def getScreenPhoto():\n adb = Adb(device='d5c42b2a')\n for x in range(100):\n adb.screenCap()\n adb.pullBackScreenCap(os.path.join('.','photo',time.strftime(\"%Y-%m-%d_%H-%M-%S.png\", time.localtime()) ))\n print(\"Screenshot taken\",time.asctime(time.localtime()))\n time.sleep(3)\n\n @staticmethod\n def calcOujilide(img):\n img_new = img[938:1035,1935:2247]\n img_new_num = np.sum(img_new)/(img_new.shape[0]*img_new.shape[1]*img_new.shape[2])\n return img_new_num\n \n @staticmethod\n def calcFangcha(img):\n '''\n Compute the variance of the image region 938:1035,1935:2247, used to compare image similarity\n Method: average the pixels of each row, then take the variance of those row averages\n return (int)\n '''\n img_new = img[938:1013,1935:2247]\n img_avg = np.mean(img_new,axis=(0,2))\n return np.var(img_avg)\n\n\nif __name__ == '__main__':\n static_num = sourceData.calcFangcha(cv2.imread(os.path.join(\"adb\",\"screen.png\")))\n for img_name in os.listdir(os.path.join(\"photo\")):\n img = cv2.imread(os.path.join(\"photo\",img_name))\n img_num = sourceData.calcFangcha(img)\n chazhi = abs(static_num-img_num)\n # chazhi = (abs(static_num**2-img_num**2))**0.5\n print(img_name,\"difference:\",chazhi)\n if chazhi<20:\n print(\"Copy this file: \",img_name)\n shutil.copyfile(os.path.join(\"photo\",img_name),os.path.join(\"photo2\",img_name))\n print(\"Write this file: \",img_name)\n cv2.imwrite(os.path.join(\"photo3\",img_name),img[938:1013,1935:2247])\n\n\n # '''Take screenshots for 400s'''\n # sourceData.getScreenPhoto()" ]
[ [ "numpy.sum", "numpy.var", "numpy.mean" ] ]
Pathetiue/safe-exploration
[ "e6c0bc57b7b51fe3e3c97d51721893fe297b2b11" ]
[ "safe_exploration/visualization/utils_visualization.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 21 15:49:49 2017\n\n@author: tkoller\n\"\"\"\n\nimport numpy as np\nimport numpy.linalg as nLa\n\nfrom ..utils import unavailable\ntry:\n import matplotlib.pyplot as plt\n _has_matplotlib = True\nexcept:\n _has_matplotlib = False\n\n\n@unavailable(not _has_matplotlib, \"matplotlib\")\ndef plot_ellipsoid_3D(p, q, ax, n_points=100):\n \"\"\" Plot an ellipsoid in 3D\n\n Based on\n https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 3x1 array[float]\n Center of the ellipsoid\n q: 3x3 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n\n \"\"\"\n\n assert np.shape(p) == (3, 1), \"p needs to be a 3x1 vector\"\n assert np.shape(q) == (3, 3), \"q needs to be a spd 3x3 matrix\"\n assert np.allclose(q, 0.5 * (q + q.T)), \"q needs to be spd\"\n # transform to radius/center parametrization\n U, s, rotation = nLa.svd(q)\n assert np.all(s > 0), \"q needs to be positive definite\"\n radii = 1.0 / np.sqrt(s)\n\n # get x,y,z of sphere and transform\n u = np.linspace(0.0, 2.0 * np.pi, n_points)\n v = np.linspace(0.0, np.pi, n_points)\n x = radii[0] * np.outer(np.cos(u), np.sin(v))\n y = radii[1] * np.outer(np.sin(u), np.sin(v))\n z = radii[2] * np.outer(np.ones_like(u), np.cos(v))\n for i in range(len(x)):\n for j in range(len(x)):\n [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],\n rotation) + p.flatten()\n\n # plot the result\n ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)\n return ax\n\n\n@unavailable(not _has_matplotlib, \"matplotlib\")\ndef plot_ellipsoid_2D(p, q, ax, n_points=100, color=\"r\"):\n \"\"\" Plot an ellipsoid in 2D\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 2x1 array[float]\n Center of the ellipsoid\n q: 2x2 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n \"\"\"\n plt.sca(ax)\n r = nLa.cholesky(q).T; # checks spd inside the function\n t = np.linspace(0, 2 * np.pi, n_points);\n z = [np.cos(t), np.sin(t)];\n ellipse = np.dot(r, z) + p;\n handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)\n\n return ax, handle\n" ]
[ [ "numpy.sin", "numpy.dot", "numpy.ones_like", "matplotlib.pyplot.sca", "numpy.shape", "numpy.allclose", "numpy.sqrt", "numpy.linalg.cholesky", "numpy.all", "numpy.cos", "numpy.linspace" ] ]
randomvi/opencv-color-detector
[ "8106178434b15c116d8a93140a35f2c341480f11" ]
[ "main.py" ]
[ "import numpy as np \nimport cv2\n\n# To capture webcam live stream, simply change the following line to: cap = cv2.VideoCapture(0)\ncap = cv2.VideoCapture('./assets/video.mp4')\n\nwhile (True):\n # Capture frame by frame\n _, frame = cap.read()\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # hsv (hue sat value) for the color red\n lower_color = np.array([150, 150, 50])\n upper_color = np.array([180, 255, 150])\n\n # mask will be anything in the range lower_color to upper_color (red)\n mask = cv2.inRange(hsv, lower_color, upper_color)\n \n res = cv2.bitwise_and(frame, frame, mask = mask)\n\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(frame, contours, -1, (200, 255, 0), 4)\n\n if len(contours) > 0: \n cv2.putText(mask, 'Relevant Object Detected', (100, 300), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n cv2.imshow('frame', frame)\n cv2.imshow('mask', mask)\n cv2.imshow('res', res)\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
martinhoang11/vietnamese-ocr-toolbox
[ "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5", "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5", "524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5" ]
[ "modules/__init__.py", "modules/retrieval/text_classification/libs/losses/focal.py", "modules/retrieval/text_classification/libs/loggers/tsboard.py" ]
[ "import os\nimport cv2\nimport shutil\nimport argparse\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom .preprocess import DocScanner\nimport modules.detection as detection\nimport modules.ocr as ocr\nimport modules.retrieval as retrieval\nimport modules.correction as correction\nfrom tool.config import Config \nfrom tool.utils import download_pretrained_weights\n\n\nCACHE_DIR = '.cache'\n\nclass Preprocess:\n def __init__(\n self, \n find_best_rotation=True,\n det_model=None,\n ocr_model=None):\n \n self.find_best_rotation = find_best_rotation\n\n if self.find_best_rotation:\n self.crop_path = os.path.join(CACHE_DIR, 'crops')\n if os.path.exists(self.crop_path):\n shutil.rmtree(self.crop_path)\n os.mkdir(self.crop_path)\n self.det_model = det_model if det_model is not None else Detection()\n self.ocr_model = ocr_model if ocr_model is not None else OCR()\n self.scanner = DocScanner()\n\n def __call__(self, image, return_score=False):\n \n\n output = self.scanner.scan(image)\n \n if self.find_best_rotation:\n\n _ = self.det_model(\n output,\n crop_region=True,\n return_result=False,\n output_path=CACHE_DIR)\n\n orientation_scores = np.array([0.,0.,0.,0.])\n num_crops = len(os.listdir(self.crop_path))\n for i in range(num_crops):\n single_crop_path = os.path.join(self.crop_path, f'{i}.jpg')\n if not os.path.isfile(single_crop_path):\n continue\n img = cv2.imread(single_crop_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n orientation_scores += ocr.find_rotation_score(img, self.ocr_model)\n best_orient = np.argmax(orientation_scores)\n print(f\"Rotate image by {best_orient*90} degrees\")\n\n # Rotate the original image\n output = ocr.rotate_img(output, best_orient)\n \n if return_score:\n return output, orientation_scores\n else:\n return output\n\nclass Detection:\n def __init__(self, config_path=None, weight_path=None, model_name=None):\n if config_path is None:\n config_path = 'tool/config/detection/configs.yaml'\n config = Config(config_path)\n self.model_name = model_name\n if weight_path is None:\n if self.model_name is None:\n self.model_name = \"pan_resnet18_default\"\n tmp_path = os.path.join(CACHE_DIR, f'{self.model_name}.pth')\n download_pretrained_weights(self.model_name, cached=tmp_path)\n weight_path = tmp_path\n self.model = detection.PAN(config, model_path=weight_path)\n \n def __call__(\n self, \n image,\n crop_region=False,\n return_result=False,\n output_path=None):\n \n \"\"\"\n Input: path to image\n Output: boxes (coordinates of 4 points)\n \"\"\"\n\n if output_path is None:\n assert crop_region, \"Please specify output_path\"\n else:\n output_path = os.path.join(output_path, 'crops')\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n os.mkdir(output_path)\n\n \n # Detect and OCR for final result\n _, boxes_list, _ = self.model.predict(\n image, \n output_path, \n crop_region=crop_region)\n\n if return_result:\n img = detection.draw_bbox(image, boxes_list)\n \n if return_result:\n return boxes_list, img\n else:\n return boxes_list\n\nclass OCR:\n def __init__(self, config_path=None, weight_path=None, model_name=None):\n if config_path is None:\n config_path = 'tool/config/ocr/configs.yaml'\n config = Config(config_path)\n ocr_config = ocr.Config.load_config_from_name(config.model_name)\n ocr_config['cnn']['pretrained']=False\n ocr_config['device'] = 'cuda:0'\n ocr_config['predictor']['beamsearch']=False\n\n self.model_name = model_name\n if weight_path is None:\n if 
self.model_name is None:\n self.model_name = \"transformerocr_default_vgg\"\n tmp_path = os.path.join(CACHE_DIR, f'{self.model_name}.pth')\n download_pretrained_weights(self.model_name, cached=tmp_path)\n weight_path = tmp_path\n ocr_config['weights'] = weight_path\n self.model = ocr.Predictor(ocr_config)\n\n def __call__(self, img, return_prob=False):\n if isinstance(img, np.ndarray):\n img = Image.fromarray(img)\n return self.model.predict(img, return_prob)\n\n def predict_folder(self, img_paths, return_probs=False):\n texts = []\n if return_probs:\n probs = []\n for i, img_path in enumerate(img_paths):\n img = Image.open(img_path)\n if return_probs:\n text, prob = self(img, True)\n texts.append(text)\n probs.append(prob)\n else:\n text = self(img, False)\n texts.append(text)\n\n if return_probs:\n return texts, probs\n else:\n return texts\n\nclass Retrieval:\n def __init__(self, class_mapping, dictionary=None, mode=\"all\", bert_weight=None):\n assert mode in [\"all\", \"bert\", \"trie\", \"ed\"], \"Mode is not supported\"\n self.mode = mode\n\n self.dictionary = dictionary\n self.class_mapping = class_mapping\n self.idx_mapping = {v:k for k,v in class_mapping.items()}\n\n if self.mode == 'bert':\n self.use_bert = True\n if self.mode == 'trie':\n self.use_trie = True\n if self.mode == 'ed':\n self.use_ed = True\n if self.mode == 'all':\n self.use_bert = True\n self.use_trie = True\n self.use_ed = True\n\n if self.use_bert:\n self.bert = retrieval.PhoBERT(self.idx_mapping, bert_weight)\n if self.use_ed:\n self.ed = retrieval.get_heuristic_retrieval('diff')\n if self.use_trie:\n self.trie = retrieval.get_heuristic_retrieval('trie')\n\n if self.use_ed or self.use_trie:\n if self.dictionary is None:\n self.dictionary = {}\n df = pd.read_csv('./modules/retrieval/heuristic/custom-dictionary.csv')\n for id, row in df.iterrows():\n self.dictionary[row.text.lower()] = row.lbl\n\n def ensemble(self, df):\n preds = []\n probs = []\n\n for id, row in df.iterrows():\n if row[\"timestamp\"] == 1:\n preds.append(\"TIMESTAMP\")\n probs.append(5.0)\n elif row[\"bert_labels\"] == row[\"diff_labels\"]:\n preds.append(row[\"bert_labels\"])\n probs.append(row[\"bert_probs\"] + row[\"diff_probs\"])\n elif row[\"bert_labels\"] == row[\"trie_labels\"]:\n preds.append(row[\"bert_labels\"])\n probs.append(row[\"bert_probs\"] + row[\"trie_probs\"])\n elif row[\"trie_labels\"] == row[\"diff_labels\"]:\n preds.append(row[\"trie_labels\"])\n probs.append(row[\"trie_probs\"] + row[\"diff_probs\"])\n else:\n if row[\"diff_probs\"] >= 0.4:\n preds.append(row[\"diff_labels\"])\n probs.append(row[\"diff_probs\"])\n elif row[\"trie_probs\"] >= 0.25:\n preds.append(row[\"trie_labels\"])\n probs.append(row[\"trie_probs\"])\n else:\n preds.append(row[\"bert_labels\"])\n probs.append(row[\"bert_probs\"]/3)\n\n return preds, probs\n\n def __call__(self, query_texts):\n df = pd.DataFrame()\n if self.use_bert:\n preds, probs = self.bert(query_texts)\n df[\"bert_labels\"] = preds\n df[\"bert_probs\"] = probs\n if self.use_ed:\n preds, probs = self.ed(query_texts, self.dictionary)\n df[\"diff_labels\"] = [self.idx_mapping[x] for x in preds]\n df[\"diff_probs\"] = probs\n if self.use_trie:\n preds, probs = self.trie(query_texts, self.dictionary)\n df[\"trie_labels\"] = [self.idx_mapping[x] for x in preds]\n df[\"trie_probs\"] = probs\n\n timestamps = retrieval.regex_timestamp(query_texts)\n df[\"timestamp\"] = timestamps\n preds, probs = self.ensemble(df)\n return preds, probs\n\n \nclass Correction:\n def __init__(self, 
dictionary=None, mode=\"ed\"):\n assert mode in [\"trie\", \"ed\"], \"Mode is not supported\"\n self.mode = mode\n self.dictionary = dictionary\n\n self.use_trie = False\n self.use_ed = False\n\n if self.mode == 'trie':\n self.use_trie = True\n if self.mode == 'ed':\n self.use_ed = True\n \n if self.use_ed:\n self.ed = correction.get_heuristic_correction('diff')\n if self.use_trie:\n self.trie = correction.get_heuristic_correction('trie')\n \n if self.use_ed or self.use_trie:\n if self.dictionary is None:\n self.dictionary = {}\n df = pd.read_csv('./modules/retrieval/heuristic/custom-dictionary.csv')\n for id, row in df.iterrows():\n self.dictionary[row.text.lower()] = row.lbl\n\n def __call__(self, query_texts, return_score=False):\n if self.use_ed:\n preds, score = self.ed(query_texts, self.dictionary)\n \n if self.use_trie:\n preds, score = self.trie(query_texts, self.dictionary)\n \n if return_score:\n return preds, score\n else:\n return preds", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=0, alpha=None, size_average=True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n if isinstance(alpha, (float, int)):\n self.alpha = torch.Tensor([alpha, 1 - alpha])\n if isinstance(alpha, list):\n self.alpha = torch.Tensor(alpha)\n self.size_average = size_average\n\n def forward(self, input, target):\n if input.dim() > 2:\n # N,C,H,W => N,C,H*W\n input = input.view(input.size(0), input.size(1), -1)\n input = input.transpose(1, 2) # N,C,H*W => N,H*W,C\n input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C\n target = target.view(-1, 1)\n\n logpt = F.log_softmax(input, dim=1)\n logpt = logpt.gather(1, target)\n logpt = logpt.view(-1)\n pt = Variable(logpt.detach().exp())\n\n if self.alpha is not None:\n if self.alpha.type() != input.detach().type():\n self.alpha = self.alpha.type_as(input.detach())\n at = self.alpha.gather(0, target.detach().view(-1))\n logpt = logpt * Variable(at)\n\n loss = -1 * (1 - pt) ** self.gamma * logpt\n if self.size_average:\n return loss.mean()\n else:\n return loss.sum()\n", "from torch.utils.tensorboard import SummaryWriter\n\n\nclass TensorboardLogger():\n def __init__(self, path):\n assert path != None, \"path is None\"\n self.writer = SummaryWriter(log_dir=path)\n\n def update_loss(self, phase, value, step):\n self.writer.add_scalar(f'{phase}/loss', value, step)\n\n def update_metric(self, phase, metric, value, step):\n self.writer.add_scalar(f'{phase}/{metric}', value, step)\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.argmax", "pandas.read_csv" ], [ "torch.autograd.Variable", "torch.Tensor", "torch.nn.functional.log_softmax" ], [ "torch.utils.tensorboard.SummaryWriter" ] ]
ybettan/AirSimTensorFlow
[ "7c9a17d97a68e99da310f2e537bfb66433056066" ]
[ "run_demo.py" ]
[ "from __future__ import print_function\n\nimport pprint\nimport os\nimport time\nimport msgpackrpc\nimport math\nimport msgpackrpc #install as admin: pip install msgpack-rpc-python\nimport msgpack\nimport sys\nimport inspect\nimport types\nimport re\nimport shutil\n\nimport numpy as np #pip install numpy\n\n\n\n#==============================================================================\n# Classes\n#==============================================================================\n\nclass MsgpackMixin:\n def to_msgpack(self, *args, **kwargs):\n return self.__dict__ #msgpack.dump(self.to_dict(*args, **kwargs))\n\n @classmethod\n def from_msgpack(cls, encoded):\n obj = cls()\n obj.__dict__ = {k.decode('utf-8'): v for k, v in encoded.items()}\n return obj\n\n\nclass AirSimImageType: \n Scene = 0\n DepthPlanner = 1\n DepthPerspective = 2\n DepthVis = 3\n DisparityNormalized = 4\n Segmentation = 5\n SurfaceNormals = 6\n\nclass DrivetrainType:\n MaxDegreeOfFreedom = 0\n ForwardOnly = 1\n \nclass LandedState:\n Landed = 0\n Flying = 1\n\nclass Vector3r(MsgpackMixin):\n x_val = np.float32(0)\n y_val = np.float32(0)\n z_val = np.float32(0)\n\n def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0)):\n self.x_val = x_val\n self.y_val = y_val\n self.z_val = z_val\n\n\nclass Quaternionr(MsgpackMixin):\n w_val = np.float32(0)\n x_val = np.float32(0)\n y_val = np.float32(0)\n z_val = np.float32(0)\n\n def __init__(self, x_val = np.float32(0), y_val = np.float32(0), z_val = np.float32(0), w_val = np.float32(1)):\n self.x_val = x_val\n self.y_val = y_val\n self.z_val = z_val\n self.w_val = w_val\n\nclass Pose(MsgpackMixin):\n position = Vector3r()\n orientation = Quaternionr()\n\n def __init__(self, position_val, orientation_val):\n self.position = position_val\n self.orientation = orientation_val\n\n\nclass CollisionInfo(MsgpackMixin):\n has_collided = False\n normal = Vector3r()\n impact_point = Vector3r()\n position = Vector3r()\n penetration_depth = np.float32(0)\n time_stamp = np.float32(0)\n object_name = \"\"\n object_id = -1\n\nclass GeoPoint(MsgpackMixin):\n latitude = 0.0\n longitude = 0.0\n altitude = 0.0\n\nclass YawMode(MsgpackMixin):\n is_rate = True\n yaw_or_rate = 0.0\n def __init__(self, is_rate = True, yaw_or_rate = 0.0):\n self.is_rate = is_rate\n self.yaw_or_rate = yaw_or_rate\n\nclass ImageRequest(MsgpackMixin):\n camera_id = np.uint8(0)\n image_type = AirSimImageType.Scene\n pixels_as_float = False\n compress = False\n\n def __init__(self, camera_id, image_type, pixels_as_float = False, compress = True):\n self.camera_id = camera_id\n self.image_type = image_type\n self.pixels_as_float = pixels_as_float\n self.compress = compress\n\n\nclass ImageResponse(MsgpackMixin):\n image_data_uint8 = np.uint8(0)\n image_data_float = np.float32(0)\n camera_position = Vector3r()\n camera_orientation = Quaternionr()\n time_stamp = np.uint64(0)\n message = ''\n pixels_as_float = np.float32(0)\n compress = True\n width = 0\n height = 0\n image_type = AirSimImageType.Scene\n\nclass CarControls(MsgpackMixin):\n throttle = np.float32(0)\n steering = np.float32(0)\n brake = np.float32(0)\n handbrake = False\n is_manual_gear = False\n manual_gear = 0\n gear_immediate = True\n\n def set_throttle(self, throttle_val, forward):\n if (forward):\n is_manual_gear = False\n manual_gear = 0\n throttle = abs(throttle_val)\n else:\n is_manual_gear = False\n manual_gear = -1\n throttle = - abs(throttle_val)\n\nclass CarState(MsgpackMixin):\n speed = np.float32(0)\n gear = 0\n 
position = Vector3r()\n velocity = Vector3r()\n orientation = Quaternionr()\n\nclass AirSimClientBase:\n def __init__(self, ip, port):\n self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = 3600)\n \n def ping(self):\n return self.client.call('ping')\n \n def reset(self):\n self.client.call('reset')\n\n def confirmConnection(self):\n print('Waiting for connection: ', end='')\n home = self.getHomeGeoPoint()\n while ((home.latitude == 0 and home.longitude == 0 and home.altitude == 0) or\n math.isnan(home.latitude) or math.isnan(home.longitude) or math.isnan(home.altitude)):\n time.sleep(1)\n home = self.getHomeGeoPoint()\n print('X', end='')\n print('')\n\n def getHomeGeoPoint(self):\n return GeoPoint.from_msgpack(self.client.call('getHomeGeoPoint'))\n\n # basic flight control\n def enableApiControl(self, is_enabled):\n return self.client.call('enableApiControl', is_enabled)\n def isApiControlEnabled(self):\n return self.client.call('isApiControlEnabled')\n\n def simSetSegmentationObjectID(self, mesh_name, object_id, is_name_regex = False):\n return self.client.call('simSetSegmentationObjectID', mesh_name, object_id, is_name_regex)\n def simGetSegmentationObjectID(self, mesh_name):\n return self.client.call('simGetSegmentationObjectID', mesh_name)\n \n # camera control\n # simGetImage returns compressed png in array of bytes\n # image_type uses one of the AirSimImageType members\n def simGetImage(self, camera_id, image_type):\n # because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately.\n result = self.client.call('simGetImage', camera_id, image_type)\n if (result == \"\" or result == \"\\0\"):\n return None\n return result\n\n # camera control\n # simGetImage returns compressed png in array of bytes\n # image_type uses one of the AirSimImageType members\n def simGetImages(self, requests):\n responses_raw = self.client.call('simGetImages', requests)\n return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]\n\n def getCollisionInfo(self):\n return CollisionInfo.from_msgpack(self.client.call('getCollisionInfo'))\n\n @staticmethod\n def stringToUint8Array(bstr):\n return np.fromstring(bstr, np.uint8)\n @staticmethod\n def stringToFloatArray(bstr):\n return np.fromstring(bstr, np.float32)\n @staticmethod\n def listTo2DFloatArray(flst, width, height):\n return np.reshape(np.asarray(flst, np.float32), (height, width))\n @staticmethod\n def getPfmArray(response):\n return AirSimClientBase.listTo2DFloatArray(response.image_data_float, response.width, response.height)\n\n @staticmethod\n def get_public_fields(obj):\n return [attr for attr in dir(obj)\n if not (attr.startswith(\"_\") \n or inspect.isbuiltin(attr)\n or inspect.isfunction(attr)\n or inspect.ismethod(attr))]\n\n\n @staticmethod\n def to_dict(obj):\n return dict([attr, getattr(obj, attr)] for attr in AirSimClientBase.get_public_fields(obj))\n\n @staticmethod\n def to_str(obj):\n return str(AirSimClientBase.to_dict(obj))\n\n @staticmethod\n def write_file(filename, bstr):\n with open(filename, 'wb') as afile:\n afile.write(bstr)\n\n def simSetPose(self, pose, ignore_collison):\n self.client.call('simSetPose', pose, ignore_collison)\n\n def simGetPose(self):\n return self.client.call('simGetPose')\n\n # helper method for converting getOrientation to roll/pitch/yaw\n # https:#en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n @staticmethod\n def toEulerianAngle(q):\n z = q.z_val\n y = q.y_val\n x = q.x_val\n w = q.w_val\n ysqr = 
y * y\n\n # roll (x-axis rotation)\n t0 = +2.0 * (w*x + y*z)\n t1 = +1.0 - 2.0*(x*x + ysqr)\n roll = math.atan2(t0, t1)\n\n # pitch (y-axis rotation)\n t2 = +2.0 * (w*y - z*x)\n if (t2 > 1.0):\n t2 = 1\n if (t2 < -1.0):\n t2 = -1.0\n pitch = math.asin(t2)\n\n # yaw (z-axis rotation)\n t3 = +2.0 * (w*z + x*y)\n t4 = +1.0 - 2.0 * (ysqr + z*z)\n yaw = math.atan2(t3, t4)\n\n return (pitch, roll, yaw)\n\n @staticmethod\n def toQuaternion(pitch, roll, yaw):\n t0 = math.cos(yaw * 0.5)\n t1 = math.sin(yaw * 0.5)\n t2 = math.cos(roll * 0.5)\n t3 = math.sin(roll * 0.5)\n t4 = math.cos(pitch * 0.5)\n t5 = math.sin(pitch * 0.5)\n\n q = Quaternionr()\n q.w_val = t0 * t2 * t4 + t1 * t3 * t5 #w\n q.x_val = t0 * t3 * t4 - t1 * t2 * t5 #x\n q.y_val = t0 * t2 * t5 + t1 * t3 * t4 #y\n q.z_val = t1 * t2 * t4 - t0 * t3 * t5 #z\n return q\n\n @staticmethod\n def wait_key(message = ''):\n ''' Wait for a key press on the console and return it. '''\n if message != '':\n print (message)\n\n result = None\n if os.name == 'nt':\n import msvcrt\n result = msvcrt.getch()\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n result = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\n return result\n\n @staticmethod\n def read_pfm(file):\n \"\"\" Read a pfm file \"\"\"\n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n header = str(bytes.decode(header, encoding='utf-8'))\n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n temp_str = str(bytes.decode(file.readline(), encoding='utf-8'))\n dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', temp_str)\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n # DEY: I don't know why this was there.\n #data = np.flipud(data)\n file.close()\n \n return data, scale\n\n @staticmethod\n def write_pfm(file, image, scale=1):\n \"\"\" Write a pfm file \"\"\"\n file = open(file, 'wb')\n\n color = None\n\n if image.dtype.name != 'float32':\n raise Exception('Image dtype must be float32.')\n\n image = np.flipud(image)\n\n if len(image.shape) == 3 and image.shape[2] == 3: # color image\n color = True\n elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale\n color = False\n else:\n raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')\n\n file.write('PF\\n'.encode('utf-8') if color else 'Pf\\n'.encode('utf-8'))\n temp_str = '%d %d\\n' % (image.shape[1], image.shape[0])\n file.write(temp_str.encode('utf-8'))\n\n endian = image.dtype.byteorder\n\n if endian == '<' or endian == '=' and sys.byteorder == 'little':\n scale = -scale\n\n temp_str = '%f\\n' % scale\n file.write(temp_str.encode('utf-8'))\n\n image.tofile(file)\n\n @staticmethod\n def write_png(filename, image):\n \"\"\" image must be numpy array H X W X channels\n \"\"\"\n import zlib, struct\n\n buf = image.flatten().tobytes()\n width = 
image.shape[1]\n height = image.shape[0]\n\n # reverse the vertical line order and add null bytes at the start\n width_byte_4 = width * 4\n raw_data = b''.join(b'\\x00' + buf[span:span + width_byte_4]\n for span in range((height - 1) * width_byte_4, -1, - width_byte_4))\n\n def png_pack(png_tag, data):\n chunk_head = png_tag + data\n return (struct.pack(\"!I\", len(data)) +\n chunk_head +\n struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head)))\n\n png_bytes = b''.join([\n b'\\x89PNG\\r\\n\\x1a\\n',\n png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)),\n png_pack(b'IDAT', zlib.compress(raw_data, 9)),\n png_pack(b'IEND', b'')])\n\n AirSimClientBase.write_file(filename, png_bytes)\n\n\n# ----------------------------------- Multirotor APIs ---------------------------------------------\nclass MultirotorClient(AirSimClientBase, object):\n def __init__(self, ip = \"\"):\n if (ip == \"\"):\n ip = \"127.0.0.1\"\n super(MultirotorClient, self).__init__(ip, 41451)\n\n def armDisarm(self, arm):\n return self.client.call('armDisarm', arm)\n\n def takeoff(self, max_wait_seconds = 15):\n return self.client.call('takeoff', max_wait_seconds)\n \n def land(self, max_wait_seconds = 60):\n return self.client.call('land', max_wait_seconds)\n \n def goHome(self):\n return self.client.call('goHome')\n\n def hover(self):\n return self.client.call('hover')\n\n \n # query vehicle state\n def getPosition(self):\n return Vector3r.from_msgpack(self.client.call('getPosition'))\n def getVelocity(self):\n return Vector3r.from_msgpack(self.client.call('getVelocity'))\n def getOrientation(self):\n return Quaternionr.from_msgpack(self.client.call('getOrientation'))\n def getLandedState(self):\n return self.client.call('getLandedState')\n def getGpsLocation(self):\n return GeoPoint.from_msgpack(self.client.call('getGpsLocation'))\n def getPitchRollYaw(self):\n return self.toEulerianAngle(self.getOrientation())\n\n #def getRCData(self):\n # return self.client.call('getRCData')\n def timestampNow(self):\n return self.client.call('timestampNow')\n def isApiControlEnabled(self):\n return self.client.call('isApiControlEnabled')\n def isSimulationMode(self):\n return self.client.call('isSimulationMode')\n def getServerDebugInfo(self):\n return self.client.call('getServerDebugInfo')\n\n\n # APIs for control\n def moveByAngle(self, pitch, roll, z, yaw, duration):\n return self.client.call('moveByAngle', pitch, roll, z, yaw, duration)\n\n def moveByVelocity(self, vx, vy, vz, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):\n return self.client.call('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode)\n\n def moveByVelocityZ(self, vx, vy, z, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):\n return self.client.call('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode)\n\n def moveOnPath(self, path, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):\n return self.client.call('moveOnPath', path, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)\n\n def moveToZ(self, z, velocity, max_wait_seconds = 60, yaw_mode = YawMode(), lookahead = -1, adaptive_lookahead = 1):\n return self.client.call('moveToZ', z, velocity, max_wait_seconds, yaw_mode, lookahead, adaptive_lookahead)\n\n def moveToPosition(self, x, y, z, velocity, max_wait_seconds = 60, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode(), 
lookahead = -1, adaptive_lookahead = 1):\n return self.client.call('moveToPosition', x, y, z, velocity, max_wait_seconds, drivetrain, yaw_mode, lookahead, adaptive_lookahead)\n\n def moveByManual(self, vx_max, vy_max, z_min, duration, drivetrain = DrivetrainType.MaxDegreeOfFreedom, yaw_mode = YawMode()):\n return self.client.call('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode)\n\n def rotateToYaw(self, yaw, max_wait_seconds = 60, margin = 5):\n return self.client.call('rotateToYaw', yaw, max_wait_seconds, margin)\n\n def rotateByYawRate(self, yaw_rate, duration):\n return self.client.call('rotateByYawRate', yaw_rate, duration)\n\n# ----------------------------------- Car APIs ---------------------------------------------\nclass CarClient(AirSimClientBase, object):\n def __init__(self, ip = \"\"):\n if (ip == \"\"):\n ip = \"127.0.0.1\"\n super(CarClient, self).__init__(ip, 42451)\n\n def setCarControls(self, controls):\n self.client.call('setCarControls', controls)\n\n def getCarState(self):\n state_raw = self.client.call('getCarState')\n return CarState.from_msgpack(state_raw)\n\n#FIXME: keep it and remove all upper that already is in AirSimClient.py\n#==============================================================================\n# Functions\n#==============================================================================\n\ndef drive(client, throttle, steering):\n\n car_controls.throttle = throttle\n car_controls.steering = steering\n client.setCarControls(car_controls)\n\ndef drive_forward(client, car_controls):\n\n drive(client, 1.0, 0)\n\ndef drive_right(client, car_controls):\n\n drive(client, 1.0, 10)\n\ndef drive_left(client, car_controls):\n\n drive(client, 1.0, -10)\n \ndef save_image(i):\n\n # get a sinlgle image from the car's camera\n responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)]) \n single_image = responses[0].image_data_uint8\n\n # save the image\n AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \\\n '/image_{}.png'.format(i)), single_image)\n\n#==============================================================================\n# Main\n#==============================================================================\n\n# Constants\nIMAGEDIR = \"images\"\n\n\n# Create an empty image directory\ntry:\n shutil.rmtree(IMAGEDIR, ignore_errors=True)\n os.stat(IMAGEDIR)\nexcept:\n os.mkdir(IMAGEDIR)\n \n# Connect to AirSim\nclient = CarClient()\nclient.confirmConnection()\nclient.enableApiControl(True)\nclient.reset()\nprint('Connected')\n\ni = 0\ncar_controls = CarControls()\n\nwhile True:\n\n drive_forward(client, car_controls)\n i += 1\n save_image(i)\n print(\"image {} has been saved\".format(i))\n\n time.sleep(0.1)\n\n drive_right(client, car_controls)\n i += 1\n save_image(i)\n print(\"image {} has been saved\".format(i))\n\n time.sleep(0.1)\n\n drive_forward(client, car_controls)\n i += 1\n save_image(i)\n print(\"image {} has been saved\".format(i))\n\n time.sleep(0.1)\n\n drive_left(client, car_controls)\n i += 1\n save_image(i)\n print(\"image {} has been saved\".format(i))\n\n time.sleep(0.1)\n\n if i >= 40:\n break\n\n\n\n\n\n\n ## get RGBA camera images from the car\n #responses = client.simGetImages([ImageRequest(1, AirSimImageType.Scene)]) \n\n ## add image to queue \n #imagequeue.append(responses[0].image_data_uint8)\n\n ## dump queue when it gets full\n #if len(imagequeue) == QUEUESIZE:\n # for i in range(QUEUESIZE):\n # AirSimClientBase.write_file(os.path.normpath(IMAGEDIR + \\\n # '/image%03d.png' % i ), 
imagequeue[i])\n # imagequeue.pop(0) \n\n #collision_info = client.getCollisionInfo()\n\n #if collision_info.has_collided:\n # print(\"Collision at pos %s, normal %s, impact pt %s, penetration %f, name %s, obj id %d\" % (\n # pprint.pformat(collision_info.position), \n # pprint.pformat(collision_info.normal), \n # pprint.pformat(collision_info.impact_point), \n # collision_info.penetration_depth, collision_info.object_name, collision_info.object_id))\n # break\n\n #time.sleep(0.1)\n\nclient.enableApiControl(False)\n" ]
[ [ "numpy.uint8", "numpy.reshape", "numpy.asarray", "numpy.flipud", "numpy.float32", "numpy.fromfile", "numpy.uint64", "numpy.fromstring" ] ]
Henrynaut/Py622
[ "1ac33db96f82c562fe4a85ca5dc0b9b77c5fd088" ]
[ "logAxes.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n# make up some data in the interval ]0, 1[\ny = np.random.normal(loc=0.5, scale=0.4, size=1000)\ny = y[(y > 0) & (y < 1)]\ny.sort()\nx = np.arange(len(y))\n\n# plot with various axes scales\nplt.figure(1)\n\n# linear\nplt.subplot(221)\nplt.plot(x, y)\nplt.yscale('linear')\nplt.title('linear')\nplt.grid(True)\n\n\n# log\nplt.subplot(222)\nplt.plot(x, y)\nplt.yscale('log')\nplt.title('log')\nplt.grid(True)\n\n\n# symmetric log\nplt.subplot(223)\nplt.plot(x, y - y.mean())\nplt.yscale('symlog', linthreshy=0.01)\nplt.title('symlog')\nplt.grid(True)\n\n# logit\nplt.subplot(224)\nplt.plot(x, y)\nplt.yscale('logit')\nplt.title('logit')\nplt.grid(True)\n# Format the minor tick labels of the y-axis into empty strings with\n# `NullFormatter`, to avoid cumbering the axis with too many labels.\nplt.gca().yaxis.set_minor_formatter(NullFormatter())\n# Adjust the subplot layout, because the logit one may take more space\n# than usual, due to y-tick labels like \"1 - 10^{-3}\"\nplt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,\n wspace=0.35)\n\nplt.show()" ]
[ [ "numpy.random.normal", "numpy.random.seed", "matplotlib.pyplot.grid", "matplotlib.pyplot.gca", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.ticker.NullFormatter", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplot" ] ]
arp95/pytorch_image_classifier
[ "81db0a99b79dcebc39843869bf684c5090db6fdb" ]
[ "models/densenet121.py" ]
[ "# header files\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport numpy as np\n\n\n# define network (remember input size: (224 x 224 x 3))\nclass DenseNet_121(torch.nn.Module):\n \n # define dense block\n def dense_block(self, input_channels):\n return torch.nn.Sequential(\n torch.nn.Conv2d(input_channels, 128, kernel_size=1, bias=False),\n torch.nn.BatchNorm2d(128),\n torch.nn.ReLU(inplace=True),\n torch.nn.Conv2d(128, 32, kernel_size=3, padding=1, bias=False),\n torch.nn.BatchNorm2d(32),\n torch.nn.ReLU(inplace=True)\n )\n \n # init function\n def __init__(self, num_classes = 2):\n super(DenseNet_121, self).__init__()\n \n self.features = torch.nn.Sequential(\n torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),\n torch.nn.BatchNorm2d(64),\n torch.nn.ReLU(inplace=True),\n torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n )\n \n # dense block 1 (56 x 56 x 64)\n self.dense_block_1_1 = self.dense_block(64)\n self.dense_block_1_2 = self.dense_block(96)\n self.dense_block_1_3 = self.dense_block(128)\n self.dense_block_1_4 = self.dense_block(160)\n self.dense_block_1_5 = self.dense_block(192)\n self.dense_block_1_6 = self.dense_block(224)\n \n # transition block 1\n self.transition_block_1 = torch.nn.Sequential(\n torch.nn.Conv2d(256, 128, kernel_size=1, bias=False),\n torch.nn.AvgPool2d(kernel_size=2, stride=2)\n )\n \n # dense block 2 (28 x 28 x 128)\n self.dense_block_2_1 = self.dense_block(128)\n self.dense_block_2_2 = self.dense_block(160)\n self.dense_block_2_3 = self.dense_block(192)\n self.dense_block_2_4 = self.dense_block(224)\n self.dense_block_2_5 = self.dense_block(256)\n self.dense_block_2_6 = self.dense_block(288)\n self.dense_block_2_7 = self.dense_block(320)\n self.dense_block_2_8 = self.dense_block(352)\n self.dense_block_2_9 = self.dense_block(384)\n self.dense_block_2_10 = self.dense_block(416)\n self.dense_block_2_11 = self.dense_block(448)\n self.dense_block_2_12 = self.dense_block(480)\n \n \n # transition block 2\n self.transition_block_2 = torch.nn.Sequential(\n torch.nn.Conv2d(512, 256, kernel_size=1, bias=False),\n torch.nn.AvgPool2d(kernel_size=2, stride=2)\n )\n \n # dense block 3 (14 x 14 x 240)\n self.dense_block_3_1 = self.dense_block(256)\n self.dense_block_3_2 = self.dense_block(288)\n self.dense_block_3_3 = self.dense_block(320)\n self.dense_block_3_4 = self.dense_block(352)\n self.dense_block_3_5 = self.dense_block(384)\n self.dense_block_3_6 = self.dense_block(416)\n self.dense_block_3_7 = self.dense_block(448)\n self.dense_block_3_8 = self.dense_block(480)\n self.dense_block_3_9 = self.dense_block(512)\n self.dense_block_3_10 = self.dense_block(544)\n self.dense_block_3_11 = self.dense_block(576)\n self.dense_block_3_12 = self.dense_block(608)\n self.dense_block_3_13 = self.dense_block(640)\n self.dense_block_3_14 = self.dense_block(672)\n self.dense_block_3_15 = self.dense_block(704)\n self.dense_block_3_16 = self.dense_block(736)\n self.dense_block_3_17 = self.dense_block(768)\n self.dense_block_3_18 = self.dense_block(800)\n self.dense_block_3_19 = self.dense_block(832)\n self.dense_block_3_20 = self.dense_block(864)\n self.dense_block_3_21 = self.dense_block(896)\n self.dense_block_3_22 = self.dense_block(928)\n self.dense_block_3_23 = self.dense_block(960)\n self.dense_block_3_24 = self.dense_block(992)\n \n \n # transition block 3\n self.transition_block_3 = torch.nn.Sequential(\n torch.nn.Conv2d(1024, 512, kernel_size=1, bias=False),\n torch.nn.AvgPool2d(kernel_size=2, stride=2)\n )\n \n # dense block 4 (7 
x 7 x 512)\n self.dense_block_4_1 = self.dense_block(512)\n self.dense_block_4_2 = self.dense_block(544)\n self.dense_block_4_3 = self.dense_block(576)\n self.dense_block_4_4 = self.dense_block(608)\n self.dense_block_4_5 = self.dense_block(640)\n self.dense_block_4_6 = self.dense_block(672)\n self.dense_block_4_7 = self.dense_block(704)\n self.dense_block_4_8 = self.dense_block(736)\n self.dense_block_4_9 = self.dense_block(768)\n self.dense_block_4_10 = self.dense_block(800)\n self.dense_block_4_11 = self.dense_block(832)\n self.dense_block_4_12 = self.dense_block(864)\n self.dense_block_4_13 = self.dense_block(896)\n self.dense_block_4_14 = self.dense_block(928)\n self.dense_block_4_15 = self.dense_block(960)\n self.dense_block_4_16 = self.dense_block(992)\n \n self.avgpool = torch.nn.AdaptiveAvgPool2d(7)\n \n self.classifier = torch.nn.Sequential(\n torch.nn.Linear(1024 * 7 * 7, num_classes)\n )\n \n def forward(self, x):\n x = self.features(x)\n \n # dense block 1\n x_1 = self.dense_block_1_1(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_1_2(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_1_3(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_1_4(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_1_5(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_1_6(x)\n x = torch.cat([x, x_1], 1)\n \n # transition block 1\n x = self.transition_block_1(x)\n \n # dense block 2\n x_1 = self.dense_block_2_1(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_2(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_3(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_4(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_5(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_6(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_7(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_8(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_9(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_10(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_11(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_2_12(x)\n x = torch.cat([x, x_1], 1)\n \n # transition block 2\n x = self.transition_block_2(x)\n \n # dense block 3\n x_1 = self.dense_block_3_1(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_2(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_3(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_4(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_5(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_6(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_7(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_8(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_9(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_10(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_11(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_12(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_13(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_14(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_15(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_16(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_17(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_18(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_19(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_20(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_21(x)\n x = torch.cat([x, x_1], 1)\n x_1 = 
self.dense_block_3_22(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_23(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_3_24(x)\n x = torch.cat([x, x_1], 1)\n \n # transition block 3\n x = self.transition_block_3(x)\n \n # dense block 4\n x_1 = self.dense_block_4_1(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_2(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_3(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_4(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_5(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_6(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_7(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_8(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_9(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_10(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_11(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_12(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_13(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_14(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_15(x)\n x = torch.cat([x, x_1], 1)\n x_1 = self.dense_block_4_16(x)\n x = torch.cat([x, x_1], 1)\n \n x = self.avgpool(x)\n \n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.flatten", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
fugue-project/tune
[ "bf2288ddcb29c8345d996a9b22c0910da9002da1" ]
[ "tests/tune/_utils/test_values.py" ]
[ "import json\n\nimport numpy as np\nfrom tune._utils import normalize_hp\n\n\ndef test_normalize_hp():\n assert isinstance(np.int64(10), np.int64)\n assert 10 == normalize_hp(np.int64(10))\n assert not isinstance(normalize_hp(np.int64(10)), np.int64)\n\n assert json.dumps(dict(a=[0, 1], b=1.1, c=\"x\")) == json.dumps(\n normalize_hp(dict(a=[np.int64(0), 1], b=np.float64(1.1), c=\"x\"))\n )\n" ]
[ [ "numpy.float64", "numpy.int64" ] ]
IshitaTakeshi/DVO
[ "2c5a3db1db7e651bfaa7808bbf79a6c1c6a42fc5" ]
[ "examples/rgbd_desk.py" ]
[ "import csv\nimport sys\nfrom pathlib import Path\n\nsys.path.append(str(Path(__file__).resolve().parent.parent))\n\nfrom skimage.color import rgb2gray\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom tadataka import VisualOdometry, CameraParameters\nfrom tadataka.rigid import exp_se3, log_se3\nfrom tadataka.projection import warp\nfrom tadataka.mapping import MapBuilder\nfrom tadataka.quaternion import quaternion_to_rotation\nfrom tadataka.datasets.tum_rgbd import TUMDataset, PoseSequence\n\nfrom visualization.plot import plot\n\n\n# dataset format is explained at\n# https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#\n# intrinsic_camera_calibration_of_the_kinect\n\ndataset_root = Path(\"datasets\", \"rgbd_dataset_freiburg1_desk\")\n# dataset_root = Path(\"datasets\", \"rgbd_dataset_freiburg2_pioneer_360\")\n# dataset_root = Path(\"datasets\", \"rgbd_dataset_freiburg3_structure_texture_near\")\n\n\ndef error(image_true, image_pred, mask):\n return np.power(image_true[mask]-image_pred[mask], 2).mean()\n\n\ndef visualize_error_function(camera_parameters, I0, D0, I1, xi_pred):\n def generate_error_curve(i, start, stop, n):\n xi = np.copy(xi_pred)\n\n vs = xi[i] + np.linspace(start, stop, n)\n errors = []\n for v in vs:\n xi[i] = v\n DG = exp_se3(xi)\n estimated, mask = warp(camera_parameters, I1, D0, DG)\n errors.append(error(I0, estimated, mask))\n errors = np.array(errors)\n return vs, errors\n\n from matplotlib import pyplot as plt\n\n fig = plt.figure()\n\n for xi_index, ax_index in enumerate([1, 3, 5, 2, 4, 6]):\n ax = fig.add_subplot(3, 2, ax_index)\n\n vs, errors = generate_error_curve(xi_index,\n start=-0.10, stop=0.10, n=101)\n ax.set_title(\"Axis {}\".format(xi_index+1))\n ax.axvline(vs[np.argmin(errors)], label=\"ground truth\")\n ax.axvline(xi_pred[xi_index], color=\"red\", label=\"prediction\")\n ax.legend()\n ax.plot(vs, errors)\n\n plt.show()\n\n\ndef main():\n np.set_printoptions(suppress=True, precision=8, linewidth=1e8)\n\n camera_parameters = CameraParameters(\n focal_length=[525.0, 525.0],\n offset=[319.5, 239.5]\n )\n\n dataset = TUMDataset(dataset_root)\n\n G = np.eye(4)\n frame0 = dataset.load_color(0)\n\n sequence_pred = PoseSequence()\n sequence_pred.add(frame0.timestamp_depth, G)\n\n for i in tqdm(range(1, dataset.size)):\n frame1 = dataset.load_color(i)\n\n # TODO not necessary to convert the color of the same image twice\n # we need to create a better interface\n vo = VisualOdometry(camera_parameters,\n rgb2gray(frame0.image), frame0.depth_map,\n rgb2gray(frame1.image))\n DG = vo.estimate_motion(n_coarse_to_fine=6)\n\n G = np.dot(G, np.linalg.inv(DG))\n\n sequence_pred.add(frame1.timestamp_depth, G)\n\n frame0 = frame1\n\n sequence_pred.save(\"poses.txt\")\n\n # TODO implement the following\n # pointcloud = map_builder.export()\n # export_pointcloud(pointcloud)\n\nmain()\n" ]
[ [ "numpy.array", "numpy.argmin", "numpy.set_printoptions", "numpy.copy", "numpy.eye", "matplotlib.pyplot.figure", "numpy.power", "matplotlib.pyplot.show", "numpy.linspace", "numpy.linalg.inv" ] ]
YanchengWang/RegNet-Search-PyTorch
[ "e15f2d33d5e2191ff22e65f5257693894156b4fd" ]
[ "train.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Hang Zhang\n## Email: zhanghang0704@gmail.com\n## Copyright (c) 2020\n##\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport os\nimport time\nimport argparse\nimport importlib\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport autotorch as at\nimport encoding\nfrom encoding.nn import LabelSmoothing, NLLMultiLabelSmooth\nfrom encoding.utils import (accuracy, AverageMeter, MixUpWrapper, LR_Scheduler, torch_dist_sum)\n\ntry:\n import apex\n from apex import amp\nexcept ModuleNotFoundError:\n print('please install amp if using float16 training')\n\nclass Options():\n def __init__(self):\n # data settings\n parser = argparse.ArgumentParser(description='Deep Encoding')\n parser.add_argument('--dataset', type=str, default='imagenet',\n help='training dataset (default: imagenet)')\n parser.add_argument('--base-size', type=int, default=None,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=224,\n help='crop image size')\n parser.add_argument('--label-smoothing', type=float, default=0.0,\n help='label-smoothing (default eta: 0.0)')\n parser.add_argument('--mixup', type=float, default=0.0,\n help='mixup (default eta: 0.0)')\n parser.add_argument('--auto-policy', type=str, default=None,\n help='path to auto augment policy')\n parser.add_argument('--data-dir', type=str, default=os.path.expanduser('~/.encoding/data'),\n help='data location for training')\n # model params \n #parser.add_argument('--model', type=str, default='resnet50',\n # help='network model type (default: densenet)')\n parser.add_argument('--arch', type=str, default='regnet',\n help='network type (default: regnet)')\n parser.add_argument('--config-file', type=str, required=True,\n help='network node config file')\n parser.add_argument('--last-gamma', action='store_true', default=False,\n help='whether to init gamma of the last BN layer in \\\n each bottleneck to 0 (default: False)')\n # training params\n parser.add_argument('--amp', action='store_true',\n default=False, help='using amp')\n parser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='batch size for training (default: 128)')\n parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',\n help='batch size for testing (default: 256)')\n parser.add_argument('--epochs', type=int, default=120, metavar='N',\n help='number of epochs to train (default: 600)')\n parser.add_argument('--start_epoch', type=int, default=0, \n metavar='N', help='the epoch number to start (default: 1)')\n parser.add_argument('--workers', type=int, default=8,\n metavar='N', help='dataloader threads')\n # optimizer\n parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr-scheduler', type=str, default='cos', \n help='learning rate scheduler (default: cos)')\n parser.add_argument('--warmup-epochs', type=int, default=0,\n help='number of warmup epochs (default: 0)')\n parser.add_argument('--momentum', type=float, default=0.9, \n metavar='M', help='SGD momentum (default: 0.9)')\n parser.add_argument('--wd', type=float, default=1e-4, \n metavar ='M', help='SGD weight decay 
(default: 1e-4)')\n parser.add_argument('--no-bn-wd', action='store_true', \n default=False, help='no bias decay')\n # seed\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default='default',\n help='set the checkpoint name')\n # distributed\n parser.add_argument('--world-size', default=1, type=int,\n help='number of nodes for distributed training')\n parser.add_argument('--rank', default=0, type=int,\n help='node rank for distributed training')\n parser.add_argument('--dist-url', default='tcp://localhost:23456', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n # evaluation option\n parser.add_argument('--eval', action='store_true', default= False,\n help='evaluating')\n parser.add_argument('--export', type=str, default=None,\n help='put the path to resuming file if needed')\n self.parser = parser\n\n def parse(self):\n args = self.parser.parse_args()\n return args\n\ndef main():\n args = Options().parse()\n ngpus_per_node = torch.cuda.device_count()\n args.world_size = ngpus_per_node * args.world_size\n args.lr = args.lr * args.world_size\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n\n# global variable\nbest_pred = 0.0\nacclist_train = []\nacclist_val = []\n\ndef main_worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n args.rank = args.rank * ngpus_per_node + gpu\n # model name for checkpoint\n args.model = \"{}-{}\".format(args.arch, os.path.splitext(os.path.basename(args.config_file))[0])\n if args.gpu == 0:\n print('model:', args.model)\n print('rank: {} / {}'.format(args.rank, args.world_size))\n dist.init_process_group(backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank)\n torch.cuda.set_device(args.gpu)\n # init the args\n global best_pred, acclist_train, acclist_val\n\n if args.gpu == 0:\n print(args)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n cudnn.benchmark = True\n # init dataloader\n transform_train, transform_val = encoding.transforms.get_transform(\n args.dataset, args.base_size, args.crop_size)\n if args.auto_policy is not None:\n print(f'Using auto_policy: {args.auto_policy}')\n from augment import Augmentation\n auto_policy = Augmentation(at.load(args.auto_policy))\n transform_train.transforms.insert(0, auto_policy)\n\n trainset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,\n transform=transform_train, train=True, download=True)\n valset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,\n transform=transform_val, train=False, download=True)\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True,\n sampler=train_sampler)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)\n val_loader = torch.utils.data.DataLoader(\n valset, batch_size=args.test_batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True,\n sampler=val_sampler)\n \n # init the model\n arch = importlib.import_module('arch.' 
+ args.arch)\n model = arch.config_network(args.config_file)\n if args.gpu == 0:\n print(model)\n\n if args.mixup > 0:\n train_loader = MixUpWrapper(args.mixup, 1000, train_loader, args.gpu)\n criterion = NLLMultiLabelSmooth(args.label_smoothing)\n elif args.label_smoothing > 0.0:\n criterion = LabelSmoothing(args.label_smoothing)\n else:\n criterion = nn.CrossEntropyLoss()\n\n model.cuda(args.gpu)\n criterion.cuda(args.gpu)\n # criterion and optimizer\n if args.no_bn_wd:\n parameters = model.named_parameters()\n param_dict = {}\n for k, v in parameters:\n param_dict[k] = v\n bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]\n rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]\n if args.gpu == 0:\n print(\" Weight decay NOT applied to BN parameters \")\n print(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')\n optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },\n {'params': rest_params, 'weight_decay': args.wd}],\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.wd)\n else:\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.wd)\n if args.amp:\n #optimizer = amp_handle.wrap_optimizer(optimizer)\n model, optimizer = amp.initialize(model, optimizer, opt_level='O2')\n #from apex import amp\n DDP = apex.parallel.DistributedDataParallel\n model = DDP(model, delay_allreduce=True)\n else:\n DDP = DistributedDataParallel\n model = DDP(model, device_ids=[args.gpu])\n\n # check point\n if args.resume is not None:\n if os.path.isfile(args.resume):\n if args.gpu == 0:\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch'] + 1 if args.start_epoch == 0 else args.start_epoch\n best_pred = checkpoint['best_pred']\n acclist_train = checkpoint['acclist_train']\n acclist_val = checkpoint['acclist_val']\n model.module.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n if args.amp:\n amp.load_state_dict(checkpoint['amp'])\n if args.gpu == 0:\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n raise RuntimeError (\"=> no resume checkpoint found at '{}'\".\\\n format(args.resume))\n scheduler = LR_Scheduler(args.lr_scheduler,\n base_lr=args.lr,\n num_epochs=args.epochs,\n iters_per_epoch=len(train_loader),\n warmup_epochs=args.warmup_epochs)\n def train(epoch):\n train_sampler.set_epoch(epoch)\n model.train()\n losses = AverageMeter()\n top1 = AverageMeter()\n global best_pred, acclist_train\n tic = time.time()\n for batch_idx, (data, target) in enumerate(train_loader):\n scheduler(optimizer, batch_idx, epoch, best_pred)\n if not args.mixup:\n data, target = data.cuda(args.gpu), target.cuda(args.gpu)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n\n if not args.mixup:\n acc1 = accuracy(output, target, topk=(1,))\n top1.update(acc1[0], data.size(0))\n\n losses.update(loss.item(), data.size(0))\n if batch_idx % 100 == 0 and args.gpu == 0:\n iter_per_sec = 100.0 / (time.time() - tic) if batch_idx != 0 else 1.0 / (time.time() - tic)\n tic = time.time()\n if args.mixup:\n #print('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))\n print('Epoch: {}, Iter: {}, Speed: {:.3f} 
iter/sec, Train loss: {:.3f}'. \\\n format(epoch, batch_idx, iter_per_sec, losses.avg.item()))\n else:\n #print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))\n print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Top1: {:.3f}'. \\\n format(epoch, batch_idx, iter_per_sec, top1.avg.item()))\n\n acclist_train += [top1.avg]\n\n def validate(epoch):\n model.eval()\n top1 = AverageMeter()\n top5 = AverageMeter()\n global best_pred, acclist_train, acclist_val\n is_best = False\n for batch_idx, (data, target) in enumerate(val_loader):\n data, target = data.cuda(args.gpu), target.cuda(args.gpu)\n with torch.no_grad():\n output = model(data)\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n top1.update(acc1[0], data.size(0))\n top5.update(acc5[0], data.size(0))\n\n # sum all\n sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)\n\n if args.eval:\n if args.gpu == 0:\n top1_acc = sum(sum1) / sum(cnt1)\n top5_acc = sum(sum5) / sum(cnt5)\n print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))\n return\n\n if args.gpu == 0:\n top1_acc = sum(sum1) / sum(cnt1)\n top5_acc = sum(sum5) / sum(cnt5)\n print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))\n\n # save checkpoint\n acclist_val += [top1_acc]\n if top1_acc > best_pred:\n best_pred = top1_acc \n is_best = True\n state_dict = {\n 'epoch': epoch,\n 'state_dict': model.module.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_pred': best_pred,\n 'acclist_train':acclist_train,\n 'acclist_val':acclist_val,\n }\n if args.amp:\n state_dict['amp'] = amp.state_dict()\n encoding.utils.save_checkpoint(state_dict, args=args, is_best=is_best)\n\n if args.export:\n if args.gpu == 0:\n torch.save(model.module.state_dict(), args.export + '.pth')\n return\n\n if args.eval:\n validate(args.start_epoch)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n tic = time.time()\n train(epoch)\n if epoch % 10 == 0:# or epoch == args.epochs-1:\n validate(epoch)\n elapsed = time.time() - tic\n if args.gpu == 0:\n print(f'Epoch: {epoch}, Time cost: {elapsed}')\n\n if args.gpu == 0:\n encoding.utils.save_checkpoint({\n 'epoch': args.epochs-1,\n 'state_dict': model.module.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_pred': best_pred,\n 'acclist_train':acclist_train,\n 'acclist_val':acclist_val,\n }, args=args, is_best=False)\n\nif __name__ == \"__main__\":\n os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'\n main()\n\n" ]
[ [ "torch.cuda.manual_seed", "torch.distributed.init_process_group", "torch.no_grad", "torch.optim.SGD", "torch.multiprocessing.spawn", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.set_device", "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
ne7ermore/deeping-flow
[ "9414fa48139bac99824ae89cc45c6f59855fe7d4" ]
[ "hierarchical-sc/corpus.py" ]
[ "import os\nimport pickle\nimport math\n\nimport pandas as pd\n\nfrom const import *\n\n\ndef middle_save(obj, inf):\n pickle.dump(obj, open(inf, \"wb\"), True)\n\n\ndef middle_load(inf):\n return pickle.load(open(inf, \"rb\"))\n\n\ndef word2idx(sents, word2idx):\n return [[word2idx[w] if w in word2idx else UNK for w in s] for s in sents]\n\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {\n WORD[PAD]: PAD,\n WORD[UNK]: UNK,\n WORD[BOS]: BOS,\n WORD[EOS]: EOS\n }\n self.idx = len(self.word2idx)\n\n def add(self, word):\n if self.word2idx.get(word) is None:\n self.word2idx[word] = self.idx\n self.idx += 1\n\n def __call__(self, sents, min_count):\n words = [word for sent in sents for word in sent]\n word_count = {w: 0 for w in set(words)}\n for w in words:\n word_count[w] += 1\n\n ignored_word_count = 0\n for word, count in word_count.items():\n if count <= min_count:\n ignored_word_count += 1\n continue\n self.add(word)\n\n return ignored_word_count\n\n def __len__(self):\n return self.idx\n\n def __str__(self):\n return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx))\n\n\nclass Corpus(object):\n def __init__(self, max_ori_len=128, max_sum_len=15, min_word_count=1):\n self.dict = Dictionary()\n self.max_ori_len = max_ori_len\n self.max_sum_len = max_sum_len\n self._min_word_count = min_word_count\n\n self.parse_data(\"data/test.csv\", False)\n self.parse_data(\"data/train.csv\")\n self.save()\n\n def parse_data(self, _file, is_train=True):\n def cut(x, list, ignore, max_len, is_summ):\n if isinstance(x, float) and math.isnan(x):\n if is_summ:\n list.append(WORD[EOS])\n else:\n list.append(\"\")\n else:\n x = x.split()\n if len(x) > max_len:\n x = x[:max_len]\n ignore[0] += 1\n\n if is_summ:\n x += [WORD[EOS]]\n\n list.append(x)\n\n origins, summurys = [], []\n ignore_ori_nums = [0]\n ignore_sum_nums = [0]\n\n df = pd.read_csv(_file)\n\n df[\"original\"].apply(cut, args=(\n origins, ignore_ori_nums, self.max_ori_len, False))\n df[\"summary\"].apply(cut, args=(\n summurys, ignore_sum_nums, self.max_sum_len, True))\n\n if is_train:\n ori_ignore = self.dict(origins + summurys, self._min_word_count)\n self.train_origins = origins\n self.train_summurys = summurys\n self.train_labels = df[\"score\"].values - 1\n\n print(\"Ignored origin counts - [{}]\".format(ori_ignore))\n print(\n 'Train data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))\n print(\n 'Train data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))\n else:\n self.test_origins = origins\n self.test_summurys = summurys\n self.test_labels = df[\"score\"].values - 1\n print(\n 'Test data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))\n print(\n 'Test data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))\n\n def save(self):\n data = {\n 'max_ori_len': self.max_ori_len,\n 'max_sum_len': self.max_sum_len + 1,\n 'dict': {\n 'dict': self.dict.word2idx,\n 'dict_size': len(self.dict),\n },\n 'train': {\n 'original': word2idx(self.train_origins, self.dict.word2idx),\n 'summary': word2idx(self.train_summurys, self.dict.word2idx),\n 'label': self.train_labels\n },\n 'test': {\n 'original': word2idx(self.test_origins, self.dict.word2idx),\n 'summary': word2idx(self.test_summurys, self.dict.word2idx),\n 'label': self.test_labels\n }\n }\n\n middle_save(data, \"data/corpus\")\n print('dict length - [{}]'.format(len(self.dict)))\n\n\nif __name__ == \"__main__\":\n Corpus()\n" ]
[ [ "pandas.read_csv" ] ]
vivekkhurana/handsign
[ "315e40e2d7b00a7e34cad870e6f90679e7bf7100" ]
[ "utils/general.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image\n# Copyright (C) 2017 Christian Zimmermann\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow.python import pywrap_tensorflow\nimport numpy as np\nimport math\nimport cv2\n\n\nclass NetworkOps(object):\n \"\"\" Operations that are frequently used within networks. \"\"\"\n neg_slope_of_relu = 0.01\n\n @classmethod\n def leaky_relu(cls, tensor, name='relu'):\n out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)\n return out_tensor\n\n @classmethod\n def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):\n with tf.variable_scope(layer_name):\n in_size = in_tensor.get_shape().as_list()\n\n strides = [1, stride, stride, 1]\n kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]\n\n # conv\n kernel = tf.get_variable('weights', kernel_shape, tf.float32,\n tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])\n tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')\n\n # bias\n biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,\n tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])\n out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')\n\n return out_tensor\n\n @classmethod\n def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):\n tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)\n out_tensor = cls.leaky_relu(tensor, name='out')\n return out_tensor\n\n @classmethod\n def max_pool(cls, bottom, name='pool'):\n pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='VALID', name=name)\n return pooled\n\n @classmethod\n def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):\n with tf.variable_scope(layer_name):\n in_size = in_tensor.get_shape().as_list()\n\n kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]\n strides = [1, stride, stride, 1]\n\n # conv\n kernel = cls.get_deconv_filter(kernel_shape, trainable)\n tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,\n strides=strides, padding='SAME')\n\n # bias\n biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,\n tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])\n out_tensor = tf.nn.bias_add(tmp_result, biases)\n return out_tensor\n\n @classmethod\n def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):\n tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)\n out_tensor = cls.leaky_relu(tensor, name='out')\n return out_tensor\n\n 
@staticmethod\n def get_deconv_filter(f_shape, trainable):\n width = f_shape[0]\n height = f_shape[1]\n f = math.ceil(width/2.0)\n c = (2 * f - 1 - f % 2) / (2.0 * f)\n bilinear = np.zeros([f_shape[0], f_shape[1]])\n for x in range(width):\n for y in range(height):\n value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n bilinear[x, y] = value\n weights = np.zeros(f_shape)\n for i in range(f_shape[2]):\n weights[:, :, i, i] = bilinear\n\n init = tf.constant_initializer(value=weights,\n dtype=tf.float32)\n return tf.get_variable(name=\"weights\", initializer=init,\n shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])\n\n @staticmethod\n def fully_connected(in_tensor, layer_name, out_chan, trainable=True):\n with tf.variable_scope(layer_name):\n in_size = in_tensor.get_shape().as_list()\n assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'\n weights_shape = [in_size[1], out_chan]\n\n # weight matrix\n weights = tf.get_variable('weights', weights_shape, tf.float32,\n tf.contrib.layers.xavier_initializer(), trainable=trainable)\n weights = tf.check_numerics(weights, 'weights: %s' % layer_name)\n\n # bias\n biases = tf.get_variable('biases', [out_chan], tf.float32,\n tf.constant_initializer(0.0001), trainable=trainable)\n biases = tf.check_numerics(biases, 'biases: %s' % layer_name)\n\n out_tensor = tf.matmul(in_tensor, weights) + biases\n return out_tensor\n\n @classmethod\n def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):\n tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)\n out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')\n return out_tensor\n\n @staticmethod\n def dropout(in_tensor, keep_prob, evaluation):\n \"\"\" Dropout: Each neuron is dropped independently. \"\"\"\n with tf.variable_scope('dropout'):\n tensor_shape = in_tensor.get_shape().as_list()\n out_tensor = tf.cond(evaluation,\n lambda: tf.nn.dropout(in_tensor, 1.0,\n noise_shape=tensor_shape),\n lambda: tf.nn.dropout(in_tensor, keep_prob,\n noise_shape=tensor_shape))\n return out_tensor\n\n @staticmethod\n def spatial_dropout(in_tensor, keep_prob, evaluation):\n \"\"\" Spatial dropout: Not each neuron is dropped independently, but feature map wise. \"\"\"\n with tf.variable_scope('spatial_dropout'):\n tensor_shape = in_tensor.get_shape().as_list()\n out_tensor = tf.cond(evaluation,\n lambda: tf.nn.dropout(in_tensor, 1.0,\n noise_shape=tensor_shape),\n lambda: tf.nn.dropout(in_tensor, keep_prob,\n noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))\n return out_tensor\n\n\ndef crop_image_from_xy(image, crop_location, crop_size, scale=1.0):\n \"\"\"\n Crops an image. 
When factor is not given does an central crop.\n\n Inputs:\n image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension\n crop_location: tensor, [batch, 2] which represent the height and width location of the crop\n crop_size: int, describes the extension of the crop\n Outputs:\n image_crop: 4D tensor, [batch, crop_size, crop_size, channels]\n \"\"\"\n with tf.name_scope('crop_image_from_xy'):\n s = image.get_shape().as_list()\n assert len(s) == 4, \"Image needs to be of shape [batch, width, height, channel]\"\n scale = tf.reshape(scale, [-1])\n crop_location = tf.cast(crop_location, tf.float32)\n crop_location = tf.reshape(crop_location, [s[0], 2])\n crop_size = tf.cast(crop_size, tf.float32)\n\n crop_size_scaled = crop_size / scale\n y1 = crop_location[:, 0] - crop_size_scaled//2\n y2 = y1 + crop_size_scaled\n x1 = crop_location[:, 1] - crop_size_scaled//2\n x2 = x1 + crop_size_scaled\n y1 /= s[1]\n y2 /= s[1]\n x1 /= s[2]\n x2 /= s[2]\n boxes = tf.stack([y1, x1, y2, x2], -1)\n\n crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)\n box_ind = tf.range(s[0])\n image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')\n return image_c\n\n\ndef find_max_location(scoremap):\n \"\"\" Returns the coordinates of the given scoremap with maximum value. \"\"\"\n with tf.variable_scope('find_max_location'):\n s = scoremap.get_shape().as_list()\n if len(s) == 4:\n scoremap = tf.squeeze(scoremap, [3])\n if len(s) == 2:\n scoremap = tf.expand_dims(scoremap, 0)\n\n s = scoremap.get_shape().as_list()\n assert len(s) == 3, \"Scoremap must be 3D.\"\n assert (s[0] < s[1]) and (s[0] < s[2]), \"Scoremap must be [Batch, Width, Height]\"\n\n # my meshgrid\n x_range = tf.expand_dims(tf.range(s[1]), 1)\n y_range = tf.expand_dims(tf.range(s[2]), 0)\n X = tf.tile(x_range, [1, s[2]])\n Y = tf.tile(y_range, [s[1], 1])\n\n x_vec = tf.reshape(X, [-1])\n y_vec = tf.reshape(Y, [-1])\n scoremap_vec = tf.reshape(scoremap, [s[0], -1])\n max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)\n\n xy_loc = list()\n for i in range(s[0]):\n x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])\n y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])\n xy_loc.append(tf.concat([x_loc, y_loc], 0))\n\n xy_loc = tf.stack(xy_loc, 0)\n return xy_loc\n\n\ndef single_obj_scoremap(scoremap):\n \"\"\" Applies my algorithm to figure out the most likely object from a given segmentation scoremap. 
\"\"\"\n with tf.variable_scope('single_obj_scoremap'):\n filter_size = 21\n s = scoremap.get_shape().as_list()\n assert len(s) == 4, \"Scoremap must be 4D.\"\n\n scoremap_softmax = tf.nn.softmax(scoremap) #B, H, W, C --> normalizes across last dimension\n scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3) # B, H, W\n detmap_fg = tf.round(scoremap_fg) # B, H, W\n\n # find maximum in the fg scoremap\n max_loc = find_max_location(scoremap_fg)\n\n # use maximum to start \"growing\" our objectmap\n objectmap_list = list()\n kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)\n for i in range(s[0]):\n # create initial objectmap (put a one at the maximum)\n sparse_ind = tf.reshape(max_loc[i, :], [1, 2]) # reshape that its one point with 2dim)\n objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)\n\n # grow the map by dilation and pixelwise and\n num_passes = max(s[1], s[2]) // (filter_size//2) # number of passes needes to make sure the map can spread over the whole image\n for j in range(num_passes):\n objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])\n objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')\n objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])\n objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))\n\n objectmap = tf.reshape(objectmap, [s[1], s[2], 1])\n objectmap_list.append(objectmap)\n\n objectmap = tf.stack(objectmap_list)\n\n return objectmap\n\n\ndef calc_center_bb(binary_class_mask):\n \"\"\" Returns the center of mass coordinates for the given binary_class_mask. \"\"\"\n with tf.variable_scope('calc_center_bb'):\n binary_class_mask = tf.cast(binary_class_mask, tf.int32)\n binary_class_mask = tf.equal(binary_class_mask, 1)\n s = binary_class_mask.get_shape().as_list()\n if len(s) == 4:\n binary_class_mask = tf.squeeze(binary_class_mask, [3])\n\n s = binary_class_mask.get_shape().as_list()\n assert len(s) == 3, \"binary_class_mask must be 3D.\"\n assert (s[0] < s[1]) and (s[0] < s[2]), \"binary_class_mask must be [Batch, Width, Height]\"\n\n # my meshgrid\n x_range = tf.expand_dims(tf.range(s[1]), 1)\n y_range = tf.expand_dims(tf.range(s[2]), 0)\n X = tf.tile(x_range, [1, s[2]])\n Y = tf.tile(y_range, [s[1], 1])\n\n bb_list = list()\n center_list = list()\n crop_size_list = list()\n for i in range(s[0]):\n X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)\n Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)\n\n x_min = tf.reduce_min(X_masked)\n x_max = tf.reduce_max(X_masked)\n y_min = tf.reduce_min(Y_masked)\n y_max = tf.reduce_max(Y_masked)\n\n start = tf.stack([x_min, y_min])\n end = tf.stack([x_max, y_max])\n bb = tf.stack([start, end], 1)\n bb_list.append(bb)\n\n center_x = 0.5*(x_max + x_min)\n center_y = 0.5*(y_max + y_min)\n center = tf.stack([center_x, center_y], 0)\n\n center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,\n lambda: tf.constant([160.0, 160.0]))\n center.set_shape([2])\n center_list.append(center)\n\n crop_size_x = x_max - x_min\n crop_size_y = y_max - y_min\n crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)\n crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,\n lambda: tf.constant([100.0]))\n crop_size.set_shape([1])\n crop_size_list.append(crop_size)\n\n bb = tf.stack(bb_list)\n center = tf.stack(center_list)\n crop_size = tf.stack(crop_size_list)\n\n return center, bb, crop_size\n\n\ndef 
detect_keypoints(scoremaps):\n \"\"\" Performs detection per scoremap for the hands keypoints. \"\"\"\n if len(scoremaps.shape) == 4:\n scoremaps = np.squeeze(scoremaps)\n s = scoremaps.shape\n assert len(s) == 3, \"This function was only designed for 3D Scoremaps.\"\n assert (s[2] < s[1]) and (s[2] < s[0]), \"Probably the input is not correct, because [H, W, C] is expected.\"\n\n keypoint_coords = np.zeros((s[2], 2))\n for i in range(s[2]):\n v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))\n keypoint_coords[i, 0] = v\n keypoint_coords[i, 1] = u\n return keypoint_coords\n\n\ndef trafo_coords(keypoints_crop_coords, centers, scale, crop_size):\n \"\"\" Transforms coords into global image coordinates. \"\"\"\n keypoints_coords = np.copy(keypoints_crop_coords)\n\n keypoints_coords -= crop_size // 2\n\n keypoints_coords /= scale\n\n keypoints_coords += centers\n\n return keypoints_coords\n\n\ndef plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):\n \"\"\" Plots a hand stick figure into a matplotlib figure. \"\"\"\n colors = np.array([[0., 0., 0.5],\n [0., 0., 0.73172906],\n [0., 0., 0.96345811],\n [0., 0.12745098, 1.],\n [0., 0.33137255, 1.],\n [0., 0.55098039, 1.],\n [0., 0.75490196, 1.],\n [0.06008855, 0.9745098, 0.90765338],\n [0.22454143, 1., 0.74320051],\n [0.40164453, 1., 0.56609741],\n [0.56609741, 1., 0.40164453],\n [0.74320051, 1., 0.22454143],\n [0.90765338, 1., 0.06008855],\n [1., 0.82861293, 0.],\n [1., 0.63979666, 0.],\n [1., 0.43645606, 0.],\n [1., 0.2476398, 0.],\n [0.96345811, 0.0442992, 0.],\n [0.73172906, 0., 0.],\n [0.5, 0., 0.]])\n\n # define connections and colors of the bones\n bones = [((0, 4), colors[0, :]),\n ((4, 3), colors[1, :]),\n ((3, 2), colors[2, :]),\n ((2, 1), colors[3, :]),\n\n ((0, 8), colors[4, :]),\n ((8, 7), colors[5, :]),\n ((7, 6), colors[6, :]),\n ((6, 5), colors[7, :]),\n\n ((0, 12), colors[8, :]),\n ((12, 11), colors[9, :]),\n ((11, 10), colors[10, :]),\n ((10, 9), colors[11, :]),\n\n ((0, 16), colors[12, :]),\n ((16, 15), colors[13, :]),\n ((15, 14), colors[14, :]),\n ((14, 13), colors[15, :]),\n\n ((0, 20), colors[16, :]),\n ((20, 19), colors[17, :]),\n ((19, 18), colors[18, :]),\n ((18, 17), colors[19, :])]\n\n for connection, color in bones:\n coord1 = coords_hw[connection[0], :]\n coord2 = coords_hw[connection[1], :]\n coords = np.stack([coord1, coord2])\n if color_fixed is None:\n axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)\n else:\n axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth)\n\n\ndef plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):\n \"\"\" Plots a hand stick figure into a matplotlib figure. 
\"\"\"\n colors = np.array([[0., 0., 0.5],\n [0., 0., 0.73172906],\n [0., 0., 0.96345811],\n [0., 0.12745098, 1.],\n [0., 0.33137255, 1.],\n [0., 0.55098039, 1.],\n [0., 0.75490196, 1.],\n [0.06008855, 0.9745098, 0.90765338],\n [0.22454143, 1., 0.74320051],\n [0.40164453, 1., 0.56609741],\n [0.56609741, 1., 0.40164453],\n [0.74320051, 1., 0.22454143],\n [0.90765338, 1., 0.06008855],\n [1., 0.82861293, 0.],\n [1., 0.63979666, 0.],\n [1., 0.43645606, 0.],\n [1., 0.2476398, 0.],\n [0.96345811, 0.0442992, 0.],\n [0.73172906, 0., 0.],\n [0.5, 0., 0.]])\n\n # define connections and colors of the bones\n bones = [((0, 4), colors[0, :]),\n ((4, 3), colors[1, :]),\n ((3, 2), colors[2, :]),\n ((2, 1), colors[3, :]),\n\n ((0, 8), colors[4, :]),\n ((8, 7), colors[5, :]),\n ((7, 6), colors[6, :]),\n ((6, 5), colors[7, :]),\n\n ((0, 12), colors[8, :]),\n ((12, 11), colors[9, :]),\n ((11, 10), colors[10, :]),\n ((10, 9), colors[11, :]),\n\n ((0, 16), colors[12, :]),\n ((16, 15), colors[13, :]),\n ((15, 14), colors[14, :]),\n ((14, 13), colors[15, :]),\n\n ((0, 20), colors[16, :]),\n ((20, 19), colors[17, :]),\n ((19, 18), colors[18, :]),\n ((18, 17), colors[19, :])]\n\n for connection, color in bones:\n coord1 = coords_xyz[connection[0], :]\n coord2 = coords_xyz[connection[1], :]\n coords = np.stack([coord1, coord2])\n if color_fixed is None:\n axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)\n else:\n axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)\n\n axis.view_init(azim=-90., elev=90.)\n\ndef plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):\n \"\"\" Plots a hand stick figure into a matplotlib figure. \"\"\"\n colors = [(0, 0, 127),\n (0, 0, 187),\n (0, 0, 246),\n (0, 32, 255),\n (0, 85, 255),\n (0, 140, 255),\n (0, 192, 255),\n (15, 248, 231),\n (57, 255, 190),\n (102, 1, 144),\n (144, 1, 102),\n (190, 1, 57),\n (231, 1, 15),\n (1, 211, 0),\n (1, 163, 0),\n (1, 111, 0),\n (1, 63, 0),\n (246, 11, 0),\n (187, 0, 0),\n (127, 0, 0)]\n \n # define connections and colors of the bones\n bones = [((0, 4), colors[0]),\n ((4, 3), colors[1]),\n ((3, 2), colors[2]),\n ((2, 1), colors[3]),\n\n ((0, 8), colors[4]),\n ((8, 7), colors[5]),\n ((7, 6), colors[6]),\n ((6, 5), colors[7]),\n\n ((0, 12), colors[8]),\n ((12, 11), colors[9]),\n ((11, 10), colors[10]),\n ((10, 9), colors[11]),\n\n ((0, 16), colors[12]),\n ((16, 15), colors[13]),\n ((15, 14), colors[14]),\n ((14, 13), colors[15]),\n\n ((0, 20), colors[16]),\n ((20, 19), colors[17]),\n ((19, 18), colors[18]),\n ((18, 17), colors[19])]\n\n\n for connection, color in bones:\n coord1 = coords_hw[connection[0], :]\n coord2 = coords_hw[connection[1], :]\n coords = np.stack([coord1, coord2])\n \n coord1_t = (int(coord1[1]), int(coord1[0]))\n coord2_t = (int(coord2[1]), int(coord2[0]))\n \n if color_fixed is None:\n cv2.line(image, coord2_t, coord1_t, color, linewidth)\n else:\n cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth)\n\n\nclass LearningRateScheduler:\n \"\"\"\n Provides scalar tensors at certain iteration as is needed for a multistep learning rate schedule.\n \"\"\"\n def __init__(self, steps, values):\n self.steps = steps\n self.values = values\n\n assert len(steps)+1 == len(values), \"There must be one more element in value as step.\"\n\n def get_lr(self, global_step):\n with tf.name_scope('lr_scheduler'):\n\n if len(self.values) == 1: #1 value -> no step\n learning_rate = tf.constant(self.values[0])\n elif len(self.values) == 2: #2 values -> one step\n cond 
= tf.greater(global_step, self.steps[0])\n learning_rate = tf.where(cond, self.values[1], self.values[0])\n else: # n values -> n-1 steps\n cond_first = tf.less(global_step, self.steps[0])\n\n cond_between = list()\n for ind, step in enumerate(range(0, len(self.steps)-1)):\n cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),\n tf.greater_equal(global_step, self.steps[ind])))\n\n cond_last = tf.greater_equal(global_step, self.steps[-1])\n\n cond_full = [cond_first]\n cond_full.extend(cond_between)\n cond_full.append(cond_last)\n\n cond_vec = tf.stack(cond_full)\n lr_vec = tf.stack(self.values)\n\n learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))\n\n learning_rate = tf.reduce_sum(learning_rate)\n\n return learning_rate\n\n\nclass EvalUtil:\n \"\"\" Util class for evaluation networks.\n \"\"\"\n def __init__(self, num_kp=21):\n # init empty data storage\n self.data = list()\n self.num_kp = num_kp\n for _ in range(num_kp):\n self.data.append(list())\n\n def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):\n \"\"\" Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. \"\"\"\n keypoint_gt = np.squeeze(keypoint_gt)\n keypoint_pred = np.squeeze(keypoint_pred)\n keypoint_vis = np.squeeze(keypoint_vis).astype('bool')\n\n assert len(keypoint_gt.shape) == 2\n assert len(keypoint_pred.shape) == 2\n assert len(keypoint_vis.shape) == 1\n\n # calc euclidean distance\n diff = keypoint_gt - keypoint_pred\n euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))\n\n num_kp = keypoint_gt.shape[0]\n for i in range(num_kp):\n if keypoint_vis[i]:\n self.data[i].append(euclidean_dist[i])\n\n def _get_pck(self, kp_id, threshold):\n \"\"\" Returns pck for one keypoint for the given threshold. \"\"\"\n if len(self.data[kp_id]) == 0:\n return None\n\n data = np.array(self.data[kp_id])\n pck = np.mean((data <= threshold).astype('float'))\n return pck\n\n def _get_epe(self, kp_id):\n \"\"\" Returns end point error for one keypoint. \"\"\"\n if len(self.data[kp_id]) == 0:\n return None, None\n\n data = np.array(self.data[kp_id])\n epe_mean = np.mean(data)\n epe_median = np.median(data)\n return epe_mean, epe_median\n\n def get_measures(self, val_min, val_max, steps):\n \"\"\" Outputs the average mean and median error as well as the pck score. 
\"\"\"\n thresholds = np.linspace(val_min, val_max, steps)\n thresholds = np.array(thresholds)\n norm_factor = np.trapz(np.ones_like(thresholds), thresholds)\n\n # init mean measures\n epe_mean_all = list()\n epe_median_all = list()\n auc_all = list()\n pck_curve_all = list()\n\n # Create one plot for each part\n for part_id in range(self.num_kp):\n # mean/median error\n mean, median = self._get_epe(part_id)\n\n if mean is None:\n # there was no valid measurement for this keypoint\n continue\n\n epe_mean_all.append(mean)\n epe_median_all.append(median)\n\n # pck/auc\n pck_curve = list()\n for t in thresholds:\n pck = self._get_pck(part_id, t)\n pck_curve.append(pck)\n\n pck_curve = np.array(pck_curve)\n pck_curve_all.append(pck_curve)\n auc = np.trapz(pck_curve, thresholds)\n auc /= norm_factor\n auc_all.append(auc)\n\n epe_mean_all = np.mean(np.array(epe_mean_all))\n epe_median_all = np.mean(np.array(epe_median_all))\n auc_all = np.mean(np.array(auc_all))\n pck_curve_all = np.mean(np.array(pck_curve_all), 0) # mean only over keypoints\n\n return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds\n\n\ndef load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):\n \"\"\" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. \"\"\"\n reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\n var_to_shape_map = reader.get_variable_to_shape_map()\n\n # Remove everything from the discard list\n if discard_list is not None:\n num_disc = 0\n var_to_shape_map_new = dict()\n for k, v in var_to_shape_map.items():\n good = True\n for dis_str in discard_list:\n if dis_str in k:\n good = False\n\n if good:\n var_to_shape_map_new[k] = v\n else:\n num_disc += 1\n var_to_shape_map = dict(var_to_shape_map_new)\n print('Discarded %d items' % num_disc)\n\n # rename everything according to rename_dict\n num_rename = 0\n var_to_shape_map_new = dict()\n for name in var_to_shape_map.keys():\n new_name = name\n if rename_dict is not None:\n for rename_str in rename_dict.keys():\n if rename_str in name:\n new_name = new_name.replace(rename_str, rename_dict[rename_str])\n num_rename += 1\n var_to_shape_map_new[new_name] = reader.get_tensor(name)\n var_to_shape_map = dict(var_to_shape_map_new)\n\n init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)\n session.run(init_op, init_feed)\n print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))\n\n\ndef calc_auc(x, y):\n \"\"\" Given x and y values it calculates the approx. 
integral and normalizes it: area under curve\"\"\"\n integral = np.trapz(y, x)\n norm = np.trapz(np.ones_like(y), x)\n\n return integral / norm\n\n\ndef get_stb_ref_curves():\n \"\"\"\n Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:\n Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016\n \"\"\"\n curve_list = list()\n thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])\n pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])\n curve_list.append((thresh_mm, pso_b1, 'PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1)))\n icppso_b1 = np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])\n curve_list.append((thresh_mm, icppso_b1, 'ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1)))\n chpr_b1 = np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])\n curve_list.append((thresh_mm, chpr_b1, 'CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1)))\n return curve_list\n" ]
[ [ "tensorflow.reduce_min", "numpy.ones_like", "tensorflow.constant_initializer", "tensorflow.nn.conv2d", "tensorflow.nn.dilation2d", "numpy.median", "tensorflow.contrib.layers.xavier_initializer", "numpy.copy", "tensorflow.contrib.framework.assign_from_values", "tensorflow.ones", "tensorflow.matmul", "tensorflow.reshape", "numpy.mean", "tensorflow.zeros_like", "tensorflow.stack", "tensorflow.tile", "tensorflow.nn.softmax", "tensorflow.greater", "tensorflow.cast", "tensorflow.concat", "tensorflow.less", "tensorflow.argmax", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.squeeze", "numpy.trapz", "numpy.argmax", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.nn.conv2d_transpose", "tensorflow.nn.bias_add", "tensorflow.nn.max_pool", "tensorflow.nn.dropout", "numpy.square", "numpy.array", "tensorflow.range", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.where", "tensorflow.round", "numpy.stack", "tensorflow.get_variable", "tensorflow.name_scope", "tensorflow.reduce_sum", "numpy.squeeze", "tensorflow.boolean_mask", "tensorflow.check_numerics", "tensorflow.sparse_to_dense", "tensorflow.multiply", "tensorflow.greater_equal", "tensorflow.equal", "tensorflow.is_finite", "tensorflow.reduce_max", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.maximum", "numpy.linspace" ] ]
Mopolino8/postcipes
[ "5d67b383aa3e314b581b5262ba95f734ecb6369f" ]
[ "postcipes/bfs.py" ]
[ "# This file is part of postcipes\r\n# (c) Timofey Mukha\r\n# The code is released under the MIT Licence.\r\n# See LICENCE.txt and the Legal section in the README for more information\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom .postcipe import Postcipe\r\nimport turbulucid as tbl\r\nimport numpy as np\r\nimport h5py\r\n\r\n__all__ = [\"BackwardFacingStep\"]\r\n\r\n\r\nclass BackwardFacingStep(Postcipe):\r\n\r\n def __init__(self, path, nu, uRef):\r\n Postcipe.__init__(self)\r\n self.case = tbl.Case(path)\r\n self.nu = nu\r\n self.uRef = uRef\r\n self.h = np.sum(tbl.edge_lengths(self.case, \"stepB\"))\r\n self.H = np.sum(tbl.edge_lengths(self.case, \"outletB\")) - self.h\r\n self.eRatio = (self.H + self.h)/self.H\r\n\r\n self.tau1 = \\\r\n self.case.boundary_data(\"lowB1\")[1][\"wallShearStressMean\"][:, 0]\r\n self.tau2 = \\\r\n self.case.boundary_data(\"lowB2\")[1][\"wallShearStressMean\"][:, 0]\r\n self.tau = np.append(self.tau1, self.tau2)\r\n\r\n self.x1 = self.case.boundary_data(\"lowB1\")[0][:, 0]\r\n self.x2 = self.case.boundary_data(\"lowB2\")[0][:, 0]\r\n self.x = np.append(self.x1, self.x2)\r\n\r\n self.idx105h = np.argmin(np.abs(self.x1 + 1.05*self.h))\r\n\r\n self.uTop = self.case.boundary_data(\"upB\")[1]['UMean'][:, 0]\r\n\r\n self.theta = None\r\n self.delta99 = None\r\n self.edgeU = None\r\n\r\n\r\n\r\n def compute_delta99(self, u0='max', interpolate=True):\r\n self.delta99 = np.zeros(self.x1.shape[0])\r\n self.edgeU = np.zeros(self.x1.shape[0])\r\n\r\n for i in range(self.x1.shape[0]):\r\n x = self.x1[i]\r\n y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),\r\n correctDistance=True)\r\n self.delta99[i] = tbl.delta_99(y, v['UMean'][:, 0], u0=u0,\r\n interpolate=interpolate)\r\n if u0 is 'max':\r\n self.edgeU[i] = np.max(v['UMean'][:, 0])\r\n elif u0 is 'last':\r\n self.edgeU[i] = v['UMean'][-1, 0]\r\n\r\n\r\n self.reDelta99 = self.delta99*self.edgeU/self.nu\r\n self.reTau = self.delta99*np.sqrt(np.abs(self.tau1))/self.nu\r\n self.delta99105h = self.delta99[self.idx105h]\r\n return 0\r\n\r\n def compute_theta(self, u0='max', interpolate=True):\r\n self.theta = np.zeros(self.x1.shape[0])\r\n self.edgeU = np.zeros(self.x1.shape[0])\r\n\r\n for i in range(self.x1.shape[0]):\r\n x = self.x1[i]\r\n y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),\r\n correctDistance=True)\r\n self.theta[i] = tbl.momentum_thickness(y, v['UMean'][:, 0], u0=u0,\r\n interpolate=interpolate)\r\n if u0 is 'max':\r\n self.edgeU[i] = np.max(v['UMean'][:, 0])\r\n elif u0 is 'last':\r\n self.edgeU[i] = v['UMean'][-1, 0]\r\n\r\n self.reTheta = self.theta*self.edgeU/self.nu\r\n self.reTheta105h = self.reTheta[self.idx105h]\r\n return 0\r\n\r\n def save(self, name):\r\n f = h5py.File(name, 'w')\r\n\r\n f.attrs[\"h\"] = self.h\r\n f.attrs[\"H\"] = self.H\r\n f.attrs[\"nu\"] = self.nu\r\n f.attrs[\"eRatio\"] = self.eRatio\r\n f.attrs[\"uRef\"] = self.uRef\r\n f.attrs[\"idx105h\"] = self.idx105h\r\n\r\n f.create_dataset(\"x1\", data=self.x1)\r\n f.create_dataset(\"x2\", data=self.x2)\r\n f.create_dataset(\"x\", data=self.x)\r\n\r\n f.create_dataset(\"uTop\", data=self.uTop)\r\n\r\n f.create_dataset(\"tau1\", data=self.tau1)\r\n f.create_dataset(\"tau2\", data=self.tau2)\r\n f.create_dataset(\"tau\", data=self.tau)\r\n\r\n if self.theta is None:\r\n self.compute_theta()\r\n if self.delta99 is None:\r\n self.compute_delta99()\r\n f.create_dataset(\"theta\", data=self.theta)\r\n f.create_dataset(\"delta99\", 
data=self.delta99)\r\n f.create_dataset(\"reTheta\", data=self.reTheta)\r\n f.create_dataset(\"reTau\", data=self.reTau)\r\n f.create_dataset(\"reDelta99\", data=self.reDelta99)\r\n\r\n f.close()\r\n\r\n def load(self, name):\r\n f = h5py.File(name, 'r')\r\n\r\n self.h = f.attrs[\"h\"]\r\n self.H = f.attrs[\"H\"]\r\n self.nu = f.attrs[\"nu\"]\r\n self.eRatio = f.attrs[\"eRatio\"]\r\n self.uRef = f.attrs[\"uRef\"]\r\n self.idx105h = f.attrs[\"idx105h\"]\r\n\r\n self.x1 = f[\"x1\"][:]\r\n self.x2 = f[\"x2\"][:]\r\n self.x = f[\"x\"][:]\r\n\r\n self.uTop = f[\"uTop\"][:]\r\n\r\n self.tau1 = f[\"tau1\"][:]\r\n self.tau2 = f[\"tau2\"][:]\r\n self.tau = f[\"tau\"][:]\r\n\r\n self.theta = f[\"theta\"][:]\r\n self.delta99 = f[\"delta99\"][:]\r\n self.reTheta = f[\"reTheta\"][:]\r\n self.reTau = f[\"reTau\"][:]\r\n self.reDelta99 = f[\"reDelta99\"][:]\r\n f.close()\r\n\r\n" ]
[ [ "numpy.max", "numpy.abs", "numpy.zeros", "numpy.append" ] ]
GreatGameDota/CNN-Numpy-1D-Images
[ "6016701b54d7475b0c294355801bf8f6ce534852" ]
[ "CNN/Dense.py" ]
[ "import numpy as np\n\n\nclass Dense():\n\n def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', input_shape=None):\n self._units = units\n self._activation = activation\n self._use_bias = use_bias\n self._kernal_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n self._bias = np.zeros((units, 1))\n\n def setPrevUnits(self, units):\n self._prevUnits = units\n self._weights = np.zeros((self._units, units))\n self._weights = np.random.standard_normal(\n size=self._weights.shape) * 0.01\n\n def forward(self, arr):\n out = self._weights.dot(arr) + self._bias\n if self._activation == \"relu\":\n out[out <= 0] = 0\n if self._activation == \"softmax\":\n out = self.softmax(out)\n return out\n\n def backwardFirst(self, dout, z):\n dw = dout.dot(z.T)\n db = np.sum(dout, axis=1)\n db = np.reshape(db, (db.shape[0], 1))\n return dw, db\n\n def backward(self, dout, next_weights, flat, z):\n dz = next_weights.T.dot(dout)\n if (self._activation == \"relu\"):\n dz[z <= 0] = 0\n dw = dz.dot(flat.T)\n db = np.sum(dz, axis=1).reshape(self._bias.shape)\n return dw, db, dz\n\n def softmax(self, X):\n out = np.exp(X)\n return out/np.sum(out)\n" ]
[ [ "numpy.random.standard_normal", "numpy.reshape", "numpy.zeros", "numpy.sum", "numpy.exp" ] ]
leobxpan/robovat
[ "0d360c34c677cf018c4daab0b8e758943ae1d2c1" ]
[ "robovat/simulation/simulator.py" ]
[ "\"\"\"The Simulator class.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\n\nimport numpy as np\nimport pybullet\n\nfrom robovat.math.pose import Pose\nfrom robovat.simulation import physics\nfrom robovat.simulation.body import Body\nfrom robovat.simulation.controllable_body import ControllableBody\nfrom robovat.simulation.constraint import Constraint\nfrom robovat.simulation.controllable_constraint import ControllableConstraint\n\n\nclass Simulator(object):\n \"\"\"The Simulator class.\"\"\"\n\n def __init__(self,\n assets_dir=None,\n physics_backend='BulletPhysics',\n time_step=1e-3,\n gravity=[0, 0, -9.8],\n worker_id=0,\n use_visualizer=False):\n \"\"\"Initialize the simulator.\n\n Args:\n assets_dir: The assets directory.\n physics_backend: Name of the physics engine backend.\n time_step: Time step of the simulation.\n gravity: The gravity as a 3-dimensional vector.\n worker_id: The id of the multi-threaded simulation.\n use_visualizer: Render the simulation use the debugging visualizer\n if True.\n \"\"\"\n self._assets_dir = os.path.abspath(assets_dir or './')\n self._gravity = gravity\n\n # Create the physics backend.\n physics_class = getattr(physics, physics_backend)\n self._physics = physics_class(\n time_step=time_step,\n use_visualizer=use_visualizer,\n worker_id=worker_id)\n\n self._num_steps = 0\n\n def __del__(self):\n \"\"\"Delete the simulator.\"\"\"\n del self._physics\n\n @property\n def assets_dir(self):\n return self._assets_dir\n\n @property\n def physics(self):\n return self._physics\n\n @property\n def bodies(self):\n return self._bodies\n\n @property\n def num_steps(self):\n return self._num_steps\n\n @property\n def time_step(self):\n return self.physics.time_step\n\n @property\n def constraints(self):\n return self._constraints\n\n def reset(self):\n \"\"\"Reset the simulation.\"\"\"\n self.physics.reset()\n self.physics.set_gravity(self._gravity)\n self._bodies = dict()\n self._constraints = dict()\n self._num_steps = 0\n\n def start(self):\n \"\"\"Start the simulation.\"\"\"\n self.physics.start()\n self._num_steps = 0\n\n def step(self):\n \"\"\"Take a simulation step.\"\"\"\n for body in self.bodies.values():\n body.update()\n\n for constraint in self.constraints.values():\n constraint.update()\n\n self.physics.step()\n self._num_steps += 1\n\n def add_body(self,\n filename,\n pose=None,\n scale=1.0,\n is_static=False,\n is_controllable=False,\n name=None):\n \"\"\"Add a body to the simulation.\n\n Args:\n filename: The path to the URDF file to be loaded. 
If the path is\n not absolute path, it will be joined with the assets directory.\n pose: The initial pose as an instance of Pose.\n scale: The scaling factor of the body.\n is_static: If True, set the base of the body to be static.\n is_controllable: If True, the body can apply motor controls.\n name: Used as a reference of the body in this Simulator instance.\n\n Returns:\n An instance of Body.\n \"\"\"\n if os.path.isabs(filename):\n path = filename\n else:\n path = os.path.join(self._assets_dir, filename)\n\n if pose is None:\n pose = [[0, 0, 0], [0, 0, 0]]\n\n # Create the body.\n if is_controllable:\n body = ControllableBody(\n simulator=self,\n filename=path,\n pose=pose,\n scale=scale,\n is_static=is_static,\n name=name)\n else:\n body = Body(\n simulator=self,\n filename=path,\n pose=pose,\n scale=scale,\n is_static=is_static,\n name=name)\n\n # Add the body to the dictionary.\n self._bodies[body.name] = body\n\n return body\n\n def remove_body(self, name):\n \"\"\"Remove the body.\n\n Args:\n body: An instance of Body.\n \"\"\"\n self.physics.remove_body(self._bodies[name].uid)\n del self._bodies[name]\n\n def add_constraint(self,\n parent,\n child,\n joint_type='fixed',\n joint_axis=[0, 0, 0],\n parent_frame_pose=None,\n child_frame_pose=None,\n max_force=None,\n max_linear_velocity=None,\n max_angular_velocity=None,\n is_controllable=False,\n name=None):\n \"\"\"Add a constraint to the simulation.\n\n Args:\n parent: The parent entity as an instance of Entity.\n child: The child entity as an instance of Entity.\n joint_type: The type of the joint.\n joint_axis: The axis of the joint.\n parent_frame_pose: The pose of the joint in the parent frame.\n child_frame_pose: The pose of the joint in the child frame.\n max_force: Max force the constraint can apply.\n max_linear_velocity: Maximum linear velocity.\n max_angular_velocity: Max angular velocity.\n is_controllable: If True, the constraint can apply motor controls.\n\n Returns:\n An instance of Constraint.\n \"\"\"\n # Create the constraint.\n if is_controllable:\n constraint = ControllableConstraint(\n parent,\n child,\n joint_type,\n joint_axis,\n parent_frame_pose,\n child_frame_pose,\n max_force=max_force,\n max_linear_velocity=max_linear_velocity,\n max_angular_velocity=max_angular_velocity,\n name=name)\n else:\n assert max_linear_velocity is None\n assert max_angular_velocity is None\n constraint = Constraint(\n parent,\n child,\n joint_type,\n joint_axis,\n parent_frame_pose,\n child_frame_pose,\n max_force=max_force,\n name=name)\n\n # Add the constraint to the dictionary.\n self._constraints[constraint.name] = constraint\n\n return constraint\n\n def receive_robot_commands(self,\n robot_command,\n component_type='body'):\n \"\"\"Receive a robot command.\n\n Args:\n robot_command: An instance of RobotCommand.\n component_type: Either 'body' or 'constraint'.\n \"\"\"\n if component_type == 'body':\n component = self._bodies[robot_command.component]\n elif component_type == 'constraint':\n component = self._constraints[robot_command.component]\n else:\n raise ValueError('Unrecognized component type: %r' %\n component_type)\n\n command_method = getattr(component, robot_command.command_type)\n command_method(**robot_command.arguments)\n\n def check_contact(self, entity_a, entity_b=None):\n \"\"\"Check if the loaded object is stable.\n\n Args:\n entity_a: The first entity.\n entity_b: The second entity, None for any entities.\n\n Returns:\n True if they have contacts, False otherwise.\n \"\"\"\n def _check_contact(entity_a, 
entity_b=None):\n a_uid = entity_a.uid\n if entity_b is None:\n b_uid = None\n else:\n b_uid = entity_b.uid\n\n contact_points = self._physics.get_contact_points(\n a_uid, b_uid)\n has_contact = len(contact_points) > 0\n\n return has_contact\n\n if not isinstance(entity_a, (list, tuple)):\n entities_a = [entity_a]\n else:\n entities_a = entity_a\n\n if not isinstance(entity_b, (list, tuple)):\n entities_b = [entity_b]\n else:\n entities_b = entity_b\n\n has_contact = False\n\n for a in entities_a:\n for b in entities_b:\n if _check_contact(a, b):\n has_contact = True\n break\n\n return has_contact\n\n def check_stable(self,\n body,\n linear_velocity_threshold,\n angular_velocity_threshold):\n \"\"\"Check if the loaded object is stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n\n Returns:\n is_stable: True if the linear velocity and the angular velocity are\n almost zero; False otherwise.\n \"\"\"\n linear_velocity = np.linalg.norm(body.linear_velocity)\n angular_velocity = np.linalg.norm(body.angular_velocity)\n\n if linear_velocity_threshold is None:\n has_linear_velocity = False\n else:\n has_linear_velocity = (\n linear_velocity >= linear_velocity_threshold)\n\n if angular_velocity_threshold is None:\n has_angular_velocity = False\n else:\n has_angular_velocity = (\n angular_velocity >= angular_velocity_threshold)\n\n is_stable = (not has_linear_velocity) and (not has_angular_velocity)\n\n return is_stable\n\n def wait_until_stable(self,\n body,\n linear_velocity_threshold=0.005,\n angular_velocity_threshold=0.005,\n check_after_steps=100,\n min_stable_steps=100,\n max_steps=2000):\n \"\"\"Wait until the objects are stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n check_after_steps: Number of steps before checking.\n min_stable_steps: Minimum number of steps required to be stable.\n max_steps: Maximum steps to wait for objects being stable.\n \"\"\"\n if isinstance(body, (list, tuple)):\n body_list = body\n else:\n body_list = [body]\n\n num_steps = 0\n num_stable_steps = 0\n\n while(1):\n self.step()\n num_steps += 1\n\n if num_steps < check_after_steps:\n continue\n\n # Check if all bodies are stable.\n all_stable = True\n for b in body_list:\n is_stable = self.check_stable(\n b,\n linear_velocity_threshold,\n angular_velocity_threshold)\n\n if not is_stable:\n all_stable = False\n break\n\n if all_stable:\n num_stable_steps += 1\n\n if (num_stable_steps >= min_stable_steps or\n num_steps >= max_steps):\n break\n\n def plot_pose(self,\n pose,\n axis_length=1.0,\n text=None,\n text_size=1.0,\n text_color=[0, 0, 0]):\n \"\"\"Plot a 6-DoF pose or a frame in the debugging visualizer.\n\n Args:\n pose: The pose to be plot.\n axis_length: The length of the axes.\n text: Text showing up next to the frame.\n text_size: Size of the text.\n text_color: Color of the text.\n \"\"\"\n if not isinstance(pose, Pose):\n pose = Pose(pose)\n\n origin = pose.position\n x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)\n y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)\n z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)\n\n pybullet.addUserDebugLine(\n origin,\n x_end,\n lineColorRGB=[1, 0, 0],\n lineWidth=2)\n\n 
pybullet.addUserDebugLine(\n origin,\n y_end,\n lineColorRGB=[0, 1, 0],\n lineWidth=2)\n\n pybullet.addUserDebugLine(\n origin,\n z_end,\n lineColorRGB=[0, 0, 1],\n lineWidth=2)\n\n if text is not None:\n pybullet.addUserDebugText(\n text,\n origin,\n text_color,\n text_size)\n\n def plot_line(self,\n start,\n end,\n line_color=[0, 0, 0],\n line_width=1):\n \"\"\"Plot a pose or a frame in the debugging visualizer.\n\n Args:\n start: Starting point of the line.\n end: Ending point of the line.\n line_color: Color of the line.\n line_width: Width of the line.\n \"\"\"\n pybullet.addUserDebugLine(\n start,\n end,\n lineColorRGB=line_color,\n lineWidth=line_width)\n\n def clear_visualization(self):\n \"\"\"Clear all visualization items.\"\"\"\n pybullet.removeAllUserDebugItems()\n" ]
[ [ "numpy.linalg.norm", "numpy.dot" ] ]
akhti/torch-blocksparse
[ "49d029660dfa0fcf350f0e20f820872e9973973e" ]
[ "setup.py" ]
[ "#!/usr/bin/env python\n\nimport os\nimport torch\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension\n\ncmdclass = {}\ncmdclass['build_ext'] = BuildExtension\n\n\nimport setuptools\n\next_modules = [\n CppExtension(name='torch_blocksparse_cpp_utils',\n sources=['csrc/utils.cpp'],\n extra_compile_args={'cxx': ['-O2',\n '-fopenmp']})\n]\n\nsetuptools.setup(\n name = 'torch-blocksparse',\n version = '1.1.1',\n description = 'Block-sparse primitives for PyTorch',\n author = 'Philippe Tillet',\n maintainer = 'Philippe Tillet',\n maintainer_email = 'ptillet@g.harvard.edu',\n install_requires = ['triton', 'torch'],\n url = 'https://github.com/ptillet/torch-blocksparse',\n test_suite = 'nose.collector',\n tests_require = ['nose', 'parameterized'],\n license = 'MIT',\n packages = find_packages(exclude=[\"csrc\"]),\n ext_modules = ext_modules,\n cmdclass = cmdclass\n)\n" ]
[ [ "torch.utils.cpp_extension.CppExtension" ] ]
oustar/scipylearn
[ "f3f3223f1170b39dc420606bdf989b6fcb705410" ]
[ "perceptron_np.py" ]
[ "\nimport numpy as np\n\nclass Perceptron(object):\n\n def __init__(self, input_num, activator):\n\n self.activator = activator\n self.weights = np.zeros((input_num))\n self.bias = 0.0\n\n def __str__(self):\n\n return 'weights\\t:%s\\nbias\\t:%f\\n' % (self.weights, self.bias)\n\n def predict(self, input_vec):\n \n return self.activator(np.dot(input_vec, self.weights) + self.bias)\n\n def train(self, input_vecs, labels, iteration, rate):\n\n for _ in range(iteration):\n self._one_iteration(input_vecs, labels, rate)\n\n def _one_iteration(self, input_vecs, labels, rate):\n\n samples = zip(input_vecs, labels)\n\n for input_vec, label in samples:\n\n output = self.predict(input_vec)\n\n self._update_weight(input_vec, output, label, rate)\n\n def _update_weight(self, input_vec, output, label, rate):\n\n delat = label - output\n self.weights += rate * delat * input_vec\n self.bias += rate * delat\n\n\ndef f(x):\n\n if x > 0: return 1\n else: return 0\n\n\ndef get_train_dataset():\n\n vecs = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])\n labels = np.array([1, 0, 0, 0])\n\n return vecs, labels\n\n\ndef train_and_perceptron():\n\n p = Perceptron(2, f)\n\n input_vecs, labels = get_train_dataset()\n p.train(input_vecs, labels, 10, 0.1)\n\n return p\n\n\nif __name__ == \"__main__\":\n\n and_perceptron = train_and_perceptron()\n print(and_perceptron)\n\n print ('1 and 1 = ' , and_perceptron.predict([1, 1]))\n print ('1 and 0 = ' , and_perceptron.predict([1, 0]))\n print ('0 and 1 = ' , and_perceptron.predict([0, 1]))\n print ('0 and 0 = ' , and_perceptron.predict([0, 0]))\n\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros" ] ]
njamalova/whale_tail_identifier
[ "507ffe8838b42ca75dbd696c2faaa71252f417da" ]
[ "train_valid_split.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport cv2\nimport tensorflow as tf\n\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n# In[3]:\n\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\n# ### Load the Training Data\n\n# In[4]:\n\n# curwd = str(os.getcwd())\n# targetwd = '\\\\data\\\\train'\n# path_train = curwd + targetwd\npath_train = '...s\\\\Documents\\\\whale_identification\\\\whale_identification\\\\data\\\\train\\\\'\ntrain = [os.path.join(path_train,f) for f in os.listdir(path_train) if f.endswith('.jpg')]\n\n\n\n# In[6]:\n\n\ntrain_labels = pd.read_csv(\"df_train.csv\")\n\n\n# In[7]:\n\n\ntrain_labels.head()\n\n\n# In[8]:\n\n\nunique_whales = train_labels['Id'].unique()\nlen(unique_whales)\n\n\n# ### Train-Validation Split\n\n# In[9]:\n\n\ndef train_valid_split(df):\n\n # find unique categories of whales in our dataframe\n unique_whales = train_labels['Id'].unique()\n\n # map the images to categories\n mapping = {}\n for whale in unique_whales:\n lst_of_images = list(train_labels[train_labels['Id'] == whale]['Image'].values)\n mapping[whale] = lst_of_images\n\n # perform manual train/validation split to ensure balanced data in both sets (i.e. all categories are represented)\n train_revised = []\n valid_revised = []\n\n for v in mapping.values():\n cut = int(0.2*len(v)) # sample & 80-20 split\n cut2 = int(0.25*len(v))\n tr = v[:cut]\n val = v[cut:cut2]\n train_revised.append(tr)\n valid_revised.append(val)\n\n return train_revised, valid_revised\n\ndef train_valid_dict_generator(train_list, valid_list, df):\n \n # create a dictionary mapping new training set to correct labels\n train_df = {}\n\n for i in train_list:\n for j in i:\n lbl = df[df['Image'] == j]['Id'].values[0]\n train_df[j] = lbl\n \n # create a dictionary mapping new validation set to correct labels\n valid_df = {}\n\n for i in valid_list:\n for j in i:\n lbl = df[df['Image'] == j]['Id'].values[0]\n valid_df[j] = lbl\n\n return train_df, valid_df" ]
[ [ "pandas.read_csv" ] ]
actboy/espnet
[ "66f0f8382b0e1195bed7c280c29711f8436b3db4" ]
[ "espnet2/gan_tts/espnet_model.py" ]
[ "# Copyright 2021 Tomoki Hayashi\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"GAN-based TTS ESPnet model.\"\"\"\n\nfrom contextlib import contextmanager\nfrom distutils.version import LooseVersion\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\n\nimport torch\n\nfrom typeguard import check_argument_types\n\nfrom espnet2.gan_tts.abs_gan_tts import AbsGANTTS\nfrom espnet2.layers.abs_normalize import AbsNormalize\nfrom espnet2.layers.inversible_interface import InversibleInterface\nfrom espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel\nfrom espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract\n\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.6.0\"):\n from torch.cuda.amp import autocast\nelse:\n # Nothing to do if torch < 1.6.0\n @contextmanager\n def autocast(enabled=True): # NOQA\n yield\n\n\nclass ESPnetGANTTSModel(AbsGANESPnetModel):\n \"\"\"GAN-based TTS ESPnet model.\"\"\"\n\n def __init__(\n self,\n feats_extract: Optional[AbsFeatsExtract],\n normalize: Optional[AbsNormalize and InversibleInterface],\n tts: AbsGANTTS,\n ):\n \"\"\"Initialize ESPnetGANTTSModel module.\"\"\"\n assert check_argument_types()\n super().__init__()\n self.feats_extract = feats_extract\n self.normalize = normalize\n self.tts = tts\n assert hasattr(\n tts, \"generator\"\n ), \"generator module must be resistered as tts.generator\"\n assert hasattr(\n tts, \"discriminator\"\n ), \"discriminator module must be resistered as tts.discriminator\"\n\n def forward(\n self,\n text: torch.Tensor,\n text_lengths: torch.Tensor,\n speech: torch.Tensor,\n speech_lengths: torch.Tensor,\n spembs: Optional[torch.Tensor] = None,\n sids: Optional[torch.Tensor] = None,\n lids: Optional[torch.Tensor] = None,\n forward_generator: bool = True,\n ) -> Dict[str, Any]:\n \"\"\"Return generator or discriminator loss with dict format.\n\n Args:\n text (Tensor): Text index tensor (B, T_text).\n text_lengths (Tensor): Text length tensor (B,).\n speech (Tensor): Speech waveform tensor (B, T_wav).\n speech_lengths (Tensor): Speech length tensor (B,).\n spembs (Optional[Tensor]): Speaker embedding tensor (B, D).\n sids (Optional[Tensor]): Speaker ID tensor (B, 1).\n lids (Optional[Tensor]): Language ID tensor (B, 1).\n forward_generator (bool): Whether to forward generator.\n\n Returns:\n Dict[str, Any]:\n - loss (Tensor): Loss scalar tensor.\n - stats (Dict[str, float]): Statistics to be monitored.\n - weight (Tensor): Weight tensor to summarize losses.\n - optim_idx (int): Optimizer index (0 for G and 1 for D).\n\n \"\"\"\n with autocast(False):\n # Extract features\n feats = None\n if self.feats_extract is not None:\n feats, feats_lengths = self.feats_extract(speech, speech_lengths)\n\n # Normalize\n if self.normalize is not None:\n feats, feats_lengths = self.normalize(feats, feats_lengths)\n\n # Make batch for tts inputs\n batch = {}\n batch.update(text=text, text_lengths=text_lengths)\n batch.update(forward_generator=forward_generator)\n\n # Update kwargs for additional auxiliary inputs\n if feats is not None:\n batch.update(feats=feats, feats_lengths=feats_lengths)\n if self.tts.require_raw_speech:\n batch.update(speech=speech, speech_lengths=speech_lengths)\n if spembs is not None:\n batch.update(spembs=spembs)\n if sids is not None:\n batch.update(sids=sids)\n if lids is not None:\n batch.update(lids=lids)\n\n return self.tts(**batch)\n\n def collect_feats(\n self,\n text: torch.Tensor,\n text_lengths: torch.Tensor,\n speech: torch.Tensor,\n 
speech_lengths: torch.Tensor,\n spembs: Optional[torch.Tensor] = None,\n sids: Optional[torch.Tensor] = None,\n lids: Optional[torch.Tensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Calculate features and return them as a dict.\n\n Args:\n text (Tensor): Text index tensor (B, T_text).\n text_lengths (Tensor): Text length tensor (B,).\n speech (Tensor): Speech waveform tensor (B, T_wav).\n speech_lengths (Tensor): Speech length tensor (B, 1).\n spembs (Optional[Tensor]): Speaker embedding tensor (B, D).\n sids (Optional[Tensor]): Speaker index tensor (B, 1).\n lids (Optional[Tensor]): Language ID tensor (B, 1).\n\n Returns:\n Dict[str, Tensor]: Dict of features.\n\n \"\"\"\n feats = None\n if self.feats_extract is not None:\n feats, feats_lengths = self.feats_extract(speech, speech_lengths)\n feats_dict = {}\n if feats is not None:\n feats_dict.update(feats=feats, feats_lengths=feats_lengths)\n\n return feats_dict\n" ]
[ [ "torch.cuda.amp.autocast" ] ]
c-rizz/autoencoding_rl
[ "65775630e87184c8809a31a8ef980853d5b49f9f" ]
[ "src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py" ]
[ "\nfrom typing import Tuple\n\nimport torch as th\nimport torch.nn as nn\nfrom torchvision import transforms\n\nfrom autoencoding_rl.latent_extractors.autoencoder.SimpleEncoder import SimpleEncoder\nfrom autoencoding_rl.latent_extractors.autoencoder.SimpleDecoder import SimpleDecoder\nfrom autoencoding_rl.utils import Transition\n\nclass DynAutoencoder(nn.Module):\n \n def __init__(self, observation_width: int,\n observation_height: int,\n observation_channels_num: int,\n dyn_encoding_size: int,\n static_encoding_size: int,\n action_size: int,\n dynamics_nn_arch: Tuple[int, int]):\n\n super().__init__()\n\n self._observation_height = observation_height\n self._observation_width = observation_width\n self._dyn_encoding_size = dyn_encoding_size\n self._static_encoding_size = static_encoding_size\n self._action_size = action_size\n self._observation_channels_num = observation_channels_num\n self._dynamics_nn_arch = dynamics_nn_arch\n\n self._dynEncoder = SimpleEncoder(encoding_size = self._dyn_encoding_size,\n image_channels_num = self._observation_channels_num,\n net_input_width = self._observation_width,\n net_input_height = self._observation_height)\n \n if self._static_encoding_size != 0:\n self._staticEncoder = SimpleEncoder(encoding_size = self._static_encoding_size,\n image_channels_num = self._observation_channels_num,\n net_input_width = self._observation_width,\n net_input_height = self._observation_height)\n else:\n self._staticEncoder = None\n\n self._dynamics_net = th.nn.Sequential( th.nn.Linear(self._dyn_encoding_size+self._action_size, self._dynamics_nn_arch[0]),\n th.nn.ReLU(),\n th.nn.Linear(self._dynamics_nn_arch[0], self._dynamics_nn_arch[1]),\n th.nn.ReLU(),\n th.nn.Linear(self._dynamics_nn_arch[1], self._dyn_encoding_size+1))\n\n self._decoder = SimpleDecoder( encoding_size = self._dyn_encoding_size + self._static_encoding_size,\n image_channels_num = self._observation_channels_num,\n net_output_width = self._observation_width,\n net_output_height = self._observation_height)\n\n self._resizeToInput = transforms.Resize((self._observation_height,self._observation_width))\n\n\n @property\n def observation_height(self):\n return self._observation_height\n\n @property\n def observation_width(self):\n return self._observation_width\n\n @property\n def dyn_encoding_size(self):\n return self._dyn_encoding_size\n\n @property\n def static_encoding_size(self):\n return self._static_encoding_size\n\n @property \n def action_size(self):\n return self._action_size\n\n def forward(self, transition_batch : Transition):\n observation_batch = transition_batch.observation\n action_batch = transition_batch.action\n assert action_batch.size()[0] == observation_batch.size()[0], \\\n f\"Observation batch and action batch should have the same length. Action batch size = {action_batch.size()[0]}, observation batch size = {observation_batch.size()[0]}. Action tensor size = {action_batch.size()[0]}. Observation tensor size = {observation_batch.size()[0]}\"\n assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \\\n f\"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}\"\n assert action_batch.size()[1] == self._action_size, \\\n f\"Each action should have size {self._action_size}, not {action_batch.size()[1]}. 
Tensor has size {action_batch.size()}\"\n\n #Compute 'static' features encoding\n state_s_0_batch = self.encode_static(observation_batch) #Gives a (batch_size, static_encoding_size) output\n\n #Compute 'dynamic' features encoding\n state_d_0_batch = self.encode_dynamic(observation_batch) #Gives a (batch_size, dyn_encoding_size) output\n\n state_d_1_batch, reward_d_1_batch = self.predict_dynamics(state_d_0_batch, action_batch)\n #state_d_1_batch now has size (batch_size, dyn_encoding_size)\n #reward_d_1_batch now has size (batch_size, 1) (still 2-dimensional)\n\n #Will now use 'static' features vectors and predicted states to predict the observation\n observation_1_batch = self.decode(state_s_0_batch,state_d_1_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output\n\n return observation_1_batch, reward_d_1_batch\n\n\n def encode_dynamic(self, observation_batch : th.Tensor):\n assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \\\n f\"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}\"\n return self._dynEncoder(observation_batch)\n\n def encode_static(self, observation_batch : th.Tensor):\n assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \\\n f\"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}\"\n if self._staticEncoder is not None:\n return self._staticEncoder(observation_batch)\n else:\n return th.empty([observation_batch.size()[0],0]).to(observation_batch.device)\n\n def decode(self, static_encoding_batch : th.Tensor, dynamic_encoding_batch : th.Tensor):\n assert static_encoding_batch.size()[0] == dynamic_encoding_batch.size()[0], \\\n f\"static encoding batch and dynamic encoding batch have different sizes, respectively {static_encoding_batch.size()[0]} and {dynamic_encoding_batch.size()[0]}\"\n assert dynamic_encoding_batch.size() == (dynamic_encoding_batch.size()[0], self._dyn_encoding_size), \\\n f\"dynamic_encoding have wrong size, should be {(dynamic_encoding_batch.size()[0], self._dyn_encoding_size)}, but it's {dynamic_encoding_batch.size()}\"\n assert static_encoding_batch.size() == (static_encoding_batch.size()[0], self._static_encoding_size), \\\n f\"static_encoding_batch have wrong size, should be {(static_encoding_batch.size()[0], self._static_encoding_size)}, but it's {static_encoding_batch.size()}\"\n \n #Combine the two vectors\n state_batch = th.cat((static_encoding_batch, dynamic_encoding_batch), 1) #Gives a (batch_size, dyn_encoding_size+static_encoding_size) output\n #Predict the observation\n return self._decoder(state_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output\n \n\n\n def predict_dynamics(self, state_batch : th.Tensor, action_batch : th.Tensor):\n assert state_batch.size()[0] == action_batch.size()[0], \\\n f\"state batch and action batch have different sizes, respectively {state_batch.size()[0]} and {action_batch.size()[0]}\"\n assert state_batch.size()[1] == self._dyn_encoding_size, \\\n f\"States have wrong size, should be {self._dyn_encoding_size}, but it's {state_batch.size()[1]}\"\n assert action_batch.size()[1] == self._action_size, 
\\\n f\"Actions have wrong size, should be {self._action_size} but it's {action_batch.size()[1]}\"\n\n #Concatenate states and actions\n state_action_batch = th.cat((state_batch, action_batch), 1) #Gives a (batch_size, dyn_encoding_size+action_size) output\n nextstate_reward_batch = self._dynamics_net(state_action_batch) #Gives a (batch_size, dyn_encoding_size+1) output\n nextstate_batch, reward_batch = th.split(nextstate_reward_batch, [self._dyn_encoding_size, 1], 1)\n #nextstate_batch now has size (batch_size, dyn_encoding_size)\n #reward_batch now has size (batch_size, 1) (still 2-dimensional)\n return nextstate_batch, reward_batch\n \n\n\n\n def preprocess_observations(self, observation_batch : th.Tensor):\n resized_batch = self._resizeToInput(observation_batch)\n # Input should be in the [0,1] range, as this is what torchvision.transforms.ToTensor does\n # We move it to [-1,1]\n normalized = resized_batch*2 - 1\n return normalized\n #return resized_batch\n\n def postprocess_observations(self, observation_batch : th.Tensor):\n return (observation_batch + 1)/2\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.split", "torch.nn.ReLU" ] ]
MBaltz/dival
[ "b7c10ed471d05242312a7d4916900c92e0c36cdb", "b7c10ed471d05242312a7d4916900c92e0c36cdb" ]
[ "dival/reconstructors/dip_ct_reconstructor.py", "dival/examples/ct_train_learnedpd.py" ]
[ "from warnings import warn\nfrom functools import partial\nfrom tqdm import tqdm\nimport torch\nimport numpy as np\n\nfrom torch.optim import Adam\nfrom torch.nn import MSELoss\n\nfrom odl.contrib.torch import OperatorModule\n\nfrom dival.reconstructors import IterativeReconstructor\nfrom dival.reconstructors.networks.unet import UNet\nfrom dival.util.torch_losses import poisson_loss, tv_loss\nfrom dival.util.constants import MU_MAX\n\nMIN = -1000\nMAX = 1000\n\n\nclass DeepImagePriorCTReconstructor(IterativeReconstructor):\n \"\"\"\n CT reconstructor applying DIP with TV regularization (see [2]_).\n The DIP was introduced in [1]_.\n\n References\n ----------\n .. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018, \"Deep Image Prior\".\n IEEE/CVF Conference on Computer Vision and Pattern Recognition.\n https://doi.org/10.1109/CVPR.2018.00984\n .. [2] D. Otero Baguer, J. Leuschner, M. Schmidt, 2020, \"Computed\n Tomography Reconstruction Using Deep Image Prior and Learned\n Reconstruction Methods\". Inverse Problems.\n https://doi.org/10.1088/1361-6420/aba415\n \"\"\"\n\n HYPER_PARAMS = {\n 'lr':\n {'default': 1e-3,\n 'range': [1e-5, 1e-1]},\n 'gamma':\n {'default': 1e-4,\n 'range': [1e-7, 1e-0],\n 'grid_search_options': {'num_samples': 20}},\n 'scales':\n {'default': 4,\n 'choices': [3, 4, 5, 6, 7]},\n 'channels':\n {'default': [128] * 5},\n 'skip_channels':\n {'default': [4] * 5},\n 'iterations':\n {'default': 5000,\n 'range': [1, 50000]},\n 'loss_function':\n {'default': 'mse',\n 'choices': ['mse', 'poisson']},\n 'photons_per_pixel': # used by 'poisson' loss function\n {'default': 4096,\n 'range': [1000, 10000]},\n 'mu_max': # used by 'poisson' loss function\n {'default': MU_MAX,\n 'range': [1., 10000.]}\n }\n\n def __init__(self, ray_trafo, callback_func=None,\n callback_func_interval=100, show_pbar=True,\n torch_manual_seed=10, **kwargs):\n \"\"\"\n Parameters\n ----------\n ray_trafo : `odl.tomo.operators.RayTransform`\n The forward operator\n callback_func : callable, optional\n Callable with signature\n ``callback_func(iteration, reconstruction, loss)`` that is called\n after every `callback_func_interval` iterations, starting\n after the first iteration. It is additionally called after the\n last iteration.\n Note that it differs from the inherited\n `IterativeReconstructor.callback` (which is also supported) in that\n the latter is of type :class:`odl.solvers.util.callback.Callback`,\n which only receives the reconstruction, such that the loss would\n have to be recomputed.\n callback_func_interval : int, optional\n Number of iterations between calls to `callback_func`.\n Default: `100`.\n show_pbar : bool, optional\n Whether to show a tqdm progress bar during reconstruction.\n torch_manual_seed : int, optional\n Fixed seed to set by ``torch.manual_seed`` before reconstruction.\n The default is `10`. 
It can be set to `None` or `False` to disable\n the manual seed.\n \"\"\"\n\n super().__init__(\n reco_space=ray_trafo.domain, observation_space=ray_trafo.range,\n **kwargs)\n\n self.callback_func = callback_func\n self.ray_trafo = ray_trafo\n self.ray_trafo_module = OperatorModule(self.ray_trafo)\n self.callback_func = callback_func\n self.callback_func_interval = callback_func_interval\n self.show_pbar = show_pbar\n self.torch_manual_seed = torch_manual_seed\n\n def get_activation(self, layer_index):\n return self.model.layer_output(self.net_input, layer_index)\n\n def _reconstruct(self, observation, *args, **kwargs):\n if self.torch_manual_seed:\n torch.random.manual_seed(self.torch_manual_seed)\n\n output_depth = 1\n input_depth = 1\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n self.net_input = 0.1 * \\\n torch.randn(input_depth, *self.reco_space.shape)[None].to(device)\n self.model = UNet(\n input_depth,\n output_depth,\n channels=self.channels[:self.scales],\n skip_channels=self.skip_channels[:self.scales],\n use_sigmoid=True,\n use_norm=True).to(device)\n\n self.optimizer = Adam(self.model.parameters(), lr=self.lr)\n\n y_delta = torch.tensor(np.asarray(observation), dtype=torch.float32)\n y_delta = y_delta.view(1, 1, *y_delta.shape)\n y_delta = y_delta.to(device)\n\n if self.loss_function == 'mse':\n criterion = MSELoss()\n elif self.loss_function == 'poisson':\n criterion = partial(poisson_loss,\n photons_per_pixel=self.photons_per_pixel,\n mu_max=self.mu_max)\n else:\n warn('Unknown loss function, falling back to MSE')\n criterion = MSELoss()\n\n best_loss = np.inf\n best_output = self.model(self.net_input).detach()\n\n for i in tqdm(range(self.iterations),\n desc='DIP', disable=not self.show_pbar):\n self.optimizer.zero_grad()\n output = self.model(self.net_input)\n loss = criterion(self.ray_trafo_module(output),\n y_delta) + self.gamma * tv_loss(output)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)\n self.optimizer.step()\n\n for p in self.model.parameters():\n p.data.clamp_(MIN, MAX)\n\n if loss.item() < best_loss:\n best_loss = loss.item()\n best_output = output.detach()\n\n if (self.callback_func is not None and\n (i % self.callback_func_interval == 0\n or i == self.iterations-1)):\n self.callback_func(\n iteration=i,\n reconstruction=best_output[0, 0, ...].cpu().numpy(),\n loss=best_loss)\n\n if self.callback is not None:\n self.callback(self.reco_space.element(\n best_output[0, 0, ...].cpu().numpy()))\n\n return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())\n", "\"\"\"\nTrain LearnedPDReconstructor on 'lodopab'.\n\"\"\"\nimport numpy as np\nfrom dival import get_standard_dataset\nfrom dival.measure import PSNR\nfrom dival.reconstructors.learnedpd_reconstructor import LearnedPDReconstructor\nfrom dival.reference_reconstructors import (\n check_for_params, download_params, get_hyper_params_path)\nfrom dival.util.plot import plot_images\n\nIMPL = 'astra_cuda'\n\nLOG_DIR = './logs/lodopab_learnedpd'\nSAVE_BEST_LEARNED_PARAMS_PATH = './params/lodopab_learnedpd'\n\ndataset = get_standard_dataset('lodopab', impl=IMPL)\nray_trafo = dataset.get_ray_trafo(impl=IMPL)\ntest_data = dataset.get_data_pairs('test', 100)\n\nreconstructor = LearnedPDReconstructor(\n ray_trafo, log_dir=LOG_DIR,\n save_best_learned_params_path=SAVE_BEST_LEARNED_PARAMS_PATH)\n\n#%% obtain reference hyper parameters\nif not check_for_params('learnedpd', 'lodopab', include_learned=False):\n download_params('learnedpd', 
'lodopab', include_learned=False)\nhyper_params_path = get_hyper_params_path('learnedpd', 'lodopab')\nreconstructor.load_hyper_params(hyper_params_path)\n\n#%% train\nreconstructor.train(dataset)\n\n#%% evaluate\nrecos = []\npsnrs = []\nfor obs, gt in test_data:\n reco = reconstructor.reconstruct(obs)\n recos.append(reco)\n psnrs.append(PSNR(reco, gt))\n\nprint('mean psnr: {:f}'.format(np.mean(psnrs)))\n\nfor i in range(3):\n _, ax = plot_images([recos[i], test_data.ground_truth[i]],\n fig_size=(10, 4))\n ax[0].set_xlabel('PSNR: {:.2f}'.format(psnrs[i]))\n ax[0].set_title('LearnedPDReconstructor')\n ax[1].set_title('ground truth')\n ax[0].figure.suptitle('test sample {:d}'.format(i))\n" ]
[ [ "torch.nn.MSELoss", "numpy.asarray", "torch.random.manual_seed", "torch.cuda.is_available", "torch.randn" ], [ "numpy.mean" ] ]
meteor1993/python-learning
[ "4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40" ]
[ "python-data-analysis/2019-nCoV-global/global_map.py" ]
[ "from pyecharts import options as opts\r\nfrom pyecharts.charts import Map\r\nimport pandas as pd\r\nimport namemap\r\n\r\ndef read_country_code():\r\n \"\"\"\r\n 获取国家中英文字典\r\n :return:\r\n \"\"\"\r\n country_dict = {}\r\n for key, val in namemap.nameMap.items(): # 将 nameMap 列表里面键值互换\r\n country_dict[val] = key\r\n return country_dict\r\n\r\ndef read_csv():\r\n \"\"\"\r\n 读取数据,返回国家英文名称列表和累计确诊数列表\r\n :return:\r\n \"\"\"\r\n country_dict = read_country_code()\r\n data = pd.read_csv(\"2019-nCoV.csv\", index_col=False)\r\n\r\n countrys_names = list()\r\n confirmed_count = list()\r\n\r\n for x in range(len(data.index)):\r\n if data['name'].iloc[x] in country_dict.keys():\r\n countrys_names.append(country_dict[data['name'].iloc[x]])\r\n confirmed_count.append(data['confirm'].iloc[x])\r\n else:\r\n print(data['name'].iloc[x])\r\n\r\n return countrys_names, confirmed_count\r\n\r\n\r\ndef draw_map():\r\n \"\"\"\r\n 绘制世界地图\r\n 遇到一个很神奇的问题:\r\n 两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据\r\n :return:\r\n \"\"\"\r\n\r\n # 修复注释中的问题,原因是 confirmed_count 中的 int 是 numpy 的 int ,需转化为 python 中的 int\r\n # 感谢公众号的 @李康伟 同学提出\r\n countrys_names, confirmed_count = read_csv()\r\n confirmed_count_list = []\r\n for item in confirmed_count:\r\n confirmed_count_list.append(int(item))\r\n\r\n # countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', \"Côte d'Ivoire\", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. 
Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho']\r\n # \r\n # confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]\r\n\r\n\r\n c = (\r\n Map()\r\n .add(\r\n \"确诊人数\",\r\n [list(z) for z in zip(countrys_names, confirmed_count_list)],\r\n is_map_symbol_show=False,\r\n maptype=\"world\",\r\n label_opts=opts.LabelOpts(is_show=False),\r\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgb(49,60,72)\")\r\n )\r\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"全球 2019-nCoV 地图\"),\r\n visualmap_opts=opts.VisualMapOpts(max_=1700000),\r\n )\r\n .render(\"map_world.html\")\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n draw_map()\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
santosh653/dtcwt
[ "01d9e87dc9abfa244a89c1f05aebf3dec6999f3a" ]
[ "docs/image-registration.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nAn example of image registration via the DTCWT.\n\nThis script demonstrates some methods for image registration using the DTCWT.\n\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport itertools\nimport logging\nimport os\n\nfrom matplotlib.pyplot import *\nimport numpy as np\n\nimport dtcwt\nfrom dtcwt.numpy import Transform2d\nimport dtcwt.sampling\nfrom dtcwt.registration import *\n\nlogging.basicConfig(level=logging.INFO)\n\nimport datasets\n\ndef register_frames(filename):\n # Load test images\n logging.info('Loading frames from \"{0}\"'.format(filename))\n f1, f2 = datasets.regframes(filename)\n\n # Take the DTCWT of both frames.\n logging.info('Taking DTCWT')\n nlevels = 6\n trans = Transform2d()\n t1 = trans.forward(f1, nlevels=nlevels)\n t2 = trans.forward(f2, nlevels=nlevels)\n\n # Solve for transform\n logging.info('Finding flow')\n avecs = estimatereg(t1, t2)\n\n logging.info('Computing warped image')\n warped_f1 = warp(f1, avecs, method='bilinear')\n\n logging.info('Computing velocity field')\n step = 16\n X, Y = np.meshgrid(np.arange(f1.shape[1]), np.arange(f1.shape[0]))\n vxs, vys = velocityfield(avecs, f1.shape, method='nearest')\n\n vxs -= np.median(vxs.flat)\n vys -= np.median(vys.flat)\n\n figure(figsize=(16,9))\n\n subplot(221)\n imshow(np.dstack((f1, f2, np.zeros_like(f1))))\n title('Overlaid frames')\n\n subplot(222)\n imshow(np.dstack((warped_f1, f2, np.zeros_like(f2))))\n title('Frame 1 warped to Frame 2 (image domain)')\n\n subplot(223)\n sc = 2\n imshow(np.dstack((f1, f2, np.zeros_like(f2))))\n quiver(X[::step,::step], Y[::step,::step],\n -sc*vxs[::step,::step]*f1.shape[1], -sc*vys[::step,::step]*f1.shape[0],\n color='b', angles='xy', scale_units='xy', scale=1)\n title('Computed velocity field (median subtracted), x{0}'.format(sc))\n\n subplot(224)\n imshow(np.sqrt(vxs*vxs + vys*vys), interpolation='none', cmap=cm.hot)\n colorbar()\n title('Magnitude of computed velocity (median subtracted)')\n\n # savefig(os.path.splitext(os.path.basename(filename))[0] + '-registration.png')\n\nregister_frames('traffic')\nregister_frames('tennis')\n" ]
[ [ "numpy.median", "numpy.zeros_like", "numpy.arange", "numpy.sqrt" ] ]
aManOf502/tqsdk-python
[ "b766b45bb82c89a0401a6a84e0e42600fa10e6f4" ]
[ "tqsdk/utils.py" ]
[ "#!usr/bin/env python3\n# -*- coding:utf-8 -*-\n__author__ = 'yanqiong'\n\n\nimport random\nimport secrets\nfrom bisect import bisect_right\n\nfrom sgqlc.operation import Operation\nfrom pandas.core.internals import BlockManager\n\nfrom tqsdk.ins_schema import ins_schema, _add_all_frags\n\nRD = random.Random(secrets.randbits(128)) # 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed\n\n\ndef _generate_uuid(prefix=''):\n return f\"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}\"\n\n\ndef _query_for_quote(symbol):\n \"\"\"\n 返回请求某个合约的合约信息的 query_pack\n 调用次函数应该全部都是sdk的代码主动请求合约信息\n 用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的\n \"\"\"\n symbol_list = symbol if isinstance(symbol, list) else [symbol]\n op = Operation(ins_schema.rootQuery)\n query = op.multi_symbol_info(instrument_id=symbol_list)\n _add_all_frags(query)\n return {\n \"aid\": \"ins_query\",\n \"query_id\": _generate_uuid(prefix='PYSDK_quote_'),\n \"query\": op.__to_graphql__()\n }\n\n\ndef _query_for_init():\n \"\"\"\n 返回某些类型合约的 query\n todo: 为了兼容旧版提供给用户的 api._data[\"quote\"].items() 类似用法,应该限制交易所 [\"SHFE\", \"DCE\", \"CZCE\", \"INE\", \"CFFEX\", \"KQ\"]\n \"\"\"\n op = Operation(ins_schema.rootQuery)\n query = op.multi_symbol_info(class_=[\"FUTURE\", \"INDEX\", \"OPTION\", \"COMBINE\", \"CONT\"],\n exchange_id=[\"SHFE\", \"DCE\", \"CZCE\", \"INE\", \"CFFEX\", \"KQ\"])\n _add_all_frags(query)\n return op.__to_graphql__()\n\n\nnight_trading_table = {\n \"DCE.a\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.b\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.c\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.cs\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.m\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.y\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.p\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.l\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.v\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.pp\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.j\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.jm\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.i\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.eg\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.eb\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.rr\": [\"21:00:00\", \"23:00:00\"],\n \"DCE.pg\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.CF\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.CY\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.SA\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.SR\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.TA\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.OI\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.MA\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.FG\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.RM\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.ZC\": [\"21:00:00\", \"23:00:00\"],\n \"CZCE.TC\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.rb\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.hc\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.fu\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.bu\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.ru\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.sp\": [\"21:00:00\", \"23:00:00\"],\n \"INE.nr\": [\"21:00:00\", \"23:00:00\"],\n \"SHFE.cu\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.al\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.zn\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.pb\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.ni\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.sn\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.ss\": [\"21:00:00\", \"25:00:00\"],\n \"SHFE.au\": [\"21:00:00\", \"26:30:00\"],\n \"SHFE.ag\": [\"21:00:00\", \"26:30:00\"],\n \"INE.sc\": [\"21:00:00\", \"26:30:00\"],\n}\n\n\ndef _quotes_add_night(quotes):\n \"\"\"为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间\"\"\"\n 
for symbol in quotes:\n product_id = quotes[symbol].get(\"product_id\")\n if quotes[symbol].get(\"trading_time\") and product_id:\n key = f\"{quotes[symbol].get('exchange_id')}.{product_id}\"\n if key in night_trading_table and (not quotes[symbol][\"trading_time\"].get(\"night\")):\n quotes[symbol][\"trading_time\"][\"night\"] = [night_trading_table[key]]\n\n\ndef _bisect_value(a, x, priority=\"right\"):\n \"\"\"\n 返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值\n a: 必须是已经排序好(升序排列)的 list\n bisect_right : Return the index where to insert item x in list a, assuming a is sorted.\n \"\"\"\n assert priority in ['left', 'right']\n insert_index = bisect_right(a, x)\n if 0 < insert_index < len(a):\n left_dis = x - a[insert_index - 1]\n right_dis = a[insert_index] - x\n if left_dis == right_dis:\n mid_index = insert_index - 1 if priority == \"left\" else insert_index\n elif left_dis < right_dis:\n mid_index = insert_index - 1\n else:\n mid_index = insert_index\n else:\n assert insert_index == 0 or insert_index == len(a)\n mid_index = 0 if insert_index == 0 else (len(a) - 1)\n return a[mid_index]\n\n\nclass BlockManagerUnconsolidated(BlockManager):\n \"\"\"mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新\"\"\"\n def __init__(self, *args, **kwargs):\n BlockManager.__init__(self, *args, **kwargs)\n self._is_consolidated = False\n self._known_consolidated = False\n\n def _consolidate_inplace(self): pass\n" ]
[ [ "pandas.core.internals.BlockManager.__init__" ] ]
jmaces/robust-nets
[ "25d49302f9fa5fcc9ded2727de75e96e25243d09", "25d49302f9fa5fcc9ded2727de75e96e25243d09", "25d49302f9fa5fcc9ded2727de75e96e25243d09", "25d49302f9fa5fcc9ded2727de75e96e25243d09" ]
[ "tvsynth/script_train_unet_it_tikh_jitter.py", "tvsynth/find_adversarial.py", "ellipses/config_robustness_radon.py", "ellipses/script_robustness_fourier_table_adv.py" ]
[ "import os\n\nimport matplotlib as mpl\nimport torch\n\nfrom data_management import Jitter, load_dataset\nfrom networks import IterativeNet, UNet\nfrom operators import TVAnalysis, get_tikhonov_matrix\n\n\n# --- load configuration -----\nimport config # isort:skip\n\n# ----- general setup -----\nmpl.use(\"agg\")\ndevice = torch.device(\"cuda:0\")\n\n\n# ----- operators -----\nOpA = config.meas_op(config.m, config.n, device=device, **config.meas_params)\nOpTV = TVAnalysis(config.n, device=device)\n\n\n# ----- build linear inverter ------\nreg_fac = 2e-2\n\ninverter = torch.nn.Linear(OpA.m, OpA.n, bias=False)\ninverter.weight.requires_grad = False\ninverter.weight.data = get_tikhonov_matrix(OpA, OpTV, reg_fac)\n\n# ----- network configuration -----\nsubnet_params = {\n \"in_channels\": 1,\n \"out_channels\": 1,\n \"drop_factor\": 0.0,\n \"base_features\": 64,\n}\nsubnet = UNet\n\nit_net_params = {\n \"operator\": OpA,\n \"inverter\": inverter,\n \"num_iter\": 8,\n \"lam\": 8 * [0.1],\n \"lam_learnable\": True,\n \"final_dc\": True,\n}\n\n# ----- training setup ------\nmse_loss = torch.nn.MSELoss(reduction=\"sum\")\n\n\ndef loss_func(pred, tar):\n return mse_loss(pred, tar) / pred.shape[0]\n\n\ntrain_phases = 2\ntrain_params = {\n \"num_epochs\": [100, 5],\n \"batch_size\": [40, 40],\n \"loss_func\": loss_func,\n \"save_path\": [\n os.path.join(\n config.RESULTS_PATH,\n \"unet_it_tikh_jitter_\"\n \"train_phase_{}\".format((i + 1) % (train_phases + 1)),\n )\n for i in range(train_phases + 1)\n ],\n \"save_epochs\": 1,\n \"optimizer\": torch.optim.Adam,\n \"optimizer_params\": [\n {\"lr\": 5e-5, \"eps\": 1e-5, \"weight_decay\": 5e-4},\n {\"lr\": 2e-5, \"eps\": 1e-5, \"weight_decay\": 5e-4},\n ],\n \"scheduler\": torch.optim.lr_scheduler.StepLR,\n \"scheduler_params\": {\"step_size\": 1, \"gamma\": 1.0},\n \"acc_steps\": [1, 200],\n \"train_transform\": Jitter(2e0, 0.0, 1.0),\n \"val_transform\": None,\n}\n\n\n# -----data prep -----\nX_train, C_train, Y_train = [\n tmp.unsqueeze(-2).to(device)\n for tmp in load_dataset(config.set_params[\"path\"], subset=\"train\")\n]\n\nX_val, C_val, Y_val = [\n tmp.unsqueeze(-2).to(device)\n for tmp in load_dataset(config.set_params[\"path\"], subset=\"val\")\n]\n\n\n# ------ save hyperparameters -------\nos.makedirs(train_params[\"save_path\"][-1], exist_ok=True)\nwith open(\n os.path.join(train_params[\"save_path\"][-1], \"hyperparameters.txt\"), \"w\"\n) as file:\n for key, value in subnet_params.items():\n file.write(key + \": \" + str(value) + \"\\n\")\n for key, value in it_net_params.items():\n file.write(key + \": \" + str(value) + \"\\n\")\n for key, value in train_params.items():\n file.write(key + \": \" + str(value) + \"\\n\")\n file.write(\"train_phases\" + \": \" + str(train_phases) + \"\\n\")\n\n# ------ construct network and train -----\nsubnet = subnet(**subnet_params).to(device)\nit_net = IterativeNet(subnet, **it_net_params).to(device)\nfor i in range(train_phases):\n train_params_cur = {}\n for key, value in train_params.items():\n train_params_cur[key] = (\n value[i] if isinstance(value, (tuple, list)) else value\n )\n\n print(\"Phase {}:\".format(i + 1))\n for key, value in train_params_cur.items():\n print(key + \": \" + str(value))\n\n it_net.train_on((Y_train, X_train), (Y_val, X_val), **train_params_cur)\n", "import torch\n\nfrom tqdm import tqdm\n\nfrom operators import l2_error\n\n\n# ----- Variable Transforms -----\n\n\ndef identity(x):\n return x\n\n\ndef normalized_tanh(x, eps=1e-6):\n return (torch.tanh(x) + 1.0) / 
2.0\n\n\ndef normalized_atanh(x, eps=1e-6):\n x = x * (1 - eps) * 2 - 1\n return 0.5 * torch.log((1.0 + x) / (1.0 - x))\n\n\n# ----- Optimization Methods -----\n\n\ndef PGD(\n loss,\n t_in,\n projs=None,\n iter=50,\n stepsize=1e-2,\n maxls=50,\n ls_fac=0.1,\n ls_severity=1.0,\n silent=False,\n):\n \"\"\" (Proj.) gradient decent with simple constraints.\n\n Minimizes a given loss function subject to optional constraints. The\n constraints must be \"simple\" in the sense that efficient projections onto\n the feasible set exist.\n\n The step size for gradient descent is determined by a backtracked\n line search. Set maximum number of line search steps to 1 to disable it.\n\n Parameters\n ----------\n loss : callable\n The loss or objective function.\n t_in : torch.Tensor\n The input tensor. This will be modified during\n optimization. The provided tensor serves as initial guess for the\n iterative optimization algorithm and will hold the result in the end.\n projs : list or tuple of callables, optional\n The projections onto the feasible set to perform after each gradient\n descent step. They will be performed in the order given. (Default None)\n iter : int, optional\n Number of iterations. (Default 50)\n stepsize : float, optional\n The step size parameter for gradient descent. Initial step size\n guess if line search is enabled. (Default 1e-2)\n maxls : int, optional\n Maximum number of line search steps. Set to 1 to disable the\n backtracked line search. (Default 10)\n ls_fac : float, optional\n Step size shrinkage factor for backtracked line search. Should be\n between 0 and 1. (Default 0.5)\n ls_severity : float, optional\n Line search severity parameter. Should be positive. (Default 1.0)\n silent : bool, optional\n Disable progress bar. (Default False)\n\n Returns\n -------\n torch.tensor\n The modified input t_in. Note that t_in is changed as a\n side-effect, even if the returned tensor is discarded.\n \"\"\"\n\n def _project(t_in):\n with torch.no_grad():\n t_tmp = t_in.clone()\n if projs is not None:\n for proj in projs:\n t_tmp = proj(t_tmp)\n t_in.data = t_tmp.data\n return t_tmp\n\n # run optimization\n ls_stepsize = stepsize\n t = tqdm(range(iter), desc=\"PGD iter\", disable=silent)\n for it in t:\n # reset gradients\n if t_in.grad is not None:\n t_in.grad.detach_()\n t_in.grad.zero_()\n # compute pre step loss and descent direction\n pre_loss = loss(t_in)\n pre_loss.backward()\n p = -t_in.grad.data\n # backtracking line search\n ls_count, STOP_LS = 1, False\n with torch.no_grad():\n while not STOP_LS:\n t_tmp = _project(t_in + ls_stepsize * p)\n step_loss = loss(t_tmp)\n STOP_LS = (ls_count >= maxls) or (\n (\n pre_loss\n - ls_severity * (p * (t_tmp - t_in)).mean()\n + 1 / (2 * ls_stepsize) * (t_tmp - t_in).pow(2).mean()\n )\n > step_loss\n )\n if not STOP_LS:\n ls_count += 1\n ls_stepsize *= ls_fac\n # do the actual projected gradient step\n t_in.data.add_(ls_stepsize, p)\n _project(t_in)\n t.set_postfix(\n loss=step_loss.item(), ls_steps=ls_count, stepsize=ls_stepsize,\n )\n # allow initial step size guess to grow between iterations\n if ls_count < maxls and maxls > 1:\n ls_stepsize /= ls_fac\n # stop if steps become to small\n if ls_stepsize < 1e-18:\n break\n\n return t_in\n\n\ndef PAdam(\n loss, t_in, projs=None, iter=50, stepsize=1e-2, silent=False,\n):\n \"\"\" (Proj.) Adam accelerated gradient decent with simple constraints.\n\n Minimizes a given loss function subject to optional constraints. 
The\n constraints must be \"simple\" in the sense that efficient projections onto\n the feasible set exist.\n\n Parameters\n ----------\n loss : callable\n The loss or objective function.\n t_in : torch.Tensor\n The input tensor. This will be modified during\n optimization. The provided tensor serves as initial guess for the\n iterative optimization algorithm and will hold the result in the end.\n projs : list or tuple of callables, optional\n The projections onto the feasible set to perform after each gradient\n descent step. They will be performed in the order given. (Default None)\n iter : int, optional\n Number of iterations. (Default 50)\n stepsize : float, optional\n The step size parameter for gradient descent. (Default 1e-2)\n silent : bool, optional\n Disable progress bar. (Default False)\n\n Returns\n -------\n torch.tensor\n The modified input t_in. Note that t_in is changed as a\n side-effect, even if the returned tensor is discarded.\n \"\"\"\n\n def _project(t_in):\n with torch.no_grad():\n t_tmp = t_in.clone()\n if projs is not None:\n for proj in projs:\n t_tmp = proj(t_tmp)\n t_in.data = t_tmp.data\n return t_tmp\n\n # run optimization\n optimizer = torch.optim.Adam((t_in,), lr=stepsize, eps=1e-5)\n t = tqdm(range(iter), desc=\"PAdam iter\", disable=silent)\n for it in t:\n # reset gradients\n if t_in.grad is not None:\n t_in.grad.detach_()\n t_in.grad.zero_()\n # compute loss and take gradient step\n pre_loss = loss(t_in)\n pre_loss.backward()\n optimizer.step()\n # project and evaluate\n _project(t_in)\n post_loss = loss(t_in)\n t.set_postfix(pre_loss=pre_loss.item(), post_loss=post_loss.item())\n\n return t_in\n\n\n# ----- Adversarial Example Finding -----\n\n\ndef untargeted_attack(\n func,\n t_in_adv,\n t_in_ref,\n t_out_ref=None,\n domain_dist=None,\n codomain_dist=torch.nn.MSELoss(),\n weights=(1.0, 1.0),\n optimizer=PGD,\n transform=identity,\n inverse_transform=identity,\n **kwargs\n):\n \"\"\" Untargeted finding of adversarial examples.\n\n Finds perturbed input to a function f(x) that is close to a specified\n reference input and brings the function value f(x) as far from f(reference)\n as possible, by solving\n\n min distance(x, reference) - distance(f(x), f(reference))\n\n subject to optional constraints (see e.g. `PGD`). Optionally the\n optimization domain can be transformed to include implicit constraints.\n In this case a variable transform and its inverse must be provided.\n\n\n Parameters\n ----------\n func : callable\n The function f.\n t_in_adv : torch.Tensor\n The adversarial input tensor. This will be modified during\n optimization. The provided tensor serves as initial guess for the\n iterative optimization algorithm and will hold the result in the end.\n t_in_ref : torch.tensor\n The reference input tensor.\n domain_dist : callable, optional\n The distance measure between x and reference in the domain of f. Set\n to `Ǹone` to exlcude this term from the objective. (Default None)\n codomain_dist : callable, optional\n The distance measure between f(x) and f(reference) in the codomain of\n f. (Default torch.nn.MSELoss)\n weights : tuple of float\n Weighting factor for the two distance measures in the objective.\n (Default (1.0, 1.0))\n optimizer : callable, optional\n The routine used for solving th optimization problem. (Default `PGD`)\n transform : callable, optional\n Domain variable transform. (Default `identity`)\n inverse_transform : callable, optional\n Inverse domain variable transform. 
(Default `identity`)\n **kwargs\n Optional keyword arguments passed on to the optimizer (see e.g. `PGD`).\n\n Returns\n -------\n torch.tensor\n The perturbed input t_in_adv. Note that t_in_adv is changed as a\n side-effect, even if the returned tensor is discarded.\n \"\"\"\n\n t_in_adv.data = inverse_transform(t_in_adv.data)\n if t_out_ref is None:\n t_out_ref = func(t_in_ref)\n\n # loss closure\n def _closure(t_in):\n t_out = func(transform(t_in))\n loss = -weights[1] * codomain_dist(t_out, t_out_ref)\n if domain_dist is not None:\n loss += weights[0] * domain_dist(transform(t_in), t_in_ref)\n return loss\n\n # run optimization\n t_in_adv = optimizer(_closure, t_in_adv, **kwargs)\n return transform(t_in_adv)\n\n\n# ----- Grid attacks -----\n\n\ndef err_measure_l2(x1, x2):\n \"\"\" L2 error wrapper function. \"\"\"\n return l2_error(x1, x2, relative=True, squared=False)[1].squeeze()\n\n\ndef grid_attack(\n method,\n noise_rel,\n X_0,\n Y_0,\n store_data=False,\n keep_init=0,\n err_measure=err_measure_l2,\n):\n \"\"\" Finding adversarial examples over a grid of multiple noise levels.\n\n Parameters\n ----------\n method : dataframe\n The reconstruction method (including metadata in a dataframe).\n noise_rel : torch.Tensor\n List of relative noise levels. An adversarial example will be computed\n for each noise level, in descending order.\n X_0 : torch.Tensor\n The reference signal.\n Y_0 : torch.Tensor\n The reference measruements. Used to convert relative noise levels to\n absolute noise levels.\n store_data : bool, optional\n Store resulting adversarial examples. If set to False, only the\n resulting errors are stored. (Default False)\n keep_init : int, optional\n Reuse results from one noise level as initialization for the next noise\n level. (Default 0)\n err_measure : callable, optional\n Error measure to evaluate the effect of the adversarial perturbations\n on the reconstruction method. 
(Default relative l2-error)\n\n Returns\n -------\n torch.Tensor\n Error of adversarial perturbations for each noise level.\n torch.Tensor\n Error of statistical perturbations for each noise level (as reference).\n torch.Tensor, optional\n The adversarial reconstruction for each noise level.\n (only if store_data is set to True)\n torch.Tensor, optional\n The reference reconstruction for each noise level.\n (only if store_data is set to True)\n torch.Tensor, optional\n The adversarial measurements for each noise level.\n (only if store_data is set to True)\n torch.Tensor, optional\n The reference measurements for each noise level.\n (only if store_data is set to True)\n\n \"\"\"\n\n X_adv_err = torch.zeros(len(noise_rel), X_0.shape[0])\n X_ref_err = torch.zeros(len(noise_rel), X_0.shape[0])\n\n if store_data:\n X_adv = torch.zeros(\n len(noise_rel), *X_0.shape, device=torch.device(\"cpu\")\n )\n X_ref = torch.zeros(\n len(noise_rel), *X_0.shape, device=torch.device(\"cpu\")\n )\n\n Y_adv = torch.zeros(\n len(noise_rel), *Y_0.shape, device=torch.device(\"cpu\")\n )\n Y_ref = torch.zeros(\n len(noise_rel), *Y_0.shape, device=torch.device(\"cpu\")\n )\n\n for idx_noise in reversed(range(len(noise_rel))):\n\n # perform the actual attack for \"method\" and current noise level\n print(\n \"Method: {}; Noise rel {}/{}\".format(\n method.name, idx_noise + 1, len(noise_rel)\n ),\n flush=True,\n )\n if (keep_init == 0) or (idx_noise == (len(noise_rel) - 1)):\n Y_adv_cur, Y_ref_cur, Y_0_cur = method.attacker(\n X_0, noise_rel[idx_noise], yadv_init=None\n )\n else:\n Y_adv_cur, Y_ref_cur, Y_0_cur = method.attacker(\n X_0, noise_rel[idx_noise], yadv_init=Y_adv_cur\n )\n\n # compute adversarial and reference reconstruction\n # (noise level needs to be absolute)\n X_adv_cur = method.reconstr(\n Y_adv_cur,\n noise_rel[idx_noise]\n * Y_0_cur.norm(p=2, dim=(-2, -1), keepdim=True),\n )\n X_ref_cur = method.reconstr(\n Y_ref_cur,\n noise_rel[idx_noise]\n * Y_0_cur.norm(p=2, dim=(-2, -1), keepdim=True),\n )\n\n # compute resulting reconstruction error according to err_measure\n X_adv_err[idx_noise, ...] = err_measure(X_adv_cur, X_0)\n X_ref_err[idx_noise, ...] = err_measure(X_ref_cur, X_0)\n\n if store_data:\n X_adv[idx_noise, ...] = X_adv_cur.cpu()\n X_ref[idx_noise, ...] = X_ref_cur.cpu()\n\n Y_adv[idx_noise, ...] = Y_adv_cur.cpu()\n Y_ref[idx_noise, ...] 
= Y_ref_cur.cpu()\n\n idx_max = X_adv_err[idx_noise, ...].argsort(descending=True)\n Y_adv_cur = Y_adv_cur[idx_max[0:keep_init], ...]\n\n if store_data:\n return X_adv_err, X_ref_err, X_adv, X_ref, Y_adv, Y_ref\n else:\n return X_adv_err, X_ref_err\n", "import os\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nimport config\n\nfrom find_adversarial import PAdam, untargeted_attack\nfrom networks import IterativeNet, Tiramisu, UNet\nfrom operators import Radon, TVAnalysisPeriodic, noise_poisson, proj_l2_ball\nfrom reconstruction_methods import admm_l1_rec\n\n\n# ------ setup ----------\ndevice = torch.device(\"cuda:0\")\ntorch.cuda.set_device(0)\n\n# ----- operators -----\ntheta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint\nOpA = Radon(config.n, theta)\nOpAIt = Radon(config.n, theta)\nOpAIt.adj = OpAIt.inv\nOpTV = TVAnalysisPeriodic(config.n, device=device)\n\n# ----- methods --------\nmethods = pd.DataFrame(columns=[\"name\", \"info\", \"reconstr\", \"attacker\", \"net\"])\nmethods = methods.set_index(\"name\")\n\nnoise_ref = noise_poisson\n\n# ----- set up L1 --------\n# grid search parameters for L1 via admm\ngrid_search_file = os.path.join(\n config.RESULTS_PATH, \"grid_search_l1\", \"grid_search_l1_radon_all.pkl\"\n)\ngs_params = pd.read_pickle(grid_search_file)\n\n\ndef _get_gs_param(noise_rel):\n idx = (gs_params.noise_rel - noise_rel).abs().to_numpy().argmin()\n return gs_params.grid_param[idx][\"lam\"], gs_params.grid_param[idx][\"rho\"]\n\n\n# the actual reconstruction method\ndef _reconstructL1(y, noise_rel):\n lam, rho = _get_gs_param(noise_rel.numpy())\n x, _ = admm_l1_rec(\n y,\n OpA,\n OpTV,\n 0.0 * OpA.adj(y),\n 0.0 * OpTV(OpA.adj(y)),\n lam,\n rho,\n iter=200,\n silent=False,\n )\n return x\n\n\n# the reconstruction method used for the L1 attack\n# (less iterations due to high computational costs)\ndef _reconstructL1_adv(y, lam, rho, x0, z0):\n x, _ = admm_l1_rec(y, OpA, OpTV, x0, z0, lam, rho, iter=20, silent=True)\n return x\n\n\n# loss\nmseloss = torch.nn.MSELoss(reduction=\"sum\")\n\n\n# attack function for L1\ndef _attackerL1(x0, noise_rel, yadv_init=None, batch_size=6):\n\n # compute noiseless measurements\n y0 = OpA(x0)\n\n if noise_rel == 0.0:\n return y0, y0, y0\n\n # compute absolute noise levels\n noise_level = noise_rel * y0.norm(p=2, dim=(-2, -1), keepdim=True)\n # compute noisy measurements for reference\n yref = noise_ref(OpA(x0), noise_level)\n\n # attack parameters\n adv_init_fac = 3.0 * noise_level\n adv_param = {\n \"codomain_dist\": mseloss,\n \"domain_dist\": None,\n \"mixed_dist\": None,\n \"weights\": (1.0, 1.0, 1.0),\n \"optimizer\": PAdam,\n \"projs\": None,\n \"iter\": 15,\n \"stepsize\": 5e0,\n }\n # get ADMM tuning parameters for noise_rel\n lam, rho = _get_gs_param(noise_rel.numpy())\n\n # compute good start values for _reconstructL1_adv\n x0_adv, z0_adv = admm_l1_rec(\n y0,\n OpA,\n OpTV,\n 0.0 * OpA.adj(y0),\n 0.0 * OpTV(OpA.adj(y0)),\n lam,\n rho,\n iter=200,\n silent=False,\n )\n # compute initialization\n yadv = y0.clone().detach() + (\n adv_init_fac / np.sqrt(np.prod(y0.shape[-2:]))\n ) * torch.randn_like(y0)\n\n if yadv_init is not None:\n yadv[0 : yadv_init.shape[0], ...] 
= yadv_init.clone().detach()\n\n for idx_batch in range(0, yadv.shape[0], batch_size):\n print(\n \"Attack for samples \"\n + str(list(range(idx_batch, idx_batch + batch_size)))\n )\n\n adv_param[\"projs\"] = [\n lambda y: proj_l2_ball(\n y,\n y0[idx_batch : idx_batch + batch_size, ...],\n noise_level[idx_batch : idx_batch + batch_size, ...],\n )\n ]\n # perform attack\n yadv[idx_batch : idx_batch + batch_size, ...] = untargeted_attack(\n lambda y: _reconstructL1_adv(\n y,\n lam,\n rho,\n x0_adv[idx_batch : idx_batch + batch_size, ...],\n z0_adv[idx_batch : idx_batch + batch_size, ...],\n ),\n yadv[idx_batch : idx_batch + batch_size, ...]\n .clone()\n .requires_grad_(True),\n y0[idx_batch : idx_batch + batch_size, ...],\n t_out_ref=x0[idx_batch : idx_batch + batch_size, ...],\n **adv_param\n ).detach()\n\n return yadv, yref, y0\n\n\nmethods.loc[\"L1\"] = {\n \"info\": {\n \"name_disp\": \"TV$[\\\\eta]$\",\n \"name_save\": \"tv\",\n \"plt_color\": \"#e8000b\",\n \"plt_marker\": \"o\",\n \"plt_linestyle\": \"-\",\n \"plt_linewidth\": 2.75,\n },\n \"reconstr\": _reconstructL1,\n \"attacker\": lambda x0, noise_rel, yadv_init=None: _attackerL1(\n x0, noise_rel, yadv_init=yadv_init\n ),\n \"net\": None,\n}\nmethods.loc[\"L1\", \"net\"] = None\n\n\n# ----- set up net attacks --------\n\n# the actual reconstruction method for any net\ndef _reconstructNet(y, noise_rel, net):\n return net.forward(y)\n\n\n# attack function for any net\ndef _attackerNet(x0, noise_rel, net, yadv_init=None, batch_size=3):\n\n # compute noiseless measurements\n y0 = OpA(x0)\n\n if noise_rel == 0.0:\n return y0, y0, y0\n\n # compute absolute noise levels\n noise_level = noise_rel * y0.norm(p=2, dim=(-2, -1), keepdim=True)\n # compute noisy measurements for reference\n yref = noise_ref(OpA(x0), noise_level) # noisy measurements\n\n # attack parameters\n adv_init_fac = 3.0 * noise_level\n adv_param = {\n \"codomain_dist\": mseloss,\n \"domain_dist\": None,\n \"mixed_dist\": None,\n \"weights\": (1.0, 1.0, 1.0),\n \"optimizer\": PAdam,\n \"projs\": None,\n \"iter\": 500,\n \"stepsize\": 5e0,\n }\n # compute initialization\n yadv = y0.clone().detach() + (\n adv_init_fac / np.sqrt(np.prod(y0.shape[-2:]))\n ) * torch.randn_like(y0)\n\n if yadv_init is not None:\n yadv[0 : yadv_init.shape[0], ...] = yadv_init.clone().detach()\n\n for idx_batch in range(0, yadv.shape[0], batch_size):\n print(\n \"Attack for samples \"\n + str(list(range(idx_batch, idx_batch + batch_size)))\n )\n\n adv_param[\"projs\"] = [\n lambda y: proj_l2_ball(\n y,\n y0[idx_batch : idx_batch + batch_size, ...],\n noise_level[idx_batch : idx_batch + batch_size, ...],\n )\n ]\n # perform attack\n yadv[idx_batch : idx_batch + batch_size, ...] 
= untargeted_attack(\n lambda y: _reconstructNet(y, 0.0, net),\n yadv[idx_batch : idx_batch + batch_size, ...]\n .clone()\n .requires_grad_(True),\n y0[idx_batch : idx_batch + batch_size, ...],\n t_out_ref=x0[idx_batch : idx_batch + batch_size, ...],\n **adv_param\n ).detach()\n\n return yadv, yref, y0\n\n\n# ----- load nets -----\n\n# create a net and load weights from file\ndef _load_net(path, subnet, subnet_params, it_net_params):\n subnet = subnet(**subnet_params).to(device)\n it_net = IterativeNet(subnet, **it_net_params).to(device)\n it_net.load_state_dict(torch.load(path, map_location=torch.device(device)))\n it_net.freeze()\n it_net.eval()\n return it_net\n\n\ndef _append_net(name, info, net):\n methods.loc[name] = {\n \"info\": info,\n \"reconstr\": lambda y, noise_rel: _reconstructNet(y, noise_rel, net),\n \"attacker\": lambda x0, noise_rel, yadv_init=None: _attackerNet(\n x0, noise_rel, net, yadv_init=yadv_init\n ),\n \"net\": net,\n }\n pass\n\n\n# ----- UNets -----\n\nunet_params = {\n \"in_channels\": 1,\n \"drop_factor\": 0.0,\n \"base_features\": 36,\n \"out_channels\": 1,\n}\n\n\n_append_net(\n \"UNet jit\",\n {\n \"name_disp\": \"UNet\",\n \"name_save\": \"unet_jit\",\n \"plt_color\": \"ff7c00\",\n \"plt_marker\": \"o\",\n \"plt_linestyle\": \":\",\n \"plt_linewidth\": 2.75,\n },\n _load_net(\n \"results/Radon_UNet_jitter_v3_train_phase_2/model_weights.pt\",\n UNet,\n unet_params,\n {\n \"num_iter\": 1,\n \"lam\": 0.0,\n \"lam_learnable\": False,\n \"final_dc\": False,\n \"resnet_factor\": 1.0,\n \"operator\": OpA,\n \"inverter\": OpA.inv,\n },\n ),\n)\n\n_append_net(\n \"UNet\",\n {\n \"name_disp\": \"UNet no jit\",\n \"name_save\": \"unet\",\n \"plt_color\": \"darkorange\",\n \"plt_marker\": \"o\",\n \"plt_linestyle\": \":\",\n \"plt_linewidth\": None,\n },\n _load_net(\n \"results/Radon_UNet_Hann_v2_train_phase_2/model_weights.pt\",\n UNet,\n unet_params,\n {\n \"num_iter\": 1,\n \"lam\": 0.0,\n \"lam_learnable\": False,\n \"final_dc\": False,\n \"resnet_factor\": 1.0,\n \"operator\": OpA,\n \"inverter\": OpA.inv,\n },\n ),\n)\n\n\n# ----- Tiramisu -----\ntiramisu_params = {\n \"in_channels\": 1,\n \"out_channels\": 1,\n \"drop_factor\": 0.0,\n \"down_blocks\": (5, 7, 9, 12, 15),\n \"up_blocks\": (15, 12, 9, 7, 5),\n \"pool_factors\": (2, 2, 2, 2, 2),\n \"bottleneck_layers\": 20,\n \"growth_rate\": 16,\n \"out_chans_first_conv\": 16,\n}\n\n\n_append_net(\n \"Tiramisu jit\",\n {\n \"name_disp\": \"Tira\",\n \"name_save\": \"tiramisu_jit\",\n \"plt_color\": \"turquoise\",\n \"plt_marker\": \"o\",\n \"plt_linestyle\": \"-\",\n \"plt_linewidth\": None,\n },\n _load_net(\n \"results/Radon_Tiramisu_jitter_v6_train_phase_1/\"\n + \"model_weights_epoch19.pt\",\n Tiramisu,\n tiramisu_params,\n {\n \"num_iter\": 1,\n \"lam\": 0.0,\n \"lam_learnable\": False,\n \"final_dc\": False,\n \"resnet_factor\": 1.0,\n \"operator\": OpA,\n \"inverter\": OpA.inv,\n },\n ),\n)\n\n_append_net(\n \"Tiramisu\",\n {\n \"name_disp\": \"Tira no jit\",\n \"name_save\": \"tiramisu\",\n \"plt_color\": \"turquoise\",\n \"plt_marker\": \"o\",\n \"plt_linestyle\": \"-\",\n \"plt_linewidth\": None,\n },\n _load_net(\n \"results/Radon_Tiramisu_Hann_v5_train_phase_1/model_weights.pt\",\n Tiramisu,\n tiramisu_params,\n {\n \"num_iter\": 1,\n \"lam\": 0.0,\n \"lam_learnable\": False,\n \"final_dc\": False,\n \"resnet_factor\": 1.0,\n \"operator\": OpA,\n \"inverter\": OpA.inv,\n },\n ),\n)\n", "import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom 
matplotlib import rc\nfrom piq import psnr, ssim\n\nfrom data_management import IPDataset\nfrom find_adversarial import err_measure_l2, grid_attack\nfrom operators import rotate_real, to_complex\n\n\n# ----- load configuration -----\nimport config # isort:skip\nimport config_robustness_fourier as cfg_rob # isort:skip\nfrom config_robustness_fourier import methods # isort:skip\n\n# ------ general setup ----------\n\ndevice = cfg_rob.device\n\nsave_path = os.path.join(config.RESULTS_PATH, \"attacks\")\nsave_results = os.path.join(save_path, \"table_adv.pkl\")\n\ndo_plot = True\nsave_plot = True\nsave_table = True\n\n# ----- attack setup -----\n\n# select samples\nsamples = tuple(range(50, 100))\n\nit_init = 6\nkeep_init = 3\n\n# select range relative noise\nnoise_rel = torch.tensor([0.00, 0.005, 0.01, 0.02, 0.03, 0.05, 0.08])\n\n# select measure for reconstruction error\nerr_measure = err_measure_l2\n\n# select reconstruction methods\nmethods_include = [\n \"L1\",\n \"UNet jit\",\n \"UNet EE jit\",\n \"Tiramisu jit\",\n \"Tiramisu EE jit\",\n \"UNet It jit\",\n]\nmethods_plot = [\"L1\", \"UNet jit\", \"Tiramisu EE jit\", \"UNet It jit\"]\nmethods = methods.loc[methods_include]\n\n# select methods excluded from (re-)performing attacks\nmethods_no_calc = [\n \"L1\",\n \"UNet jit\",\n \"UNet EE jit\",\n \"Tiramisu jit\",\n \"Tiramisu EE jit\",\n \"UNet It jit\",\n]\n\n# ----- perform attack -----\n\n# select samples\ntest_data = IPDataset(\"test\", config.DATA_PATH)\nX_0 = torch.stack([test_data[s][0] for s in samples])\nX_0 = to_complex(X_0.to(device))\nY_0 = cfg_rob.OpA(X_0)\n\n# create result table\nresults = pd.DataFrame(\n columns=[\n \"name\",\n \"X_adv_err\",\n \"X_ref_err\",\n \"X_adv_psnr\",\n \"X_ref_psnr\",\n \"X_adv_ssim\",\n \"X_ref_ssim\",\n ]\n)\nresults.name = methods.index\nresults = results.set_index(\"name\")\n# load existing results from file\nif os.path.isfile(save_results):\n results_save = pd.read_pickle(save_results)\n for idx in results_save.index:\n if idx in results.index:\n results.loc[idx] = results_save.loc[idx]\nelse:\n results_save = results\n\n# perform attacks\nfor (idx, method) in methods.iterrows():\n if idx not in methods_no_calc:\n\n s_len = X_0.shape[0]\n results.loc[idx].X_adv_err = torch.zeros(len(noise_rel), s_len)\n results.loc[idx].X_ref_err = torch.zeros(len(noise_rel), s_len)\n results.loc[idx].X_adv_psnr = torch.zeros(len(noise_rel), s_len)\n results.loc[idx].X_ref_psnr = torch.zeros(len(noise_rel), s_len)\n results.loc[idx].X_adv_ssim = torch.zeros(len(noise_rel), s_len)\n results.loc[idx].X_ref_ssim = torch.zeros(len(noise_rel), s_len)\n\n for s in range(s_len):\n print(\"Sample: {}/{}\".format(s + 1, s_len))\n X_0_s = X_0[s : s + 1, ...].repeat(\n it_init, *((X_0.ndim - 1) * (1,))\n )\n Y_0_s = Y_0[s : s + 1, ...].repeat(\n it_init, *((Y_0.ndim - 1) * (1,))\n )\n\n (\n X_adv_err_cur,\n X_ref_err_cur,\n X_adv_cur,\n X_ref_cur,\n _,\n _,\n ) = grid_attack(\n method,\n noise_rel,\n X_0_s,\n Y_0_s,\n store_data=True,\n keep_init=keep_init,\n err_measure=err_measure,\n )\n\n (\n results.loc[idx].X_adv_err[:, s],\n idx_max_adv_err,\n ) = X_adv_err_cur.max(dim=1)\n results.loc[idx].X_ref_err[:, s] = X_ref_err_cur.mean(dim=1)\n\n for idx_noise in range(len(noise_rel)):\n idx_max = idx_max_adv_err[idx_noise]\n results.loc[idx].X_adv_psnr[idx_noise, s] = psnr(\n rotate_real(X_adv_cur[idx_noise, ...])[idx_max, 0:1, ...],\n rotate_real(X_0_s.cpu())[0, 0:1, ...],\n data_range=1.0,\n reduction=\"none\",\n )\n results.loc[idx].X_ref_psnr[idx_noise, s] = 
psnr(\n rotate_real(X_ref_cur[idx_noise, ...])[:, 0:1, ...],\n rotate_real(X_0_s.cpu())[:, 0:1, ...],\n data_range=1.0,\n reduction=\"mean\",\n )\n results.loc[idx].X_adv_ssim[idx_noise, s] = ssim(\n rotate_real(X_adv_cur[idx_noise, ...])[idx_max, 0:1, ...],\n rotate_real(X_0_s.cpu())[0, 0:1, ...],\n data_range=1.0,\n size_average=False,\n )\n results.loc[idx].X_ref_ssim[idx_noise, s] = ssim(\n rotate_real(X_ref_cur[idx_noise, ...])[:, 0:1, ...],\n rotate_real(X_0_s.cpu())[:, 0:1, ...],\n data_range=1.0,\n size_average=True,\n )\n\n# save results\nfor idx in results.index:\n results_save.loc[idx] = results.loc[idx]\nos.makedirs(save_path, exist_ok=True)\nresults_save.to_pickle(save_results)\n\n# ----- plotting -----\n\nif do_plot:\n\n # LaTeX typesetting\n rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Palatino\"]})\n rc(\"text\", usetex=True)\n\n # +++ visualization of table +++\n fig, ax = plt.subplots(clear=True, figsize=(5, 4), dpi=200)\n\n for (idx, method) in methods.loc[methods_plot].iterrows():\n\n err_mean = results.loc[idx].X_adv_err[:, :].mean(dim=-1)\n err_std = results.loc[idx].X_adv_err[:, :].std(dim=-1)\n\n plt.plot(\n noise_rel,\n err_mean,\n linestyle=method.info[\"plt_linestyle\"],\n linewidth=method.info[\"plt_linewidth\"],\n marker=method.info[\"plt_marker\"],\n color=method.info[\"plt_color\"],\n label=method.info[\"name_disp\"],\n )\n if idx == \"L1\" or idx == \"UNet It jit\":\n plt.fill_between(\n noise_rel,\n err_mean + err_std,\n err_mean - err_std,\n alpha=0.10,\n color=method.info[\"plt_color\"],\n )\n\n plt.yticks(np.arange(0, 1, step=0.05))\n plt.ylim((-0.01, 0.226))\n ax.set_xticklabels([\"{:,.0%}\".format(x) for x in ax.get_xticks()])\n ax.set_yticklabels([\"{:,.0%}\".format(x) for x in ax.get_yticks()])\n plt.legend(loc=\"upper left\", fontsize=12)\n\n if save_plot:\n fig.savefig(\n os.path.join(save_path, \"fig_table_adv.pdf\"), bbox_inches=\"tight\"\n )\n\n plt.show()\n\nif save_table:\n df = results.applymap(\n lambda res: {\"mean\": res.mean(dim=-1), \"std\": res.std(dim=-1)}\n )\n\n # split adv and ref results\n df_adv = df[[\"X_adv_err\", \"X_adv_psnr\", \"X_adv_ssim\"]]\n\n # extract mean and std\n df_adv_mean = (\n df_adv.stack()\n .apply(pd.Series)[\"mean\"]\n .apply(\n lambda res: pd.Series(\n res,\n index=[\n \"{{{:.1f}\\\\%}}\".format(noise * 100) for noise in noise_rel\n ],\n )\n )\n )\n df_adv_std = (\n df_adv.stack()\n .apply(pd.Series)[\"std\"]\n .apply(\n lambda res: pd.Series(\n res,\n index=[\n \"{{{:.1f}\\\\%}}\".format(noise * 100) for noise in noise_rel\n ],\n )\n )\n )\n\n # find best method per noise level and metric\n best_adv_l2 = df_adv_mean.xs(\"X_adv_err\", level=1).idxmin()\n best_adv_ssim = df_adv_mean.xs(\"X_adv_ssim\", level=1).idxmax()\n best_adv_psnr = df_adv_mean.xs(\"X_adv_psnr\", level=1).idxmax()\n\n # combine mean and std data into \"mean\\pmstd\" strings\n for (idx, method) in methods.iterrows():\n df_adv_mean.loc[idx, \"X_adv_err\"] = df_adv_mean.loc[\n idx, \"X_adv_err\"\n ].apply(lambda res: res * 100)\n df_adv_std.loc[idx, \"X_adv_err\"] = df_adv_std.loc[\n idx, \"X_adv_err\"\n ].apply(lambda res: res * 100)\n df_adv_combined = df_adv_mean.combine(\n df_adv_std,\n lambda col1, col2: col1.combine(\n col2, lambda el1, el2: \"{:.2f} \\\\pm {:.2f}\".format(el1, el2)\n ),\n )\n\n # format best value per noise level and metric as bold\n for col, idx in best_adv_l2.iteritems():\n df_adv_combined.at[(idx, \"X_adv_err\"), col] = (\n \"\\\\bfseries \" + df_adv_combined.at[(idx, \"X_adv_err\"), col]\n )\n for col, idx 
in best_adv_ssim.iteritems():\n df_adv_combined.at[(idx, \"X_adv_ssim\"), col] = (\n \"\\\\bfseries \" + df_adv_combined.at[(idx, \"X_adv_ssim\"), col]\n )\n for col, idx in best_adv_psnr.iteritems():\n df_adv_combined.at[(idx, \"X_adv_psnr\"), col] = (\n \"\\\\bfseries \" + df_adv_combined.at[(idx, \"X_adv_psnr\"), col]\n )\n\n # rename rows and columns\n df_adv_combined = df_adv_combined.rename(\n index={\n \"X_adv_err\": \"rel.~$\\\\l{2}$-err. [\\\\%]\",\n \"X_adv_ssim\": \"SSIM\",\n \"X_adv_psnr\": \"PSNR\",\n }\n )\n df_adv_combined = df_adv_combined.rename(\n index=methods[\"info\"].apply(lambda res: res[\"name_disp\"]).to_dict()\n )\n\n # save latex tabular\n df_adv_combined.to_latex(\n os.path.join(save_path, \"table_adv.tex\"),\n column_format=2 * \"l\" + len(noise_rel) * \"S[separate-uncertainty]\",\n multirow=True,\n escape=False,\n )\n" ]
[ [ "matplotlib.use", "torch.device", "torch.nn.MSELoss", "torch.nn.Linear" ], [ "torch.device", "torch.nn.MSELoss", "torch.no_grad", "torch.optim.Adam", "torch.tanh", "torch.log" ], [ "pandas.read_pickle", "torch.device", "torch.nn.MSELoss", "pandas.DataFrame", "torch.linspace", "torch.cuda.set_device", "torch.randn_like", "numpy.prod" ], [ "pandas.read_pickle", "torch.stack", "pandas.DataFrame", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.rc", "torch.tensor", "numpy.arange", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.show" ] ]
robashaw/basisopt
[ "c02fd307bc72c576ed298ea14648818b237d2f30" ]
[ "basisopt/opt/eventemper.py" ]
[ "import numpy as np\nfrom mendeleev import element as md_element\n\nfrom basisopt import api, data\nfrom basisopt.exceptions import PropertyNotAvailable\nfrom basisopt.basis import even_temper_expansion\nfrom basisopt.basis.guesses import null_guess\nfrom .preconditioners import unit\nfrom .strategies import Strategy\n\n_INITIAL_GUESS = (0.3, 2.0, 8)\n\nclass EvenTemperedStrategy(Strategy):\n \"\"\" Implements a strategy for an even tempered basis set, where each angular\n momentum shell is described by three parameters: (c, x, n)\n Each exponent in that shell is then given by\n y_k = c*(x**k) for k=0,...,n\n \n --------------------------- ALGORITHM ----------------------------\n Evaluate: energy (can change to any RMSE-compatible property)\n Loss: root-mean-square error\n Guess: null, uses _INITIAL_GUESS above\n Pre-conditioner: None\n \n Initialisation:\n - Find minimum no. of shells needed\n - max_l >= min_l\n - generate initial parameters for each shell\n \n First run:\n - optimize parameters for each shell once, sequentially\n \n Next shell in list not marked finished:\n - re-optimise\n - below threshold or n=max_n: mark finished\n - above threshold: increment n\n Repeat until all shells are marked finished. \n \n Uses iteration, limited by two parameters:\n max_n: max number of exponents in shell\n target: threshold for objective function\n ------------------------------------------------------------------\n \n Additional attributes:\n shells (list): list of (c, x, n) parameter tuples\n shell_done (list): list of flags for whether shell is finished (0) or not (1)\n target (float): threshold for optimization delta\n max_n (int): maximum number of primitives in shell expansion\n max_l (int): maximum angular momentum shell to do;\n if -1, does minimal configuration\n first_run (bool): setting to True restarts optimization from beginning\n last_objective (var): last value of objective function\n \n \"\"\"\n def __init__(self, eval_type='energy', target=1e-5, max_n=18, max_l=-1):\n Strategy.__init__(self, eval_type=eval_type, pre=unit)\n self.name = 'EvenTemper'\n self.shells = []\n self.shell_done = []\n self.last_objective = 0\n self.target = target\n self.guess = null_guess\n self.guess_params = {}\n self.max_n = max_n\n self.max_l = max_l\n self.first_run = True\n \n def set_basis_shells(self, basis, element):\n \"\"\"Expands parameters into a basis set\"\"\"\n basis[element] = even_temper_expansion(self.shells)\n \n def initialise(self, basis, element):\n if self.max_l < 0:\n el = md_element(element.title())\n l_list = [l for (n, l) in el.ec.conf.keys()]\n min_l = len(set(l_list))\n \n self.max_l = max(min_l, self.max_l)\n self.shells = [_INITIAL_GUESS] * self.max_l\n self.shell_done = [1] * self.max_l\n self.set_basis_shells(basis, element)\n self.last_objective = 0\n \n def get_active(self, basis, element):\n (c, x, _) = self.shells[self._step]\n return np.array([c, x])\n \n def set_active(self, values, basis, element):\n (c, x, n) = self.shells[self._step]\n c = max(values[0], 1e-5)\n x = max(values[1], 1.01)\n self.shells[self._step] = (c, x, n)\n self.set_basis_shells(basis, element)\n \n def next(self, basis, element, objective):\n delta_objective = np.abs(self.last_objective - objective)\n self.last_objective = objective\n \n carry_on = True\n if self.first_run:\n self._step = self._step + 1\n if self._step == self.max_l:\n self.first_run = False\n self._step = 0\n (c, x, n) = self.shells[self._step]\n self.shells[self._step] = (c, x, min(n+1, self.max_n))\n else: \n if 
delta_objective < self.target:\n self.shell_done[self._step] = 0\n \n self._step = (self._step + 1) % self.max_l \n (c, x, n) = self.shells[self._step]\n if n == self.max_n:\n self.shell_done[self._step] = 0\n elif self.shell_done[self._step] != 0:\n self.shells[self._step] = (c, x, n+1)\n \n carry_on = np.sum(self.shell_done) != 0\n \n return carry_on\n \n \n \n" ]
[ [ "numpy.sum", "numpy.array", "numpy.abs" ] ]
liuh127/Two-branch-dehazing
[ "2861089977876e2809f094b19dc529200af54f00" ]
[ "perceptual.py" ]
[ "# --- Imports --- #\nimport torch\nimport torch.nn.functional as F\n\n# --- Perceptual loss network --- #\nclass LossNetwork(torch.nn.Module):\n def __init__(self, vgg_model):\n super(LossNetwork, self).__init__()\n self.vgg_layers = vgg_model\n self.layer_name_mapping = {\n '3': \"relu1_2\",\n '8': \"relu2_2\",\n '15': \"relu3_3\"\n }\n\n def output_features(self, x):\n output = {}\n for name, module in self.vgg_layers._modules.items():\n x = module(x)\n if name in self.layer_name_mapping:\n output[self.layer_name_mapping[name]] = x\n return list(output.values())\n\n def forward(self, dehaze, gt):\n loss = []\n dehaze_features = self.output_features(dehaze)\n gt_features = self.output_features(gt)\n for dehaze_feature, gt_feature in zip(dehaze_features, gt_features):\n loss.append(F.mse_loss(dehaze_feature, gt_feature))\n return sum(loss)/len(loss)" ]
[ [ "torch.nn.functional.mse_loss" ] ]
mondrasovic/multi_object_tracking_demo
[ "d4ec6af4e3bca9d47628358967a05890071407ee" ]
[ "vehicle_tracker/visual.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Author: Milan Ondrasovic <milan.ondrasovic@gmail.com>\n\nimport random\n\nfrom typing import Sequence, Tuple, Dict, cast\n\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tracking import TrackedDetection\n\nColorT = Tuple[int, int, int]\nPointT = Tuple[int, int]\n\n\ndef labeled_rectangle(\n image: np.ndarray, start_point: PointT, end_point: PointT, label: str,\n rect_color: ColorT, label_color: ColorT, alpha: float = 0.85):\n (x1, y1), (x2, y2) = start_point, end_point\n\n roi = image[y1:y2, x1:x2]\n rect = np.ones_like(roi) * 255\n image[y1:y2, x1:x2] = cv.addWeighted(roi, alpha, rect, 1 - alpha, 0)\n\n font_face = cv.FONT_HERSHEY_COMPLEX_SMALL\n font_scale = 1\n font_thickness = 3\n\n (text_width, text_height), baseline = cv.getTextSize(\n label, font_face, font_scale, font_thickness)\n text_rect_end = (\n start_point[0] + text_width, start_point[1] + text_height + baseline)\n cv.rectangle(image, start_point, text_rect_end, rect_color, -1)\n \n # TODO Somehow calculate the shift.\n text_start_point = (start_point[0] + 1, start_point[1] + text_height + 3)\n cv.putText(\n image, label, text_start_point, font_face, font_scale, label_color,\n font_thickness, cv.LINE_AA)\n cv.putText(\n image, label, text_start_point, font_face, font_scale, (255, 255, 255),\n max(1, font_thickness - 2), cv.LINE_AA)\n cv.rectangle(image, start_point, end_point, rect_color, 2, cv.LINE_AA)\n\n\nclass TrackingVisualizer:\n def __init__(self, n_colors: int) -> None:\n assert n_colors > 0\n \n self.colors: Sequence[ColorT] = self.init_colors(n_colors, True)\n self.track_color: Dict[int, ColorT] = {}\n \n def draw_tracks(\n self, image: np.ndarray,\n tracks: Sequence[TrackedDetection]) -> None:\n for track in tracks:\n text = str(track.track_id)\n text_color = self._get_text_color()\n annotation_color = self._get_annotation_color(track)\n labeled_rectangle(\n image, track.box.top_left, track.box.bottom_right, text,\n annotation_color, text_color)\n \n def _get_text_color(self) -> ColorT:\n return (16, 16, 16)\n \n def _get_annotation_color(self, track: TrackedDetection) -> ColorT:\n color = self.track_color.get(track.track_id)\n if color is not None:\n return color\n color_pos = len(self.track_color) % len(self.colors)\n color = self.colors[color_pos]\n self.track_color[track.track_id] = color\n return cast(ColorT, color)\n \n @staticmethod\n def init_colors(n_colors: int, randomize: bool = False) -> Sequence[ColorT]:\n color_map = plt.cm.get_cmap('Spectral', n_colors)\n colors = [\n tuple(int(round(c * 255)) for c in color_map(i)[:3])\n for i in range(n_colors)]\n if randomize:\n random.shuffle(colors)\n return cast(Sequence[ColorT], colors)\n" ]
[ [ "matplotlib.pyplot.cm.get_cmap", "numpy.ones_like" ] ]
vikasbahirwani/SequenceTagging
[ "b4e0dc2a71f869a27ada003c9276fd1f269e230d" ]
[ "src/model/lstm_crf/main.py" ]
[ "# reimplementation of https://github.com/guillaumegenthial/tf_ner/blob/master/models/lstm_crf/main.py\n\nimport functools\nimport json\nimport logging\nfrom pathlib import Path\nimport sys\nimport numpy as np\nimport tensorflow as tf\n# tf.enable_eager_execution()\nfrom tf_metrics import precision, recall, f1\n\nDATADIR = \"../../../data/conll/\"\n\n# Setup Logging\nPath('results').mkdir(exist_ok=True)\ntf.logging.set_verbosity(logging.INFO)\nhandlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]\nlogging.getLogger('tensorflow').handlers = handlers\n\n# Data Pipeline\ndef parse_fn(line_words, line_tags):\n \"\"\"Encodes words into bytes for tensor\n\n :param line_words: one line with words (aka sentences) with space between each word/token\n :param line_tags: one line of tags (one tag per word in line_words)\n :return: (list of encoded words, len(words)), list of encoded tags\n \"\"\"\n\n words = [w.encode() for w in line_words.strip().split()]\n tags = [t.encode() for t in line_tags.strip().split()]\n assert len(words) == len(tags), \"Number of words {} and Number of tags must be the same {}\".format(len(words), len(tags))\n return (words, len(words)), tags\n\ndef generator_fn(words_file, tags_file):\n \"\"\"Enumerator to enumerate through words_file and associated tags_file one line at a time\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.\n \"\"\"\n\n with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:\n for line_words, line_tags in zip(f_words, f_tags):\n yield parse_fn(line_words, line_tags)\n\n\ndef input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):\n \"\"\"Creates tensorflow dataset using the generator_fn\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'\n :param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)\n :return: instance of tf.data.Dataset\n \"\"\"\n\n params = params if params is not None else {}\n\n # shapes are analogous to (list of encoded words, len(words)), list of encoded tags\n shapes = (([None], ()), [None])\n types = ((tf.string, tf.int32), tf.string)\n\n defaults = (('<pad>', 0), 'O')\n\n generator = functools.partial(generator_fn, words_file, tags_file)\n dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)\n\n if shuffle_and_repeat:\n dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])\n\n dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)\\\n\n return dataset\n\ndef model_fn(features, labels, mode, params):\n \"\"\"\n\n :param features: words from sentence and number of words per sentence\n :param labels: One tag per word\n :param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL\n :param params: dictionary of hyper parameters for the model\n :return:\n \"\"\"\n\n # For serving, features are a bit different\n if isinstance(features, dict):\n features = features['words'], features['nwords']\n\n # Read vocab_words_file, vocab_tags_file, features\n words, nwords = 
features\n training = (mode == tf.estimator.ModeKeys.TRAIN)\n vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])\n\n '''\n If the file contains the following: \n B-LOC\n B-PER\n O\n I-LOC\n \n then indices = [0, 1, 3] and num_tags = 4\n \n Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?\n '''\n\n with Path(params['vocab_tags_file']).open('r') as f:\n indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']\n num_tags = len(indices) + 1\n\n # Word Embeddings\n # remember - as per the parse function \"words\" is a python list of\n word_ids = vocab_words.lookup(words)\n glove = np.load(params['glove'])['embeddings']\n glove = np.vstack([glove, [[0.]*params['dim']]])\n variable = tf.Variable(glove, dtype=tf.float32, trainable=False)\n embeddings = tf.nn.embedding_lookup(variable, word_ids)\n dropout = params['dropout']\n embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)\n\n # LSTM CRF\n time_major = tf.transpose(embeddings, perm = [1, 0, 2])\n lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])\n lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])\n lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)\n\n \"\"\"\n Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)\n\n Following this, lstm_fw or lstm_bw each return a pair containing:\n\n Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]\n Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.\n\n \"\"\"\n output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)\n output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.transpose(output, perm=[1, 0, 2])\n output = tf.layers.dropout(output, rate=dropout, training=training)\n\n # CRf\n logits = tf.layers.dense(output, num_tags)\n crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)\n pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) # pred_ids = A [batch_size, max_seq_len] matrix, with dtype tf.int32.\n\n # Prediction mode\n if mode == tf.estimator.ModeKeys.PREDICT:\n reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])\n pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))\n predictions = {'pred_ids': pred_ids, 'tags': pred_strings}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Loss\n vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])\n label_ids = vocab_tags.lookup(labels)\n\n \"\"\"\n logits are the same thing as unary potentials,\n checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]\n \"\"\"\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)\n loss = tf.reduce_mean(-log_likelihood)\n\n # metrics\n weights = tf.sequence_mask(nwords)\n\n metrics = {\n 'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),\n 'precision': precision(label_ids, pred_ids, num_tags, indices, weights), # indices indicate non-null classes\n 'recall': recall(label_ids, pred_ids, num_tags, indices, weights),\n 'f1': f1(label_ids, pred_ids, num_tags, indices, weights),\n }\n\n for metric_name, op in metrics.items():\n tf.summary.scalar(metric_name, 
op[1])\n\n\n # Evaluation Mode or training mode\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )\n elif mode == tf.estimator.ModeKeys.TRAIN:\n train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())\n return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)\n\n\ndef fwords(name):\n return str(Path(DATADIR, '{}.words.txt'.format(name)))\n\n\ndef ftags(name):\n return str(Path(DATADIR, '{}.tags.txt'.format(name)))\n\n# Write predictions to file\ndef write_predictions(name, estimator):\n Path('results/score').mkdir(parents=True, exist_ok=True)\n with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:\n test_inpf = functools.partial(input_fn, fwords(name), ftags(name))\n golds_gen = generator_fn(fwords(name), ftags(name))\n preds_gen = estimator.predict(test_inpf)\n for golds, preds in zip(golds_gen, preds_gen):\n ((words, _), tags) = golds\n for word, tag, tag_pred in zip(words, tags, preds['tags']):\n f.write(b' '.join([word, tag, tag_pred]) + b'\\n')\n f.write(b'\\n')\n\nif __name__ == '__main__':\n # Params\n params = {\n 'dim': 300,\n 'dropout': 0.5,\n 'num_oov_buckets': 1,\n 'epochs': 25,\n 'batch_size': 20,\n 'buffer': 15000,\n 'lstm_size': 100,\n 'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),\n 'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),\n 'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),\n 'glove': str(Path(DATADIR, 'glove.npz'))\n }\n\n with Path('results/params.json').open('w') as f:\n json.dump(params, f, indent=4, sort_keys=True)\n\n print('Done writing params to disk')\n\n # Run configuration and estimator\n cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)\n estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)\n\n print('Done creating estimator spec')\n\n # Defining our input functions\n train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)\n eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))\n\n # Create an early stopping hook\n Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)\n\n \"\"\"\n Ref: https://stackoverflow.com/questions/47137061/early-stopping-with-tf-estimator-how\n \n The parameters for stop_if_no_decrease_hook are as follows:\n \n tf.contrib.estimator.stop_if_no_decrease_hook(\n estimator,\n metric_name='loss',\n max_steps_without_decrease=1000,\n min_steps=100)\n \"\"\"\n\n hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)\n\n train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])\n eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120) # Evaluate every 120 seconds\n\n print('Done creating train and eval spec')\n\n # Train with early stopping\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n print('Done training and evaluation')\n\n for name in ['train', 'testa', 'testb']:\n write_predictions(name, estimator)\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.contrib.rnn.TimeReversedFusedRNN", "numpy.load", "tensorflow.nn.embedding_lookup", "tensorflow.contrib.lookup.index_table_from_file", "tensorflow.contrib.rnn.LSTMBlockFusedCell", "tensorflow.concat", "tensorflow.estimator.TrainSpec", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.estimator.RunConfig", "tensorflow.layers.dense", "numpy.vstack", "tensorflow.logging.set_verbosity", "tensorflow.estimator.EstimatorSpec", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.to_int64", "tensorflow.contrib.lookup.index_to_string_table_from_file", "tensorflow.estimator.EvalSpec", "tensorflow.metrics.accuracy", "tensorflow.get_variable", "tensorflow.contrib.estimator.stop_if_no_increase_hook", "tensorflow.layers.dropout", "tensorflow.contrib.crf.crf_decode", "tensorflow.contrib.crf.crf_log_likelihood", "tensorflow.sequence_mask", "tensorflow.data.Dataset.from_generator", "tensorflow.estimator.train_and_evaluate", "tensorflow.train.get_or_create_global_step", "tensorflow.estimator.Estimator", "tensorflow.reduce_mean" ] ]
dredwardhyde/tinygrad-universal
[ "aeb28fc42fb40e9848613ce81811a727fee6f313" ]
[ "tinygrad/ops_cpu.py" ]
[ "import numpy as np\n\nfrom .tensor import Function\n\n\n# ************* unary ops *************\n\nclass ReLU(Function):\n @staticmethod\n def forward(ctx, input):\n ctx.save_for_backward(input)\n return np.maximum(input, 0)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output * (input >= 0)\n\n\nclass Log(Function):\n @staticmethod\n def forward(ctx, input):\n ctx.save_for_backward(input)\n return np.log(input)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return grad_output / input\n\n\nclass Exp(Function):\n @staticmethod\n def forward(ctx, input):\n ret = np.exp(input)\n ctx.save_for_backward(ret)\n return ret\n\n @staticmethod\n def backward(ctx, grad_output):\n ret, = ctx.saved_tensors\n return grad_output * ret\n\n\n# ************* reduce ops *************\n\nclass Sum(Function):\n @staticmethod\n def forward(ctx, input, axis=None):\n ctx.save_for_backward(input, axis)\n return np.array([input.sum()]) if axis is None else input.sum(axis=axis)\n\n @staticmethod\n def backward(ctx, grad_output):\n input, axis = ctx.saved_tensors\n axis = [axis] if type(axis) is int else axis\n shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]\n return grad_output.reshape(shape) + np.zeros_like(input)\n\n\nclass Max(Function):\n @staticmethod\n def forward(ctx, inp, axis=None):\n axis = [axis] if type(axis) == int else axis\n ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)\n ctx.save_for_backward(inp, axis, ret)\n if axis is not None:\n ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])\n return ret\n\n @staticmethod\n def backward(ctx, grad_output):\n input, axis, ret = ctx.saved_tensors\n shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]\n ret2 = (input == ret.reshape(shape))\n div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)\n return ret2 * grad_output.reshape(shape) / div\n\n\n# ************* binary ops *************\n\ndef unbroadcast(out, in_sh):\n # adjoint operation to broadcast is sum. 
Need to sum all axis with 1 = in_sh[i] < out.shape[i]\n sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i] == 1 and out.shape[i] > 1]) if in_sh != (1,) else None\n return out.sum(axis=sum_axis).reshape(in_sh)\n\n\nclass Add(Function):\n @staticmethod\n def forward(ctx, x, y):\n ctx.save_for_backward(x.shape, y.shape)\n return x + y\n\n @staticmethod\n def backward(ctx, grad_output):\n shape_x, shape_y = ctx.saved_tensors\n return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)\n\n\nclass Sub(Function):\n @staticmethod\n def forward(ctx, x, y):\n ctx.save_for_backward(x.shape, y.shape)\n return x - y\n\n @staticmethod\n def backward(ctx, grad_output):\n shape_x, shape_y = ctx.saved_tensors\n return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)\n\n\nclass Mul(Function):\n @staticmethod\n def forward(ctx, x, y):\n ctx.save_for_backward(x, y)\n return x * y\n\n @staticmethod\n def backward(ctx, grad_output):\n x, y = ctx.saved_tensors\n return unbroadcast(y * grad_output, x.shape), unbroadcast(x * grad_output, y.shape)\n\n\nclass Pow(Function):\n @staticmethod\n def forward(ctx, x, y):\n ctx.save_for_backward(x, y)\n return x ** y\n\n @staticmethod\n def backward(ctx, grad_output):\n x, y = ctx.saved_tensors\n return unbroadcast(y * (x ** (y - 1.0)) * grad_output, x.shape), \\\n unbroadcast((x ** y) * np.log(x) * grad_output, y.shape)\n\n\n# ************* movement ops *************\n\nclass Reshape(Function):\n @staticmethod\n def forward(ctx, x, shape):\n ctx.save_for_backward(x.shape)\n return x.reshape(shape)\n\n @staticmethod\n def backward(ctx, grad_output):\n in_shape, = ctx.saved_tensors\n return grad_output.reshape(in_shape)\n\n\nclass Transpose(Function):\n @staticmethod\n def forward(ctx, x, order):\n ctx.save_for_backward(order)\n return np.transpose(x, order)\n\n @staticmethod\n def backward(ctx, x):\n return np.transpose(x, np.argsort(ctx.order))\n\n\ndef inner_slice(x, arg):\n padding = [(max(0, -p[0]), max(0, p[1] - x.shape[i])) for i, p in enumerate(arg)]\n x = np.pad(x, padding)\n slicee = [(p[0] + padding[i][0], p[1] + padding[i][0]) for i, p in enumerate(arg)]\n return x[tuple([slice(x[0], x[1], None) for x in slicee])]\n\n\nclass Slice(Function):\n @staticmethod\n def forward(ctx, x, arg=None):\n ctx.save_for_backward(x.shape)\n return inner_slice(x, arg)\n\n @staticmethod\n def backward(ctx, grad_output):\n shape, = ctx.saved_tensors\n narg = [(0 - p[0], grad_output.shape[i] + (shape[i] - p[1])) for i, p in enumerate(ctx.arg)]\n return inner_slice(grad_output, narg)\n\n\n# ************* processing ops *************\n\nclass Matmul(Function):\n @staticmethod\n def forward(ctx, input, weight):\n ctx.save_for_backward(input, weight)\n return input @ weight\n\n @staticmethod\n def backward(ctx, grad_output):\n input, weight = ctx.saved_tensors\n grad_input = grad_output @ np.swapaxes(weight, -2, -1)\n grad_weight = np.swapaxes(input, -2, -1) @ grad_output\n return grad_input, grad_weight\n\n\nclass Conv2D(Function):\n @staticmethod\n def forward(ctx, x, w, stride=1, groups=1):\n if type(ctx.stride) == int:\n ctx.stride = (ctx.stride, ctx.stride)\n cout, cin, H, W = w.shape\n ys, xs = ctx.stride\n bs, cin_ = x.shape[0], x.shape[1]\n oy, ox = (x.shape[2] - (H - ys)) // ys, (x.shape[3] - (W - xs)) // xs\n assert cin * ctx.groups == cin_\n assert cout % ctx.groups == 0\n rcout = cout // ctx.groups\n\n gx = x.reshape(bs, ctx.groups, cin, x.shape[2], x.shape[3])\n tx = np.lib.stride_tricks.as_strided(gx,\n shape=(bs, ctx.groups, 
cin, oy, ox, H, W),\n strides=(*gx.strides[0:3], gx.strides[3] * ys, gx.strides[4] * xs,\n *gx.strides[3:5]),\n writeable=False,\n )\n tw = w.reshape(ctx.groups, rcout, cin, H, W)\n ctx.save_for_backward(tx, tw, x.shape)\n\n ret = np.zeros((bs, ctx.groups, oy, ox, rcout), dtype=x.dtype)\n for g in range(ctx.groups):\n # ijYXyx,kjyx -> iYXk ->ikYX\n ret[:, g] += np.tensordot(tx[:, g], tw[g], ((1, 4, 5), (1, 2, 3)))\n return np.moveaxis(ret, 4, 2).reshape(bs, cout, oy, ox)\n\n @staticmethod\n def backward(ctx, grad_output):\n bs, _, oy, ox = grad_output.shape\n tx, tw, x_shape = ctx.saved_tensors\n _, rcout, cin, H, W = tw.shape\n ys, xs = ctx.stride\n OY, OX = x_shape[2:4]\n\n ggg = grad_output.reshape(bs, ctx.groups, rcout, oy, ox)\n\n gdw = np.zeros((ctx.groups, rcout, cin, H, W), dtype=tx.dtype)\n for g in range(ctx.groups):\n # 'ikYX,ijYXyx -> kjyx'\n gdw[g] += np.tensordot(ggg[:, g], tx[:, g], ((0, 2, 3), (0, 2, 3)))\n\n # needs to be optimized\n gdx = np.zeros((bs, ctx.groups, cin, OY, OX), dtype=tx.dtype)\n for k in range(oy * ox):\n Y, X = k // ox, k % ox\n iY, iX = Y * ys, X * xs\n # gdx[:,:,: , iY:iY+H, iX:iX+W] += np.einsum('igk,gkjyx->igjyx', ggg[:,:,:,Y,X], tw)\n for g in range(ctx.groups):\n tg = np.dot(ggg[:, g, :, Y, X].reshape(bs, -1), tw[g].reshape(rcout, -1))\n gdx[:, g, :, iY:iY + H, iX:iX + W] += tg.reshape((bs, cin, H, W))\n\n return gdx.reshape((bs, ctx.groups * cin, OY, OX)), gdw.reshape((ctx.groups * rcout, cin, H, W))\n" ]
[ [ "numpy.pad", "numpy.lib.stride_tricks.as_strided", "numpy.zeros_like", "numpy.log", "numpy.zeros", "numpy.exp", "numpy.tensordot", "numpy.swapaxes", "numpy.transpose", "numpy.argsort", "numpy.moveaxis", "numpy.maximum" ] ]
pschuh/flax
[ "9eee5149f345bd871555f3b53e3605f58588c883" ]
[ "tests/linen/linen_linear_test.py" ]
[ "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for flax.nn.linear.\"\"\"\n\nimport functools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom flax import linen as nn\n\nimport jax\nfrom jax import random\nfrom jax.nn import initializers\nimport jax.numpy as jnp\n\nimport numpy as np\n\n# Parse absl flags test_srcdir and test_tmpdir.\njax.config.parse_flags_with_absl()\n\n\nclass LinearTest(parameterized.TestCase):\n\n def test_dense(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 3))\n dense_module = nn.Dense(\n features=4,\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, _ = dense_module.init_with_output(rng, x)\n self.assertEqual(y.shape, (1, 4))\n np.testing.assert_allclose(y, np.full((1, 4), 4.))\n\n def test_dense_extra_batch_dims(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 2, 3))\n dense_module = nn.Dense(\n features=4,\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, _ = dense_module.init_with_output(rng, x)\n np.testing.assert_allclose(y, np.full((1, 2, 4), 4.))\n\n def test_dense_no_bias(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 3))\n dense_module = nn.Dense(\n features=4,\n use_bias=False,\n kernel_init=initializers.ones,\n )\n y, _ = dense_module.init_with_output(rng, x)\n np.testing.assert_allclose(y, np.full((1, 4), 3.))\n\n def test_dense_is_dense_general(self):\n x = jax.random.normal(random.PRNGKey(0), (5, 3))\n dense_module = nn.Dense(\n features=4,\n use_bias=True,\n bias_init=initializers.normal(),\n )\n y1, _ = dense_module.init_with_output(dict(params=random.PRNGKey(1)), x)\n dg_module = nn.DenseGeneral(\n features=4,\n use_bias=True,\n bias_init=initializers.normal(),\n )\n y2, _ = dg_module.init_with_output(dict(params=random.PRNGKey(1)), x)\n\n np.testing.assert_allclose(y1, y2)\n\n def test_dense_general_batch_dim_raises(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 3, 2, 5))\n with self.assertRaises(ValueError):\n dg_module = nn.DenseGeneral(\n features=4,\n batch_dims=(0, 2),\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n dg_module.init_with_output(rng, x)\n\n def test_dense_general_two_out(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 3))\n dg_module = nn.DenseGeneral(\n features=(2, 2),\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, _ = dg_module.init_with_output(rng, x)\n np.testing.assert_allclose(y, np.full((1, 2, 2), 4.))\n\n def test_dense_general_two_in(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 2, 2))\n dg_module = nn.DenseGeneral(\n features=3,\n axis=(-2, 2),\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, _ = dg_module.init_with_output(rng, x)\n np.testing.assert_allclose(y, np.full((1, 3), 5.))\n\n def test_dense_general_batch_dim(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((2, 1, 3, 5))\n\n state = 
{'counter': 0.}\n def _counter_init(rng, shape, dtype, state):\n del rng, dtype\n state['counter'] += 1.\n return jnp.full(shape, state['counter'])\n counter_init = functools.partial(_counter_init, state=state)\n\n dg_module = nn.DenseGeneral(\n features=7,\n axis=(3, -2),\n batch_dims=0,\n bias_init=initializers.ones,\n kernel_init=counter_init,\n )\n y, _ = dg_module.init_with_output(rng, x)\n target = np.concatenate(\n [np.full((1, 1, 7), 16.), np.full((1, 1, 7), 31.)], axis=0)\n np.testing.assert_allclose(y, target)\n\n @parameterized.parameters([((-2, 3), (), 'bijk,jklm->bilm'),\n ((3, -2), (), 'bijk,jklm->bilm'),\n ((-2, 3), (0,), 'bijk,bjklm->bilm')])\n def test_dense_general_vs_numpy(self, axis, batch_dims, einsum_expr):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((16, 8, 9, 10))\n\n dg_module = nn.DenseGeneral(\n features=(11, 12),\n axis=axis,\n batch_dims=batch_dims,\n bias_init=initializers.ones,\n kernel_init=initializers.normal(),\n )\n y, initial_params = dg_module.init_with_output(rng, x)\n target = np.einsum(einsum_expr, x, initial_params['params']['kernel']) + 1.\n np.testing.assert_allclose(y, target, atol=1e-6)\n\n @parameterized.parameters([((3,),), (3,)])\n def test_conv(self, kernel_size):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 8, 3))\n conv_module = nn.Conv(\n features=4,\n kernel_size=kernel_size,\n padding='VALID',\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, initial_params = conv_module.init_with_output(rng, x)\n self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))\n np.testing.assert_allclose(y, np.full((1, 6, 4), 10.))\n\n @parameterized.parameters([((3,),), (3,)])\n def test_single_input_conv(self, kernel_size):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((8, 3))\n conv_module = nn.Conv(\n features=4,\n kernel_size=kernel_size,\n padding='VALID',\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, initial_params = conv_module.init_with_output(rng, x)\n self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))\n np.testing.assert_allclose(y, np.full((6, 4), 10.))\n\n @parameterized.parameters([((3,),), (3,)])\n def test_group_conv(self, kernel_size):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 8, 4))\n conv_module = nn.Conv(\n features=4,\n kernel_size=kernel_size,\n feature_group_count=2,\n padding='VALID',\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, initial_params = conv_module.init_with_output(rng, x)\n self.assertEqual(initial_params['params']['kernel'].shape, (3, 2, 4))\n np.testing.assert_allclose(y, np.full((1, 6, 4), 7.))\n\n @parameterized.parameters([((3,),), (3,)])\n def test_conv_transpose(self, kernel_size):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.ones((1, 8, 3))\n conv_transpose_module = nn.ConvTranspose(\n features=4,\n kernel_size=kernel_size,\n padding='VALID',\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, initial_params = conv_transpose_module.init_with_output(rng, x)\n self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))\n correct_ans = np.array([[[ 4., 4., 4., 4.],\n [ 7., 7., 7., 7.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [ 7., 7., 7., 7.],\n [ 4., 4., 4., 4.]]])\n np.testing.assert_allclose(y, correct_ans)\n\n @parameterized.parameters([((3,),), (3,)])\n def test_single_input_conv_transpose(self, kernel_size):\n rng = 
dict(params=random.PRNGKey(0))\n x = jnp.ones((8, 3))\n conv_transpose_module = nn.ConvTranspose(\n features=4,\n kernel_size=kernel_size,\n padding='VALID',\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n )\n y, initial_params = conv_transpose_module.init_with_output(rng, x)\n self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))\n correct_ans = np.array([[ 4., 4., 4., 4.],\n [ 7., 7., 7., 7.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [10., 10., 10., 10.],\n [ 7., 7., 7., 7.],\n [ 4., 4., 4., 4.]])\n np.testing.assert_allclose(y, correct_ans)\n\n def test_embed(self):\n rng = dict(params=random.PRNGKey(0))\n x = jnp.arange(4)[None]\n dummy_embedding = jnp.broadcast_to(\n jnp.arange(4)[..., None], (4, 3)).astype(jnp.float32)\n embed_module = nn.Embed(\n num_embeddings=4,\n features=3,\n embedding_init=lambda rng, shape, dtype: dummy_embedding,\n )\n y, initial_params = embed_module.init_with_output(rng, x)\n np.testing.assert_allclose(y, dummy_embedding[None])\n z = embed_module.apply(initial_params, jnp.ones((3,)), method=embed_module.attend)\n np.testing.assert_allclose(z, 3. * jnp.arange(4))\n \n def test_non_final_axis(self):\n class Foo(nn.Module):\n @nn.compact\n def __call__(self, x):\n return nn.DenseGeneral(features=6, axis=1, name='dense')(x)\n\n x = jnp.ones((2, 4, 8))\n y, variables = Foo().init_with_output(random.PRNGKey(0), x)\n self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {\n 'dense': {'kernel': (4, 6), 'bias': (6,)}\n })\n self.assertEqual(y.shape, (2, 8, 6))\n \n def test_non_final_axes(self):\n class Foo(nn.Module):\n @nn.compact\n def __call__(self, x):\n return nn.DenseGeneral(features=6, axis=(0, 1), name='dense')(x)\n\n x = jnp.ones((2, 4, 8))\n y, variables = Foo().init_with_output(random.PRNGKey(0), x)\n self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {\n 'dense': {'kernel': (2, 4, 6), 'bias': (6,)}\n })\n self.assertEqual(y.shape, (8, 6))\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.full", "numpy.einsum" ] ]
XpressAI/spark
[ "0a838dcd71c733289e60d9f74e8267027c7b2c4a" ]
[ "python/pyspark/pandas/generic.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA base class of DataFrame/Column to behave similar to pandas DataFrame/Series.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nfrom collections import Counter\nfrom distutils.version import LooseVersion\nfrom functools import reduce\nfrom typing import (\n Any,\n Callable,\n Iterable,\n IO,\n List,\n Optional,\n NoReturn,\n Tuple,\n Union,\n TYPE_CHECKING,\n cast,\n)\nimport warnings\n\nimport numpy as np # noqa: F401\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom pyspark.sql import Column, functions as F\nfrom pyspark.sql.types import (\n BooleanType,\n DataType,\n DoubleType,\n FloatType,\n IntegralType,\n LongType,\n NumericType,\n)\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar\nfrom pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer\nfrom pyspark.pandas.internal import InternalFrame\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.typedef import spark_type_to_pandas_dtype\nfrom pyspark.pandas.utils import (\n is_name_like_tuple,\n is_name_like_value,\n name_like_string,\n scol_for,\n sql_conf,\n validate_arguments_and_invoke_function,\n validate_axis,\n SPARK_CONF_ARROW_ENABLED,\n)\n\nif TYPE_CHECKING:\n from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)\n from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)\n from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)\n from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)\n from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)\n\n\nbool_type = bool\n\n\nclass Frame(object, metaclass=ABCMeta):\n \"\"\"\n The base class for both DataFrame and Series.\n \"\"\"\n\n @abstractmethod\n def __getitem__(self, key: Any) -> Any:\n pass\n\n @property\n @abstractmethod\n def _internal(self) -> InternalFrame:\n pass\n\n @abstractmethod\n def _apply_series_op(\n self: FrameLike,\n op: Callable[[\"Series\"], Union[\"Series\", Column]],\n should_resolve: bool = False,\n ) -> FrameLike:\n pass\n\n @abstractmethod\n def _reduce_for_stat_function(\n self,\n sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],\n name: str,\n axis: Optional[Axis] = None,\n numeric_only: bool = True,\n **kwargs: Any\n ) -> Union[\"Series\", Scalar]:\n pass\n\n @property\n @abstractmethod\n def dtypes(self) -> Union[pd.Series, Dtype]:\n pass\n\n @abstractmethod\n def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:\n pass\n\n @property\n @abstractmethod\n def index(self) -> \"Index\":\n pass\n\n @abstractmethod\n def copy(self: FrameLike) -> FrameLike:\n 
pass\n\n @abstractmethod\n def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:\n pass\n\n @abstractmethod\n def head(self: FrameLike, n: int = 5) -> FrameLike:\n pass\n\n # TODO: add 'axis' parameter\n def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return cumulative minimum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative minimum.\n\n .. note:: the current implementation of cummin uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.min : Return the minimum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n Series.min : Return the minimum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the minimum in each column.\n\n >>> df.cummin()\n A B\n 0 2.0 1.0\n 1 2.0 NaN\n 2 1.0 0.0\n\n It works identically in Series.\n\n >>> df.A.cummin()\n 0 2.0\n 1 2.0\n 2 1.0\n Name: A, dtype: float64\n \"\"\"\n return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)\n\n # TODO: add 'axis' parameter\n def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return cumulative maximum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative maximum.\n\n .. note:: the current implementation of cummax uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. 
If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.max : Return the maximum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.max : Return the maximum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the maximum in each column.\n\n >>> df.cummax()\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 3.0 1.0\n\n It works identically in Series.\n\n >>> df.B.cummax()\n 0 1.0\n 1 NaN\n 2 1.0\n Name: B, dtype: float64\n \"\"\"\n return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)\n\n # TODO: add 'axis' parameter\n def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return cumulative sum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative sum.\n\n .. note:: the current implementation of cumsum uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.sum : Return the sum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.sum : Return the sum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumsum()\n A B\n 0 2.0 1.0\n 1 5.0 NaN\n 2 6.0 1.0\n\n It works identically in Series.\n\n >>> df.A.cumsum()\n 0 2.0\n 1 5.0\n 2 6.0\n Name: A, dtype: float64\n \"\"\"\n return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)\n\n # TODO: add 'axis' parameter\n # TODO: use pandas_udf to support negative values and other options later\n # other window except unbounded ones is supported as of Spark 3.0.\n def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:\n \"\"\"\n Return cumulative product over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative product.\n\n .. 
note:: the current implementation of cumprod uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n .. note:: unlike pandas', pandas-on-Spark's emulates cumulative product by\n ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Raises\n ------\n Exception : If the values is equal to or lower than 0.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 4.0 10.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumprod()\n A B\n 0 2.0 1.0\n 1 6.0 NaN\n 2 24.0 10.0\n\n It works identically in Series.\n\n >>> df.A.cumprod()\n 0 2.0\n 1 6.0\n 2 24.0\n Name: A, dtype: float64\n \"\"\"\n return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)\n\n # TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated\n # since we're using this for `DataFrame.info` internally.\n # We can drop it once our minimal pandas version becomes 1.0.0.\n def get_dtype_counts(self) -> pd.Series:\n \"\"\"\n Return counts of unique dtypes in this object.\n\n .. deprecated:: 0.14.0\n\n Returns\n -------\n dtype : pd.Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]\n >>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])\n >>> df\n str int1 int2\n 0 a 1 1\n 1 b 2 2\n 2 c 3 3\n\n >>> df.get_dtype_counts().sort_values()\n object 1\n int64 2\n dtype: int64\n\n >>> df.str.get_dtype_counts().sort_values()\n object 1\n dtype: int64\n \"\"\"\n warnings.warn(\n \"`get_dtype_counts` has been deprecated and will be \"\n \"removed in a future version. 
For DataFrames use \"\n \"`.dtypes.value_counts()\",\n FutureWarning,\n )\n if not isinstance(self.dtypes, Iterable):\n dtypes = [self.dtypes]\n else:\n dtypes = list(self.dtypes)\n return pd.Series(dict(Counter([d.name for d in dtypes])))\n\n def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the DataFrame.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the DataFrames.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. For example, given\n\n >>> df = ps.DataFrame({'category': ['A', 'A', 'B'],\n ... 'col1': [1, 2, 3],\n ... 'col2': [4, 5, 6]},\n ... columns=['category', 'col1', 'col2'])\n >>> def keep_category_a(df):\n ... return df[df['category'] == 'A']\n >>> def add_one(df, column):\n ... return df.assign(col3=df[column] + 1)\n >>> def multiply(df, column1, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n instead of writing\n\n >>> multiply(add_one(keep_category_a(df), column=\"col1\"), column1=\"col2\", column2=\"col3\")\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n You can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe(multiply, column1=\"col2\", column2=\"col3\")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``df``:\n\n >>> def multiply_2(column1, df, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n Then you can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe((multiply_2, 'df'), column1=\"col2\", column2=\"col3\")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n You can use lambda as wel\n\n >>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename(\"value\"))\n 0 2\n 1 3\n 2 4\n Name: value, dtype: int64\n \"\"\"\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\"%s is both the pipe target and a keyword \" \"argument\" % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def to_numpy(self) -> np.ndarray:\n \"\"\"\n A NumPy ndarray representing the values in this DataFrame or Series.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> ps.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to be used.\n\n >>> ps.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5]}).to_numpy()\n array([[1. , 3. ],\n [2. 
, 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will have object dtype.\n\n >>> df = ps.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5], \"C\": pd.date_range('2000', periods=2)})\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],\n [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)\n\n For Series,\n\n >>> ps.Series(['a', 'b', 'a']).to_numpy()\n array(['a', 'b', 'a'], dtype=object)\n \"\"\"\n return self.to_pandas().values\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return a Numpy representation of the DataFrame or the Series.\n\n .. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results in an array of\n the same type.\n\n >>> df = ps.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]])\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray\n of the broadest type that accommodates these mixed types (e.g., object).\n\n >>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 'first'),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 'first'],\n ['monkey', nan, None]], dtype=object)\n\n For Series,\n\n >>> ps.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> ps.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n \"\"\"\n warnings.warn(\"We recommend using `{}.to_numpy()` instead.\".format(type(self).__name__))\n return self.to_numpy()\n\n def to_csv(\n self,\n path: Optional[str] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n columns: Optional[List[Union[Any, Tuple]]] = None,\n header: bool = True,\n quotechar: str = '\"',\n date_format: Optional[str] = None,\n escapechar: Optional[str] = None,\n num_files: Optional[int] = None,\n mode: str = \"overwrite\",\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',\n pandas-on-Spark respects HDFS's property such as 'fs.default.name'.\n\n .. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n Parameters\n ----------\n path : str, default None\n File path. If None is provided the result is returned as a string.\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. 
If a list of strings is given it is\n assumed to be aliases for the column names.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n date_format : str, default None\n Format string for datetime objects.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},\n default 'overwrite'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - 'append': Append the new data to existing data.\n - 'overwrite': Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n This kwargs are specific to PySpark's CSV options to pass. Check\n the options in PySpark's API documentation for spark.write.csv(...).\n It has higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n -------\n str or None\n\n See Also\n --------\n read_csv\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_parquet\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),\n ... country=['KR', 'US', 'JP'],\n ... code=[1, 2 ,3]), columns=['date', 'country', 'code'])\n >>> df.sort_values(by=\"date\") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 2012-03-31 12:00:00 JP 3\n\n >>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date,country,code\n 2012-01-31 12:00:00,KR,1\n 2012-02-29 12:00:00,US,2\n 2012-03-31 12:00:00,JP,3\n\n >>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r'%s/to_csv/foo.csv' % path\n ... ).sort_values(by=\"date\") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 2012-03-31 12:00:00 US 3\n\n In case of Series,\n\n >>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date\n 2012-01-31 12:00:00\n 2012-02-29 12:00:00\n 2012-03-31 12:00:00\n\n >>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r'%s/to_csv/foo.csv' % path\n ... ).sort_values(by=\"date\") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n ... 2012-01-31 12:00:00\n ... 2012-02-29 12:00:00\n ... 2012-03-31 12:00:00\n\n You can preserve the index in the roundtrip as below.\n\n >>> df.set_index(\"country\", append=True, inplace=True)\n >>> df.date.to_csv(\n ... path=r'%s/to_csv/bar.csv' % path,\n ... num_files=1,\n ... index_col=[\"index1\", \"index2\"])\n >>> ps.read_csv(\n ... path=r'%s/to_csv/bar.csv' % path, index_col=[\"index1\", \"index2\"]\n ... ).sort_values(by=\"date\") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n index1 index2\n ... ... 2012-01-31 12:00:00\n ... ... 2012-02-29 12:00:00\n ... ... 
2012-03-31 12:00:00\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore\n\n if path is None:\n # If path is none, just collect and use pandas's to_csv.\n psdf_or_ser = self\n if (LooseVersion(\"0.24\") > LooseVersion(pd.__version__)) and isinstance(\n self, ps.Series\n ):\n # 0.23 seems not having 'columns' parameter in Series' to_csv.\n return psdf_or_ser.to_pandas().to_csv( # type: ignore\n None,\n sep=sep,\n na_rep=na_rep,\n header=header,\n date_format=date_format,\n index=False,\n )\n else:\n return psdf_or_ser.to_pandas().to_csv( # type: ignore\n None,\n sep=sep,\n na_rep=na_rep,\n columns=columns,\n header=header,\n quotechar=quotechar,\n date_format=date_format,\n escapechar=escapechar,\n index=False,\n )\n\n psdf = self\n if isinstance(self, ps.Series):\n psdf = self.to_frame()\n\n if columns is None:\n column_labels = psdf._internal.column_labels\n else:\n column_labels = []\n for label in columns:\n if not is_name_like_tuple(label):\n label = (label,)\n if label not in psdf._internal.column_labels:\n raise KeyError(name_like_string(label))\n column_labels.append(label)\n\n if isinstance(index_col, str):\n index_cols = [index_col]\n elif index_col is None:\n index_cols = []\n else:\n index_cols = index_col\n\n if header is True and psdf._internal.column_labels_level > 1:\n raise ValueError(\"to_csv only support one-level index column now\")\n elif isinstance(header, list):\n sdf = psdf.to_spark(index_col) # type: ignore\n sdf = sdf.select(\n [scol_for(sdf, name_like_string(label)) for label in index_cols]\n + [\n scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(\n new_name\n )\n for i, (label, new_name) in enumerate(zip(column_labels, header))\n ]\n )\n header = True\n else:\n sdf = psdf.to_spark(index_col) # type: ignore\n sdf = sdf.select(\n [scol_for(sdf, name_like_string(label)) for label in index_cols]\n + [\n scol_for(sdf, str(i) if label is None else name_like_string(label))\n for i, label in enumerate(column_labels)\n ]\n )\n\n if num_files is not None:\n sdf = sdf.repartition(num_files)\n\n builder = sdf.write.mode(mode)\n if partition_cols is not None:\n builder.partitionBy(partition_cols)\n builder._set_opts(\n sep=sep,\n nullValue=na_rep,\n header=header,\n quote=quotechar,\n dateFormat=date_format,\n charToEscapeQuoteEscaping=escapechar,\n )\n builder.options(**options).format(\"csv\").save(path)\n return None\n\n def to_json(\n self,\n path: Optional[str] = None,\n compression: str = \"uncompressed\",\n num_files: Optional[int] = None,\n mode: str = \"overwrite\",\n orient: str = \"records\",\n lines: bool = True,\n partition_cols: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n .. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',\n pandas-on-Spark respects HDFS's property such as 'fs.default.name'.\n\n .. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n .. note:: output JSON format is different from pandas'. It always use `orient='records'`\n for its output. 
This behaviour might have to change in the near future.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path : string, optional\n File path. If not specified, the result is returned as\n a string.\n lines : bool, default True\n If ‘orient’ is ‘records’ write out line delimited json format.\n Will throw ValueError if incorrect ‘orient’ since others are not\n list like. It should be always True for now.\n orient : str, default 'records'\n It should be always 'records' for now.\n compression : {'gzip', 'bz2', 'xz', None}\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},\n default 'overwrite'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - 'append': Append the new data to existing data.\n - 'overwrite': Overwrite existing data.\n - 'ignore': Silently ignore this operation if data already exists.\n - 'error' or 'errorifexists': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark's index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n It is specific to PySpark's JSON options to pass. Check\n the options in PySpark's API documentation for `spark.write.json(...)`.\n It has a higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n --------\n str or None\n\n Examples\n --------\n >>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json()\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n >>> df['col 1'].to_json()\n '[{\"col 1\":\"a\"},{\"col 1\":\"c\"}]'\n\n >>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)\n >>> ps.read_json(\n ... path=r'%s/to_json/foo.json' % path\n ... ).sort_values(by=\"col 1\")\n col 1 col 2\n 0 a b\n 1 c d\n\n >>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col=\"index\")\n >>> ps.read_json(\n ... path=r'%s/to_json/foo.json' % path, index_col=\"index\"\n ... 
).sort_values(by=\"col 1\") # doctest: +NORMALIZE_WHITESPACE\n col 1\n index\n 0 a\n 1 c\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\") # type: ignore\n\n if not lines:\n raise NotImplementedError(\"lines=False is not implemented yet.\")\n\n if orient != \"records\":\n raise NotImplementedError(\"orient='records' is supported only for now.\")\n\n if path is None:\n # If path is none, just collect and use pandas's to_json.\n psdf_or_ser = self\n pdf = psdf_or_ser.to_pandas() # type: ignore\n if isinstance(self, ps.Series):\n pdf = pdf.to_frame()\n # To make the format consistent and readable by `read_json`, convert it to pandas' and\n # use 'records' orient for now.\n return pdf.to_json(orient=\"records\")\n\n psdf = self\n if isinstance(self, ps.Series):\n psdf = self.to_frame()\n sdf = psdf.to_spark(index_col=index_col) # type: ignore\n\n if num_files is not None:\n sdf = sdf.repartition(num_files)\n\n builder = sdf.write.mode(mode)\n if partition_cols is not None:\n builder.partitionBy(partition_cols)\n builder._set_opts(compression=compression)\n builder.options(**options).format(\"json\").save(path)\n return None\n\n def to_excel(\n self,\n excel_writer: Union[str, pd.ExcelWriter],\n sheet_name: str = \"Sheet1\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Union[str, List[str]]] = None,\n header: bool = True,\n index: bool = True,\n index_label: Optional[Union[str, List[str]]] = None,\n startrow: int = 0,\n startcol: int = 0,\n engine: Optional[str] = None,\n merge_cells: bool = True,\n encoding: Optional[str] = None,\n inf_rep: str = \"inf\",\n verbose: bool = True,\n freeze_panes: Optional[Tuple[int, int]] = None,\n ) -> None:\n \"\"\"\n Write object to an Excel sheet.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n To write a single object to an Excel .xlsx file it is only necessary to\n specify a target file name. To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n Notes\n -----\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n See Also\n --------\n read_excel : Read Excel file.\n\n Examples\n --------\n Create, write to and save a workbook:\n\n >>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n psdf = self\n\n if isinstance(self, ps.DataFrame):\n f = pd.DataFrame.to_excel\n elif isinstance(self, ps.Series):\n f = pd.Series.to_excel\n else:\n raise TypeError(\n \"Constructor expects DataFrame or Series; however, \" \"got [%s]\" % (self,)\n )\n return validate_arguments_and_invoke_function(\n psdf._to_internal_pandas(), self.to_excel, f, args\n )\n\n def mean(\n self, axis: Optional[Axis] = None, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the mean of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n mean : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.mean()\n a 2.0\n b 0.2\n dtype: float64\n\n >>> df.mean(axis=1)\n 0 0.55\n 1 1.10\n 2 1.65\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].mean()\n 2.0\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def mean(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n return F.mean(spark_column)\n\n return self._reduce_for_stat_function(\n mean, name=\"mean\", axis=axis, numeric_only=numeric_only\n )\n\n def sum(\n self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the sum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Returns\n -------\n sum : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.sum()\n a 6.0\n b 0.4\n dtype: float64\n\n >>> df.sum(axis=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 0.0\n dtype: float64\n\n >>> df.sum(min_count=3)\n a 6.0\n b NaN\n dtype: float64\n\n >>> df.sum(axis=1, min_count=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].sum()\n 6.0\n\n >>> df['a'].sum(min_count=3)\n 6.0\n >>> df['b'].sum(min_count=3)\n nan\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n elif numeric_only is True and axis == 1:\n numeric_only = None\n\n def sum(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n return F.coalesce(F.sum(spark_column), SF.lit(0))\n\n return self._reduce_for_stat_function(\n sum, name=\"sum\", axis=axis, numeric_only=numeric_only, min_count=min_count\n )\n\n def product(\n self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the product of the values.\n\n .. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``\n trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. 
If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Examples\n --------\n On a DataFrame:\n\n Non-numeric type column is not included to the result.\n\n >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],\n ... 'B': [10, 20, 30, 40, 50],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> psdf\n A B C\n 0 1 10 a\n 1 2 20 b\n 2 3 30 c\n 3 4 40 d\n 4 5 50 e\n\n >>> psdf.prod()\n A 120\n B 12000000\n dtype: int64\n\n If there is no numeric type columns, returns empty Series.\n\n >>> ps.DataFrame({\"key\": ['a', 'b', 'c'], \"val\": ['x', 'y', 'z']}).prod()\n Series([], dtype: float64)\n\n On a Series:\n\n >>> ps.Series([1, 2, 3, 4, 5]).prod()\n 120\n\n By default, the product of an empty or all-NA Series is ``1``\n\n >>> ps.Series([]).prod()\n 1.0\n\n This can be controlled with the ``min_count`` parameter\n\n >>> ps.Series([]).prod(min_count=1)\n nan\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n elif numeric_only is True and axis == 1:\n numeric_only = None\n\n def prod(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())\n elif isinstance(spark_type, NumericType):\n num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))\n sign = F.when(\n F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1\n ).otherwise(-1)\n\n scol = F.when(num_zeros > 0, 0).otherwise(\n sign * F.exp(F.sum(F.log(F.abs(spark_column))))\n )\n\n if isinstance(spark_type, IntegralType):\n scol = F.round(scol).cast(LongType())\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n\n return F.coalesce(scol, SF.lit(1))\n\n return self._reduce_for_stat_function(\n prod, name=\"prod\", axis=axis, numeric_only=numeric_only, min_count=min_count\n )\n\n prod = product\n\n def skew(\n self, axis: Optional[Axis] = None, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return unbiased skew normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n skew : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.skew() # doctest: +SKIP\n a 0.000000e+00\n b -3.319678e-16\n dtype: float64\n\n On a Series:\n\n >>> df['a'].skew()\n 0.0\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def skew(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n return F.skewness(spark_column)\n\n return self._reduce_for_stat_function(\n skew, name=\"skew\", axis=axis, numeric_only=numeric_only\n )\n\n def kurtosis(\n self, axis: Optional[Axis] = None, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).\n Normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n kurt : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.kurtosis()\n a -1.5\n b -1.5\n dtype: float64\n\n On a Series:\n\n >>> df['a'].kurtosis()\n -1.5\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def kurtosis(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n return F.kurtosis(spark_column)\n\n return self._reduce_for_stat_function(\n kurtosis, name=\"kurtosis\", axis=axis, numeric_only=numeric_only\n )\n\n kurt = kurtosis\n\n def min(\n self, axis: Optional[Axis] = None, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the minimum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n min : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.min()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.min(axis=1)\n 0 0.1\n 1 0.2\n 2 0.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].min()\n 1.0\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n elif numeric_only is True and axis == 1:\n numeric_only = None\n\n return self._reduce_for_stat_function(\n F.min, name=\"min\", axis=axis, numeric_only=numeric_only\n )\n\n def max(\n self, axis: Optional[Axis] = None, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the maximum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.max()\n a 3.0\n b 0.3\n dtype: float64\n\n >>> df.max(axis=1)\n 0 1.0\n 1 2.0\n 2 3.0\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].max()\n 3.0\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n elif numeric_only is True and axis == 1:\n numeric_only = None\n\n return self._reduce_for_stat_function(\n F.max, name=\"max\", axis=axis, numeric_only=numeric_only\n )\n\n def count(\n self, axis: Optional[Axis] = None, numeric_only: bool = False\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Count non-NA cells for each column.\n\n The values `None`, `NaN` are considered NA.\n\n Parameters\n ----------\n axis : {0 or ‘index’, 1 or ‘columns’}, default 0\n If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are\n generated for each row.\n numeric_only : bool, default False\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n See Also\n --------\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = ps.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... \"Single\": [False, True, True, True, False]},\n ... 
columns=[\"Person\", \"Age\", \"Single\"])\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n >>> df.count(axis=1)\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n On a Series:\n\n >>> df['Person'].count()\n 5\n\n >>> df['Age'].count()\n 4\n \"\"\"\n\n return self._reduce_for_stat_function(\n Frame._count_expr, name=\"count\", axis=axis, numeric_only=numeric_only\n )\n\n def std(\n self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return sample standard deviation.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n std : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.std()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.std(axis=1)\n 0 0.636396\n 1 1.272792\n 2 1.909188\n 3 NaN\n dtype: float64\n\n >>> df.std(ddof=0)\n a 0.816497\n b 0.081650\n dtype: float64\n\n On a Series:\n\n >>> df['a'].std()\n 1.0\n\n >>> df['a'].std(ddof=0)\n 0.816496580927726\n \"\"\"\n assert ddof in (0, 1)\n\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def std(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n if ddof == 0:\n return F.stddev_pop(spark_column)\n else:\n return F.stddev_samp(spark_column)\n\n return self._reduce_for_stat_function(\n std, name=\"std\", axis=axis, numeric_only=numeric_only, ddof=ddof\n )\n\n def var(\n self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return unbiased variance.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n var : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... 
columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.var()\n a 1.00\n b 0.01\n dtype: float64\n\n >>> df.var(axis=1)\n 0 0.405\n 1 1.620\n 2 3.645\n 3 NaN\n dtype: float64\n\n >>> df.var(ddof=0)\n a 0.666667\n b 0.006667\n dtype: float64\n\n On a Series:\n\n >>> df['a'].var()\n 1.0\n\n >>> df['a'].var(ddof=0)\n 0.6666666666666666\n \"\"\"\n assert ddof in (0, 1)\n\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def var(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n if ddof == 0:\n return F.var_pop(spark_column)\n else:\n return F.var_samp(spark_column)\n\n return self._reduce_for_stat_function(\n var, name=\"var\", axis=axis, numeric_only=numeric_only, ddof=ddof\n )\n\n def median(\n self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the median of the values for the requested axis.\n\n .. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon\n approximate percentile computation because computing median across a large dataset\n is extremely expensive.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n median : scalar or Series\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 
'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])\n >>> df\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n a 25.0\n b 3.0\n dtype: float64\n\n On a Series:\n\n >>> df['a'].median()\n 25.0\n >>> (df['b'] + 100).median()\n 103.0\n\n For multi-index columns,\n\n >>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])\n >>> df\n x y\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n x a 25.0\n y b 3.0\n dtype: float64\n\n >>> df.median(axis=1)\n 0 12.5\n 1 11.5\n 2 14.0\n 3 18.5\n 4 15.5\n dtype: float64\n\n On a Series:\n\n >>> df[('x', 'a')].median()\n 25.0\n >>> (df[('y', 'b')] + 100).median()\n 103.0\n \"\"\"\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n if not isinstance(accuracy, int):\n raise TypeError(\n \"accuracy must be an integer; however, got [%s]\" % type(accuracy).__name__\n )\n\n def median(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, (BooleanType, NumericType)):\n return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n\n return self._reduce_for_stat_function(\n median, name=\"median\", numeric_only=numeric_only, axis=axis\n )\n\n def sem(\n self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return unbiased standard error of the mean over requested axis.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. 
This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n scalar(for Series) or Series(for DataFrame)\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n >>> psdf\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf.sem()\n a 0.57735\n b 0.57735\n dtype: float64\n\n >>> psdf.sem(ddof=0)\n a 0.471405\n b 0.471405\n dtype: float64\n\n >>> psdf.sem(axis=1)\n 0 1.5\n 1 1.5\n 2 1.5\n dtype: float64\n\n Support for Series\n\n >>> psser = psdf.a\n >>> psser\n 0 1\n 1 2\n 2 3\n Name: a, dtype: int64\n\n >>> psser.sem()\n 0.5773502691896258\n\n >>> psser.sem(ddof=0)\n 0.47140452079103173\n \"\"\"\n assert ddof in (0, 1)\n\n axis = validate_axis(axis)\n\n if numeric_only is None and axis == 0:\n numeric_only = True\n\n def std(spark_column: Column, spark_type: DataType) -> Column:\n if isinstance(spark_type, BooleanType):\n spark_column = spark_column.cast(LongType())\n elif not isinstance(spark_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n if ddof == 0:\n return F.stddev_pop(spark_column)\n else:\n return F.stddev_samp(spark_column)\n\n def sem(spark_column: Column, spark_type: DataType) -> Column:\n return std(spark_column, spark_type) / pow(\n Frame._count_expr(spark_column, spark_type), 0.5\n )\n\n return self._reduce_for_stat_function(\n sem, name=\"sem\", numeric_only=numeric_only, axis=axis, ddof=ddof\n )\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n Examples\n --------\n >>> s = ps.Series({'a': 1, 'b': 2, 'c': None})\n >>> s.size\n 3\n\n >>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})\n >>> df.size\n 6\n\n >>> df = ps.DataFrame(index=[1, 2, None])\n >>> df.size\n 0\n \"\"\"\n num_columns = len(self._internal.data_spark_columns)\n if num_columns == 0:\n return 0\n else:\n return len(self) * num_columns # type: ignore\n\n def abs(self: FrameLike) -> FrameLike:\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n Returns\n -------\n abs : Series/DataFrame containing the absolute value of each element.\n\n Examples\n --------\n\n Absolute numeric values in a Series.\n\n >>> s = ps.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a DataFrame.\n\n >>> df = ps.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... },\n ... columns=['a', 'b', 'c'])\n >>> df.abs()\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 30\n 3 7 40 50\n \"\"\"\n\n def abs(psser: \"Series\") -> Union[\"Series\", Column]:\n if isinstance(psser.spark.data_type, BooleanType):\n return psser\n elif isinstance(psser.spark.data_type, NumericType):\n return psser._with_new_scol(\n F.abs(psser.spark.column), field=psser._internal.data_fields[0]\n )\n else:\n raise TypeError(\n \"bad operand type for abs(): {} ({})\".format(\n spark_type_to_pandas_dtype(psser.spark.data_type),\n psser.spark.data_type.simpleString(),\n )\n )\n\n return self._apply_series_op(abs)\n\n # TODO: by argument only support the grouping name and as_index only for now. 
Documentation\n # should be updated when it's supported.\n def groupby(\n self: FrameLike,\n by: Union[Any, Tuple, \"Series\", List[Union[Any, Tuple, \"Series\"]]],\n axis: Axis = 0,\n as_index: bool = True,\n dropna: bool = True,\n ) -> \"GroupBy[FrameLike]\":\n \"\"\"\n Group DataFrame or Series using a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : Series, label, or list of labels\n Used to determine the groups for the groupby.\n If Series is passed, the Series or dict VALUES\n will be used to determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n dropna : bool, default True\n If True, and if group keys contain NA values,\n NA values together with row/column will be dropped.\n If False, NA values will also be treated as the key in groups.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n pyspark.pandas.groupby.GroupBy\n\n Examples\n --------\n >>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',\n ... 'Parrot', 'Parrot'],\n ... 'Max Speed': [380., 370., 24., 26.]},\n ... columns=['Animal', 'Max Speed'])\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n\n >>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n >>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')\n ... 
# doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n Animal Max Speed\n ...Falcon 375.0\n ...Parrot 25.0\n\n We can also choose to include NA in group keys or not by setting dropna parameter,\n the default setting is True:\n\n >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n >>> df = ps.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n >>> df.groupby(by=[\"b\"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n\n >>> df.groupby(by=[\"b\"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n NaN 1 4\n \"\"\"\n if isinstance(by, ps.DataFrame):\n raise ValueError(\"Grouper for '{}' not 1-dimensional\".format(type(by).__name__))\n elif isinstance(by, ps.Series):\n new_by = [by] # type: List[Union[Tuple, ps.Series]]\n elif is_name_like_tuple(by):\n if isinstance(self, ps.Series):\n raise KeyError(by)\n new_by = [cast(Tuple, by)]\n elif is_name_like_value(by):\n if isinstance(self, ps.Series):\n raise KeyError(by)\n new_by = [(by,)]\n elif is_list_like(by):\n new_by = []\n for key in by:\n if isinstance(key, ps.DataFrame):\n raise ValueError(\n \"Grouper for '{}' not 1-dimensional\".format(type(key).__name__)\n )\n elif isinstance(key, ps.Series):\n new_by.append(key)\n elif is_name_like_tuple(key):\n if isinstance(self, ps.Series):\n raise KeyError(key)\n new_by.append(key)\n elif is_name_like_value(key):\n if isinstance(self, ps.Series):\n raise KeyError(key)\n new_by.append((key,))\n else:\n raise ValueError(\n \"Grouper for '{}' not 1-dimensional\".format(type(key).__name__)\n )\n else:\n raise ValueError(\"Grouper for '{}' not 1-dimensional\".format(type(by).__name__))\n if not len(new_by):\n raise ValueError(\"No group keys passed!\")\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)\n\n @abstractmethod\n def _build_groupby(\n self: FrameLike, by: List[Union[\"Series\", Tuple]], as_index: bool, dropna: bool\n ) -> \"GroupBy[FrameLike]\":\n pass\n\n def bool(self) -> bool:\n \"\"\"\n Return the bool of a single element in the current object.\n\n This must be a boolean scalar value, either True or False. Raise a ValueError if\n the object does not have exactly 1 element, or that element is not boolean\n\n Returns\n --------\n bool\n\n Examples\n --------\n >>> ps.DataFrame({'a': [True]}).bool()\n True\n\n >>> ps.Series([False]).bool()\n False\n\n If there are non-boolean or multiple values exist, it raises an exception in all\n cases as below.\n\n >>> ps.DataFrame({'a': ['a']}).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n\n >>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n ValueError: The truth value of a DataFrame is ambiguous. 
Use a.empty, a.bool(),\n a.item(), a.any() or a.all().\n\n >>> ps.Series([1]).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n \"\"\"\n if isinstance(self, ps.DataFrame):\n df = self\n elif isinstance(self, ps.Series):\n df = self.to_dataframe()\n else:\n raise TypeError(\"bool() expects DataFrame or Series; however, \" \"got [%s]\" % (self,))\n return df.head(2)._to_internal_pandas().bool()\n\n def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:\n \"\"\"\n Retrieves the index of the first valid value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],\n ... 'b': [None, 2.0, 3.0, 1.0],\n ... 'c': [None, 200, 400, 200]},\n ... index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for Series.\n\n >>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])\n >>> s\n 100 NaN\n 200 NaN\n 300 3.0\n 400 4.0\n 500 5.0\n dtype: float64\n\n >>> s.first_valid_index()\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s\n lama speed NaN\n weight NaN\n length NaN\n cow speed NaN\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.first_valid_index()\n ('cow', 'weight')\n \"\"\"\n data_spark_columns = self._internal.data_spark_columns\n\n if len(data_spark_columns) == 0:\n return None\n\n cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))\n\n with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):\n # Disable Arrow to keep row ordering.\n first_valid_row = cast(\n pd.DataFrame,\n self._internal.spark_frame.filter(cond)\n .select(self._internal.index_spark_columns)\n .limit(1)\n .toPandas(),\n )\n\n # For Empty Series or DataFrame, returns None.\n if len(first_valid_row) == 0:\n return None\n\n first_valid_row = first_valid_row.iloc[0]\n if len(first_valid_row) == 1:\n return first_valid_row.iloc[0]\n else:\n return tuple(first_valid_row)\n\n def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:\n \"\"\"\n Return index for last non-NA/null value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Notes\n -----\n This API only works with PySpark >= 3.0.\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, None],\n ... 'b': [1.0, 2.0, 3.0, None],\n ... 'c': [100, 200, 400, None]},\n ... 
index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for Series.\n\n >>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])\n >>> s\n 100 1.0\n 200 2.0\n 300 3.0\n 400 NaN\n 500 NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)\n >>> s\n lama speed 250.0\n weight 1.5\n length 320.0\n cow speed 1.0\n weight 0.3\n length NaN\n falcon speed NaN\n weight NaN\n length NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n ('cow', 'weight')\n \"\"\"\n data_spark_columns = self._internal.data_spark_columns\n\n if len(data_spark_columns) == 0:\n return None\n\n cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))\n\n last_valid_rows = (\n self._internal.spark_frame.filter(cond)\n .select(self._internal.index_spark_columns)\n .tail(1)\n )\n\n # For Empty Series or DataFrame, returns None.\n if len(last_valid_rows) == 0:\n return None\n\n last_valid_row = last_valid_rows[0]\n\n if len(last_valid_row) == 1:\n return last_valid_row[0]\n else:\n return tuple(last_valid_row)\n\n # TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.\n def rolling(\n self: FrameLike, window: int, min_periods: Optional[int] = None\n ) -> \"Rolling[FrameLike]\":\n \"\"\"\n Provide rolling transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n window : int, or offset\n Size of the moving window.\n This is the number of observations used for calculating the statistic.\n Each window will be a fixed size.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n For a window that is specified by an offset, min_periods will default to 1.\n Otherwise, min_periods will default to the size of the window.\n\n Returns\n -------\n a Window sub-classed for the particular operation\n \"\"\"\n from pyspark.pandas.window import Rolling\n\n return Rolling(self, window=window, min_periods=min_periods)\n\n # TODO: 'center' and 'axis' parameter should be implemented.\n # 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607\n def expanding(self: FrameLike, min_periods: int = 1) -> \"Expanding[FrameLike]\":\n \"\"\"\n Provide expanding transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. 
This might be changed\n in the near future.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n Returns\n -------\n a Window sub-classed for the particular operation\n \"\"\"\n from pyspark.pandas.window import Expanding\n\n return Expanding(self, min_periods=min_periods)\n\n def get(self, key: Any, default: Optional[Any] = None) -> Any:\n \"\"\"\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n\n Examples\n --------\n >>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},\n ... columns=['x', 'y', 'z'], index=[10, 20, 20])\n >>> df\n x y z\n 10 0 a a\n 20 1 b b\n 20 2 b b\n\n >>> df.get('x')\n 10 0\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.get(['x', 'y'])\n x y\n 10 0 a\n 20 1 b\n 20 2 b\n\n >>> df.x.get(10)\n 0\n\n >>> df.x.get(20)\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.x.get(15, -1)\n -1\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, \"DataFrame\", \"Series\"]:\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = ps.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_1a = df.loc[[1], ['a']]\n >>> df_1a\n a\n 1 3\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_1a.squeeze('rows')\n a 3\n Name: 1, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_1a.squeeze()\n 3\n \"\"\"\n if axis is not None:\n axis = \"index\" if axis == \"rows\" else axis\n axis = validate_axis(axis)\n\n if isinstance(self, ps.DataFrame):\n from pyspark.pandas.series import first_series\n\n is_squeezable = len(self.columns[:2]) == 1\n # If DataFrame has multiple columns, there is no change.\n if not is_squeezable:\n return self\n series_from_column = first_series(self)\n has_single_value = len(series_from_column.head(2)) == 1\n # If DataFrame has only a single value, use pandas API directly.\n if has_single_value:\n result = self._to_internal_pandas().squeeze(axis)\n return ps.Series(result) if isinstance(result, pd.Series) else result\n elif axis == 0:\n return self\n else:\n return series_from_column\n else:\n # The case of Series is simple.\n # If Series has only a single value, just return it as a scalar.\n # Otherwise, there is no change.\n self_top_two = cast(\"Series\", self).head(2)\n has_single_value = len(self_top_two) == 1\n return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)\n\n def truncate(\n self,\n before: Optional[Any] = None,\n after: Optional[Any] = None,\n axis: Optional[Axis] = None,\n copy: bool_type = True,\n ) -> DataFrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`\n which can be expensive.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. 
Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n A Series has index that sorted integers.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=[1, 2, 3, 4, 5, 6, 7])\n >>> s\n 1 10\n 2 20\n 3 30\n 4 40\n 5 50\n 6 60\n 7 70\n dtype: int64\n\n >>> s.truncate(2, 5)\n 2 20\n 3 30\n 4 40\n 5 50\n dtype: int64\n\n A Series has index that sorted strings.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n >>> s\n a 10\n b 20\n c 30\n d 40\n e 50\n f 60\n g 70\n dtype: int64\n\n >>> s.truncate('b', 'e')\n b 20\n c 30\n d 40\n e 50\n dtype: int64\n \"\"\"\n from pyspark.pandas.series import first_series\n\n axis = validate_axis(axis)\n indexes = self.index\n indexes_increasing = indexes.is_monotonic_increasing\n if not indexes_increasing and not indexes.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n if (before is None) and (after is None):\n return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)\n if (before is not None and after is not None) and before > after:\n raise ValueError(\"Truncate: %s must be after %s\" % (after, before))\n\n if isinstance(self, ps.Series):\n if indexes_increasing:\n result = first_series(self.to_frame().loc[before:after]).rename(self.name)\n else:\n result = first_series(self.to_frame().loc[after:before]).rename(self.name)\n elif isinstance(self, ps.DataFrame):\n if axis == 0:\n if indexes_increasing:\n result = self.loc[before:after]\n else:\n result = self.loc[after:before]\n elif axis == 1:\n result = self.loc[:, before:after]\n\n return cast(DataFrameOrSeries, result.copy() if copy else result)\n\n def to_markdown(\n self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None\n ) -> str:\n \"\"\"\n Print Series or DataFrame in Markdown-friendly format.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. 
Pass a writable buffer if you need to further process\n the output.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n Series or DataFrame in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n Examples\n --------\n >>> psser = ps.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(psser.to_markdown()) # doctest: +SKIP\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n >>> psdf = ps.DataFrame(\n ... data={\"animal_1\": [\"elk\", \"pig\"], \"animal_2\": [\"dog\", \"quetzal\"]}\n ... )\n >>> print(psdf.to_markdown()) # doctest: +SKIP\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n \"\"\"\n # `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.\n if LooseVersion(pd.__version__) < LooseVersion(\"1.0.0\"):\n raise NotImplementedError(\n \"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0\"\n )\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n psser_or_psdf = self\n internal_pandas = psser_or_psdf._to_internal_pandas()\n return validate_arguments_and_invoke_function(\n internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args\n )\n\n @abstractmethod\n def fillna(\n self: FrameLike,\n value: Optional[Any] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n pass\n\n # TODO: add 'downcast' when value parameter exists\n def bfill(\n self: FrameLike,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n \"\"\"\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.\n\n .. note:: the current implementation of 'bfill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values backward.\n\n >>> psdf.bfill()\n A B C D\n 0 3.0 2.0 1.0 0\n 1 3.0 4.0 1.0 1\n 2 NaN 3.0 1.0 5\n 3 NaN 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([None, None, None, 1])\n >>> psser\n 0 NaN\n 1 NaN\n 2 NaN\n 3 1.0\n dtype: float64\n\n >>> psser.bfill()\n 0 1.0\n 1 1.0\n 2 1.0\n 3 1.0\n dtype: float64\n \"\"\"\n return self.fillna(method=\"bfill\", axis=axis, inplace=inplace, limit=limit)\n\n backfill = bfill\n\n # TODO: add 'downcast' when value parameter exists\n def ffill(\n self: FrameLike,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n \"\"\"\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.\n\n .. note:: the current implementation of 'ffill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values forward.\n\n >>> psdf.ffill()\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([2, 4, None, 3])\n >>> psser\n 0 2.0\n 1 4.0\n 2 NaN\n 3 3.0\n dtype: float64\n\n >>> psser.ffill()\n 0 2.0\n 1 4.0\n 2 4.0\n 3 3.0\n dtype: float64\n \"\"\"\n return self.fillna(method=\"ffill\", axis=axis, inplace=inplace, limit=limit)\n\n pad = ffill\n\n @property\n def at(self) -> AtIndexer:\n return AtIndexer(self) # type: ignore\n\n at.__doc__ = AtIndexer.__doc__\n\n @property\n def iat(self) -> iAtIndexer:\n return iAtIndexer(self) # type: ignore\n\n iat.__doc__ = iAtIndexer.__doc__\n\n @property\n def iloc(self) -> iLocIndexer:\n return iLocIndexer(self) # type: ignore\n\n iloc.__doc__ = iLocIndexer.__doc__\n\n @property\n def loc(self) -> LocIndexer:\n return LocIndexer(self) # type: ignore\n\n loc.__doc__ = LocIndexer.__doc__\n\n def __bool__(self) -> NoReturn:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\".format(self.__class__.__name__)\n )\n\n @staticmethod\n def _count_expr(spark_column: Column, spark_type: DataType) -> Column:\n # Special handle floating point types because Spark's count treats nan as a valid value,\n # whereas pandas count doesn't include nan.\n if isinstance(spark_type, (FloatType, DoubleType)):\n return F.count(F.nanvl(spark_column, SF.lit(None)))\n else:\n return F.count(spark_column)\n\n\ndef _test() -> None:\n import os\n import doctest\n import shutil\n import sys\n import tempfile\n from pyspark.sql import SparkSession\n import pyspark.pandas.generic\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.generic.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.generic tests\")\n .getOrCreate()\n )\n\n path = tempfile.mkdtemp()\n globs[\"path\"] = path\n\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.generic,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n\n shutil.rmtree(path, ignore_errors=True)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.api.types.is_list_like" ] ]
spongezhang/maskgen
[ "7284e300d1cb326a5349879de0bace9cfa8788a8", "7284e300d1cb326a5349879de0bace9cfa8788a8" ]
[ "plugins/CropPermutations/__init__.py", "tests/MedianBlurTest.py" ]
[ "from PIL import Image\nimport numpy\nfrom random import randint\n\n\"\"\"\nA plugin used to create a set of variable spefications for permutation groups.\n\"\"\"\n\ndef transform(img,source,target,**kwargs):\n cv_image = numpy.array(img)\n shape = cv_image.shape\n snapto8 = 'eightbit_boundary' in kwargs and kwargs['eightbit_boundary'] == 'yes'\n percentageWidth = float(kwargs['percentage_width'])\n percentageHeight = float(kwargs['percentage_height'])\n divisionsWidth = float(kwargs['divisions_width'] if 'divisions_width' in kwargs else shape[1])\n divisionsHeight = float(kwargs['divisions_height'] if 'divisions_height' in kwargs else shape[0])\n pixelWidth = int(shape[1] * percentageWidth)\n pixelHeight = int(shape[0] * percentageHeight)\n if snapto8:\n pixelWidth = pixelWidth - pixelWidth % 8\n pixelHeight = pixelHeight - pixelHeight % 8\n incrementsWidth = max(8,int(pixelWidth/divisionsWidth))\n incrementsHeight = max(8,int(pixelHeight/divisionsHeight))\n crop_x = { \"type\": \"list\", \"values\" : [i for i in xrange(incrementsWidth,pixelWidth,incrementsWidth)]}\n crop_y = { \"type\": \"list\", \"values\" : [i for i in xrange(incrementsHeight, pixelHeight, incrementsHeight)]}\n return {'crop_x':crop_x,'crop_y':crop_y, 'crop_width':pixelWidth,'crop_height':pixelHeight},None\n\ndef operation():\n return {\n 'category': 'Select',\n 'name': 'SelectRegion',\n 'description':'Select a region to crop',\n 'software':'OpenCV',\n 'version':cv2.__version__,\n 'arguments':{'percentage_width':\n {'type': \"float[0:0.5]\", 'description':'the percentage of pixels to remove horizontal'},\n 'percentage_height':\n {'type': \"float[0:0.5]\", 'description':'the percentage of pixels to remove vertically'},\n 'divisions_width':\n {'type': \"int[0:100000]\", 'description': 'the number samples in the x direction'},\n 'divisions_height':\n {'type': \"int[0:100000]\", 'description': 'the number of samples in the y direction'},\n 'eightbit_boundary':\n {'type': \"yesno\", 'defaultvalue':'no', 'description':'Snap to 8 bit boundary'}\n },\n 'transitions': [\n 'image.image'\n ]\n }\n", "import unittest\nimport os\nfrom maskgen import plugins, image_wrap\nimport numpy\nimport tempfile\n\n\nclass MedianBlurTestCase(unittest.TestCase):\n filesToKill = []\n\n def setUp(self):\n plugins.loadPlugins()\n\n def test_something(self):\n img = numpy.random.randint(0, 255, (500, 500, 3), dtype='uint8')\n wrapper = image_wrap.ImageWrapper(img)\n filename = tempfile.mktemp(prefix='mstc',suffix='.png',dir='.')\n filename_output = tempfile.mktemp(prefix='mstcr', suffix='.png', dir='.')\n self.filesToKill.append(filename)\n wrapper.save(filename)\n self.filesToKill.append(filename_output)\n image_wrap.ImageWrapper(img).save(filename_output)\n args,error = plugins.callPlugin('MedianBlur',\n wrapper,\n filename,\n filename_output,\n percentageChange = 0.5)\n wrapper = image_wrap.openImageFile(filename_output)\n output = wrapper.to_array()\n self.assertEqual(output.shape, img.shape)\n diff = abs(output - img)\n finaldiff = numpy.zeros((500,500))\n for i in range(3):\n finaldiff = finaldiff + diff[:,:,i]\n finaldiff[finaldiff > 0] = 1\n self.assertTrue(abs(sum(sum(finaldiff))-62500) < 100)\n\n def tearDown(self):\n for f in self.filesToKill:\n if os.path.exists(f):\n os.remove(f)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ], [ "numpy.random.randint", "numpy.zeros" ] ]
dasmy/minibatchNMF
[ "f7f910e290103c26e2925426849f8bfbe75ba242" ]
[ "beta_nmf_minibatch.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nbeta\\_nmf_minibatch.py\n~~~~~~~~~~~\n\n.. topic:: Contents\n\n The beta_nmf_minibatch module includes the betaNMF class,\n fit function and theano functions to compute updates and cost.\n \n Copyright 2014-2016 Romain Serizel\n This software is distributed under the terms of the GNU Public License\n version 3 (http://www.gnu.org/licenses/gpl.txt)\"\"\"\n\nimport time\nimport numpy as np\nimport theano\nimport base\nimport theano.tensor as T\nimport updates\nimport costs\n\n\nclass BetaNMF(object):\n \"\"\"BetaNMF class\n\n Performs nonnegative matrix factorization with mini-batch multiplicative\n updates. GPGPU implementation based on Theano.\n\n Parameters\n ----------\n data_shape : tuple composed of integers\n The shape of the data to approximate\n\n n_components : positive integer\n The number of latent components for the NMF model\n\n\n beta: arbitrary float (default 2).\n The beta-divergence to consider. Particular cases of interest are\n * beta=2 : Euclidean distance\n * beta=1 : Kullback Leibler\n * beta=0 : Itakura-Saito\n\n n_iter: positive integer\n number of iterations\n\n fixed_factors: array of intergers\n Indexes of the factors that are kept fixed during the updates\n * [0] : corresponds to fixed H\n * [1] : corresponds to fixed W\n\n cache1_size: integer\n Size (in frames) of the first data cache.\n The size is reduced to the closest multiple of the batch_size.\n If set to zero the algorithm tries to fit all the data in cache\n\n batch_size: integer\n Size (in frames) of the batch for batch processing.\n The batch size has an impact on the parrelization and the memory needed\n to store partial gradients (see Schmidt et al.)\n\n verbose: integer\n The numer of iterations to wait between two computation and printing\n of the cost\n\n init_mode : string (default 'random')\n * random : initalise the factors randomly\n * custom : intialise the factors with custom value\n\n W : array (optionnal)\n Initial wvalue for factor W when custom initialisation is used\n\n H : array (optionnal)\n Initial wvalue for factor H when custom initialisation is used\n\n solver : string (default 'mu_batch')\n * mu_batch : mini-batch version of the MU updates.\n (fully equivalent to standard NMF with MU).\n * asg_mu : Asymetric stochatistic gradient for MU [1]_\n * gsg_mu : Greedy stochatistic gradient for MU [1]_\n * asag_mu : Asymetric stochatistic average gradient [2]_ for MU [1]_\n * gsag_mu : Greedy stochatistic average gradient [2]_ for MU [1]_\n\n nb_batch_w : interger (default 1)\n number of batches on which W updates is computed\n * 1 : greedy approaches [1]_\n\n sag_memory : integer (default 0)\n number of batches used to compute the average gradient\n * 0 : SG approaches\n * nb_batches : SAG approaches\n\n Attributes\n ----------\n nb_cache1 : integer\n number of caches needed to fill the full data\n\n forget_factor : float\n forgetting factor for SAG\n\n scores : array\n reconstruction cost and iteration time for each iteration\n\n factors\\_ : list of arrays\n The estimated factors\n\n w : theano tensor\n factor W\n\n h_cache1 : theano tensor\n part of the factor H in cache1\n\n x_cache1 : theano tensor\n data cache\n\n References\n ----------\n .. [#] R. Serizel, S. Essid, and G. Richard. “Mini-batch stochastic\n approaches for accelerated multiplicative updates in nonnegative matrix\n factorisation with beta-divergence”. Accepted for publication\n In *Proc. of MLSP*, p. 5, 2016.\n\n .. [#] Schmidt, M., Roux, N. L., & Bach, F. 
(2013).\n Minimizing finite sums with the stochastic average gradient\n https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf\n \"\"\"\n\n # Constructor\n def __init__(self, data_shape, n_components=50, beta=2, n_iter=50,\n fixed_factors=None, cache1_size=0,\n batch_size=100, verbose=0,\n init_mode='random', W=None, H=None, solver='mu_batch',\n nb_batch_w=1, sag_memory=0):\n self.data_shape = data_shape\n self.n_components = n_components\n self.batch_size = batch_size\n self.nb_batch = int(np.ceil(np.true_divide(data_shape[0],\n self.batch_size)))\n self.batch_ind = np.zeros((self.nb_batch, self.batch_size))\n\n if cache1_size > 0:\n cache1_size = min((cache1_size, data_shape[0]))\n if cache1_size < self.batch_size:\n raise ValueError('cache1_size should be at '\n 'least equal to batch_size')\n self.cache1_size = int(np.ceil(cache1_size/self.batch_size * self.batch_size))\n self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],\n self.cache1_size)))\n else:\n self.cache1_size = data_shape[0]\n self.nb_cache1 = 1\n\n self.n_components = np.asarray(n_components, dtype='int32')\n self.beta = theano.shared(np.asarray(beta, theano.config.floatX),\n name=\"beta\")\n self.eps = theano.shared(np.asarray(1e-10, theano.config.floatX),\n name=\"eps\")\n self.sag_memory = sag_memory\n self.forget_factor = 1./(self.sag_memory + 1)\n self.verbose = verbose\n self.n_iter = n_iter\n self.solver = solver\n self.scores = []\n self.nb_batch_w = nb_batch_w\n if fixed_factors is None:\n fixed_factors = []\n self.fixed_factors = fixed_factors\n fact_ = [base.nnrandn((dim, self.n_components)) for dim in data_shape]\n self.init_mode = init_mode\n if self.init_mode == 'custom':\n fact_[0] = H\n fact_[1] = W\n self.w = theano.shared(fact_[1].astype(theano.config.floatX),\n name=\"W\", borrow=True, allow_downcast=True)\n self.h_cache1 = theano.shared(fact_[0][:self.cache1_size,\n ].astype(theano.config.floatX),\n name=\"H cache1\", borrow=True,\n allow_downcast=True)\n self.factors_ = fact_\n self.x_cache1 = theano.shared(np.zeros((self.cache1_size,\n data_shape[1])).astype(\n theano.config.floatX),\n name=\"X cache1\")\n self.init()\n\n def check_shape(self):\n \"\"\"Check that all the matrix have consistent shapes\n \"\"\"\n batch_shape = self.x_cache1.get_value().shape\n dim = int(self.n_components)\n if self.w.get_value().shape != (self.data_shape[1], dim):\n print(\"Inconsistent data for W, expected {1}, found {0}\".format(\n self.w.get_value().shape,\n (self.data_shape[1], dim)))\n raise SystemExit\n if self.factors_[0].shape != (self.data_shape[0], dim):\n print(\"Inconsistent shape for H, expected {1}, found {0}\".format(\n self.factors_[0].shape,\n (self.data_shape[0], dim)))\n raise SystemExit\n if self.h_cache1.get_value().shape != (batch_shape[0], dim):\n print(\"Inconsistent shape for h_cache1, expected {1}, found {0}\".format(\n self.h_cache1.get_value().shape,\n (batch_shape[0], dim)))\n raise SystemExit\n\n def fit(self, data, cyclic=False, warm_start=False):\n \"\"\"Learns NMF model\n\n Parameters\n ----------\n data : ndarray with nonnegative entries\n The input array\n\n cyclic : Boolean (default False)\n pick the sample cyclically\n\n warm_start : Boolean (default False)\n start from previous values\n \"\"\"\n self.data_shape = data.shape\n if (not warm_start) & (self.init_mode != 'custom'):\n print(\"cold start\")\n self.set_factors(data, fixed_factors=self.fixed_factors)\n self.check_shape()\n self.prepare_batch(False)\n self.prepare_cache1(False)\n div_func = self.get_div_function()\n 
if self.verbose > 0:\n scores = np.zeros((\n int(np.floor(self.n_iter/self.verbose)) + 2, 2))\n else:\n scores = np.zeros((2, 2))\n if self.solver == 'asag_mu' or self.solver == 'gsag_mu':\n grad_func = self.get_gradient_mu_sag()\n update_func = self.get_updates()\n elif self.solver == 'asg_mu' or self.solver == 'gsg_mu':\n grad_func = self.get_gradient_mu_sg()\n update_func = self.get_updates()\n elif self.solver == 'mu_batch':\n grad_func = self.get_gradient_mu_batch()\n update_func = self.get_updates()\n tick = time.time()\n score = 0\n\n for cache_ind in range(self.nb_cache1):\n current_cache_ind = np.hstack(self.batch_ind[\n self.cache1_ind[\n cache_ind, self.cache1_ind[cache_ind] >= 0]])\n current_cache_ind = current_cache_ind[current_cache_ind >= 0]\n self.x_cache1.set_value(data[current_cache_ind, ].astype(\n theano.config.floatX))\n self.h_cache1.set_value(self.factors_[0][\n current_cache_ind, ].astype(theano.config.floatX))\n score += div_func['div_cache1']()\n score_ind = 0\n scores[0, ] = [score, time.time() - tick]\n\n self.prepare_batch(not cyclic)\n self.prepare_cache1(not cyclic)\n\n print('Intitial score = %.2f' % score)\n print('Fitting NMF model with %d iterations....' % self.n_iter)\n if self.nb_cache1 == 1:\n current_cache_ind = np.hstack(self.batch_ind[\n self.cache1_ind[\n 0, self.cache1_ind[0] >= 0]])\n current_cache_ind = current_cache_ind[current_cache_ind >= 0]\n self.x_cache1.set_value(data[current_cache_ind, ].astype(\n theano.config.floatX))\n self.h_cache1.set_value(self.factors_[0][\n current_cache_ind, ].astype(theano.config.floatX))\n if self.solver == 'sag':\n self.c1_grad_w.set_value(self.old_grad_w[self.cache1_ind[\n 0, self.cache1_ind[0] >= 0]].astype(\n theano.config.floatX))\n # main loop\n for it in range(self.n_iter):\n tick = time.time()\n self.prepare_cache1(not cyclic)\n score = 0\n for cache_ind in range(self.nb_cache1):\n if self.nb_cache1 > 1:\n current_cache_ind = np.hstack(self.batch_ind[\n self.cache1_ind[\n cache_ind, self.cache1_ind[cache_ind] >= 0]])\n current_cache_ind = current_cache_ind[\n current_cache_ind >= 0]\n self.x_cache1.set_value(data[current_cache_ind, ].astype(\n theano.config.floatX))\n\n self.h_cache1.set_value(self.factors_[0][\n current_cache_ind, ].astype(theano.config.floatX))\n if self.solver == 'sag':\n self.c1_grad_w.set_value(\n self.old_grad_w[\n self.cache1_ind[\n cache_ind,\n self.cache1_ind[cache_ind] >= 0]].astype(\n theano.config.floatX))\n for batch_i in range(self.cache1_ind[\n cache_ind, self.cache1_ind[cache_ind] >= 0].shape[0]):\n batch_ind = np.arange(batch_i * self.batch_size,\n (batch_i + 1) * self.batch_size)\n batch_ind = batch_ind[\n batch_ind < current_cache_ind.shape[0]]\n batch_ind = np.asarray([batch_ind[0],\n batch_ind[-1] + 1]).astype(\n theano.config.floatX)\n\n if self.solver == 'mu_batch':\n self.update_mu_batch_h(batch_ind,\n update_func, grad_func)\n if self.solver == 'asag_mu' or self.solver == 'asg_mu':\n self.update_mu_sag(batch_ind,\n update_func, grad_func)\n if self.solver == 'gsag_mu' or self.solver == 'gsg_mu':\n grad_func['grad_h'](batch_ind)\n update_func['train_h'](batch_ind)\n if batch_i == 0 and cache_ind == 0:\n grad_func['grad_w'](batch_ind)\n if self.nb_cache1 > 1:\n self.factors_[0][current_cache_ind, ] =\\\n self.h_cache1.get_value()\n else:\n self.factors_[0] = self.h_cache1.get_value()\n if self.solver == 'mu_batch':\n self.update_mu_batch_w(update_func)\n elif self.solver == 'gsag_mu' or self.solver == 'gsg_mu':\n update_func['train_w']()\n if self.nb_cache1 > 
1:\n for cache_ind in range(self.nb_cache1):\n self.x_cache1.set_value(data[np.hstack(self.batch_ind[\n self.cache1_ind[\n cache_ind,\n self.cache1_ind[cache_ind] >= 0]]), ].astype(\n theano.config.floatX))\n self.h_cache1.set_value(self.factors_[0][\n np.hstack(self.batch_ind[\n self.cache1_ind[\n cache_ind, self.cache1_ind[cache_ind] >= 0]]),\n ].astype(theano.config.floatX))\n if (it+1) % self.verbose == 0:\n score += div_func['div_cache1']()\n else:\n self.factors_[0] = self.h_cache1.get_value()\n if (it+1) % self.verbose == 0:\n score = div_func['div_cache1']()\n if (it+1) % self.verbose == 0:\n score_ind += 1\n scores[score_ind, ] = [\n score, time.time() - tick + scores[score_ind - 1, 1]]\n print('Iteration %d / %d, duration=%.1fms, cost=%f'\n % (it + 1,\n self.n_iter,\n scores[score_ind, 1] * 1000,\n scores[score_ind, 0]))\n tick = time.time()\n score_ind += 1\n scores[score_ind, ] = [\n score, time.time() - tick + scores[score_ind - 1, 1]]\n print('Iteration %d / %d, duration=%.1fms, cost=%f'\n % (it + 1,\n self.n_iter,\n scores[-1, 1] * 1000,\n scores[-1, 0])) \n return scores\n\n def get_div_function(self):\n \"\"\" compile the theano-based divergence function\"\"\"\n div_cache1 = theano.function(inputs=[],\n outputs=costs.beta_div(self.x_cache1,\n self.w.T,\n self.h_cache1,\n self.beta),\n name=\"div c1\",\n allow_input_downcast=True, profile=False)\n return dict(\n div_cache1=div_cache1)\n\n def get_gradient_mu_sag(self):\n \"\"\"compile the theano based gradient functions for mu_sag algorithms\"\"\"\n tbatch_ind = T.ivector('batch_ind')\n tind = T.iscalar('ind')\n grad_new = updates.gradient_w_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n up_grad_w = self.forget_factor * grad_new + (\n 1 - self.forget_factor) * self.grad_w\n\n grad_w = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.grad_w, up_grad_w)},\n name=\"grad w\",\n allow_input_downcast=True)\n grad_new = updates.gradient_h_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n\n grad_h = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.c1_grad_h, grad_new)},\n name=\"grad h\",\n allow_input_downcast=True)\n return dict(\n grad_h=grad_h,\n grad_w=grad_w)\n\n def get_gradient_mu_sg(self):\n \"\"\"compile the theano based gradient functions for mu_sg algorithms\"\"\"\n tbatch_ind = T.ivector('batch_ind')\n tind = T.iscalar('ind')\n grad_new = updates.gradient_w_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n\n grad_w = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.grad_w, grad_new)},\n name=\"grad w\",\n allow_input_downcast=True)\n grad_new = updates.gradient_h_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n\n grad_h = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.c1_grad_h, grad_new)},\n name=\"grad h\",\n allow_input_downcast=True)\n return dict(\n grad_h=grad_h,\n grad_w=grad_w)\n\n def get_gradient_mu_batch(self):\n \"\"\"compile the theano based gradient functions for mu\"\"\"\n tbatch_ind = T.ivector('batch_ind')\n tind = T.iscalar('ind')\n grad_new = updates.gradient_w_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n\n grad_w = 
theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.grad_w,\n self.grad_w + grad_new)},\n name=\"grad w\",\n allow_input_downcast=True,\n on_unused_input='ignore')\n grad_new = updates.gradient_h_mu(\n self.x_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.w,\n self.h_cache1[tbatch_ind[0]:tbatch_ind[1],\n ],\n self.beta)\n grad_h = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.c1_grad_h, grad_new)},\n name=\"grad h\",\n allow_input_downcast=True)\n return dict(\n grad_h=grad_h,\n grad_w=grad_w)\n\n def get_updates(self):\n \"\"\"compile the theano based update functions\"\"\"\n tbatch_ind = T.ivector('batch_ind')\n tneg = T.iscalar('neg')\n tpos = T.iscalar('pos')\n up_h = T.set_subtensor(self.h_cache1[tbatch_ind[0]:tbatch_ind[1], ],\n updates.mu_update(self.h_cache1[\n tbatch_ind[0]:tbatch_ind[1], ],\n self.c1_grad_h[0, ],\n self.c1_grad_h[1, ],\n self.eps))\n train_h = theano.function(inputs=[tbatch_ind],\n outputs=[],\n updates={(self.h_cache1, up_h)},\n name=\"trainH\",\n allow_input_downcast=True,\n on_unused_input='ignore')\n update_w = updates.mu_update(self.w,\n self.grad_w[0],\n self.grad_w[1],\n self.eps)\n train_w = theano.function(inputs=[],\n outputs=[],\n updates={self.w: update_w},\n name=\"trainW\",\n allow_input_downcast=True)\n return dict(\n train_h=train_h,\n train_w=train_w)\n\n def init(self):\n \"\"\"Initialise theano variable to store the gradients\"\"\"\n self.grad_w = theano.shared(\n np.zeros((2,\n self.data_shape[1],\n self.n_components)).astype(theano.config.floatX),\n name=\"gradW\", borrow=True,\n allow_downcast=True)\n self.grad_h = np.zeros((2, self.data_shape[0], self.n_components))\n self.c1_grad_h = theano.shared(\n np.zeros((2,\n self.batch_size,\n self.n_components)).astype(theano.config.floatX),\n name=\"c1_gradH\", borrow=True,\n allow_downcast=True)\n\n def prepare_batch(self, randomize=True):\n \"\"\"Arrange data for batches\n\n Parameters\n ----------\n randomize : boolean (default True)\n Randomise the data (time-wise) before preparing batch indexes\n \"\"\"\n ind = - np.ones((self.nb_batch * self.batch_size, ))\n ind[:self.data_shape[0], ] = np.arange(self.data_shape[0])\n if randomize:\n np.random.shuffle(ind[:self.data_shape[0], ])\n self.batch_ind = np.reshape(ind, (self.nb_batch,\n self.batch_size)).astype(int)\n\n def prepare_cache1(self, randomize=True):\n \"\"\"Arrange data for to fill cache1\n\n Parameters\n ----------\n randomize : boolean (default True)\n Randomise the data (time-wise) before preparing cahce indexes\n \"\"\"\n ind = - np.ones((self.nb_cache1 *\n int(np.ceil(np.true_divide(self.cache1_size,\n self.batch_size)))))\n ind[:self.nb_batch, ] = np.arange(self.nb_batch)\n if randomize:\n np.random.shuffle(ind[:self.nb_batch, ])\n self.cache1_ind = np.reshape(ind, (self.nb_cache1,\n int(np.ceil(np.true_divide(\n self.cache1_size,\n self.batch_size)))\n )).astype(int)\n\n def set_factors(self, data, W=None, H=None, fixed_factors=None):\n \"\"\"Re-set theano based parameters according to the object attributes.\n\n Parameters\n ----------\n W : array (optionnal)\n Value for factor W when custom initialisation is used\n\n H : array (optionnal)\n Value for factor H when custom initialisation is used\n\n fixed_factors : array (default Null)\n list of factors that are not updated\n e.g. 
fixed_factors = [0] -> H is not updated\n\n fixed_factors = [1] -> W is not updated\n \"\"\"\n self.data_shape = data.shape\n self.nb_batch = int(np.ceil(np.true_divide(self.data_shape[0],\n self.batch_size)))\n self.batch_ind = np.zeros((self.nb_batch, self.batch_size))\n\n if self.cache1_size > 0 and self.cache1_size < self.data_shape[0]:\n if self.cache1_size < self.batch_size:\n raise ValueError('cache1_size should be at '\n 'least equal to batch_size')\n self.cache1_size = self.cache1_size/self.batch_size * self.batch_size\n self.nb_cache1 = int(np.ceil(np.true_divide(self.data_shape[0],\n self.cache1_size)))\n else:\n self.cache1_size = self.data_shape[0]\n self.nb_cache1 = 1\n\n self.forget_factor = 1./(self.sag_memory + 1)\n fact_ = [base.nnrandn((dim, self.n_components))\n for dim in self.data_shape]\n if H is not None:\n fact_[0] = H\n if W is not None:\n fact_[1] = W\n if fixed_factors is None:\n fixed_factors = []\n if 1 not in fixed_factors:\n self.w = theano.shared(fact_[1].astype(theano.config.floatX),\n name=\"W\", borrow=True, allow_downcast=True)\n if 0 not in fixed_factors:\n self.h_cache1 = theano.shared(\n fact_[0][\n :self.cache1_size, ].astype(theano.config.floatX),\n name=\"H cache1\", borrow=True,\n allow_downcast=True)\n self.factors_[0] = fact_[0]\n self.factors_ = fact_\n self.x_cache1 = theano.shared(np.zeros((self.cache1_size,\n self.data_shape[1])).astype(\n theano.config.floatX),\n name=\"X cache1\")\n self.init()\n\n def transform(self, data, warm_start=False):\n \"\"\"Project data X on the basis W\n\n Parameters\n ----------\n X : array\n The input data\n warm_start : Boolean (default False)\n start from previous values\n\n Returns\n -------\n H : array\n Activations\n \"\"\"\n self.fixed_factors = [1]\n if not warm_start:\n print(\"cold start\")\n self.set_factors(data, fixed_factors=self.fixed_factors)\n self.fit(data, warm_start=True)\n return self.factors_[0]\n\n def update_mu_sag(self, batch_ind, update_func, grad_func):\n \"\"\"Update current batch with SAG based algorithms\n\n Parameters\n ----------\n batch_ind : array with 2 elements\n :batch_ind[0]: batch start\n :batch_ind[1]: batch end\n\n update_func : Theano compiled function\n Update function\n\n grad_func : Theano compiled function\n Gradient function\n \"\"\"\n if 0 not in self.fixed_factors:\n grad_func['grad_h'](batch_ind)\n update_func['train_h'](batch_ind)\n if 1 not in self.fixed_factors:\n grad_func['grad_w'](batch_ind)\n update_func['train_w']()\n\n def update_mu_batch_h(self, batch_ind, update_func, grad_func):\n \"\"\"Update h for current batch with standard MU\n\n Parameters\n ----------\n batch_ind : array with 2 elements\n :batch_ind[0]: batch start\n :batch_ind[1]: batch end\n\n update_func : Theano compiled function\n Update function\n\n grad_func : Theano compiled function\n Gradient function\n \"\"\"\n if 0 not in self.fixed_factors:\n grad_func['grad_h'](batch_ind)\n update_func['train_h'](batch_ind)\n grad_func['grad_w'](batch_ind)\n\n def update_mu_batch_w(self, udpate_func):\n \"\"\"Update W with standard MU\n\n Parameters\n ----------\n update_func : Theano compiled function\n Update function\n \"\"\"\n if 1 not in self.fixed_factors:\n udpate_func['train_w']()\n self.grad_w.set_value(\n np.zeros((\n 2,\n self.data_shape[1],\n self.n_components)).astype(\n theano.config.floatX))\n" ]
[ [ "numpy.ceil", "numpy.asarray", "numpy.zeros", "numpy.reshape", "numpy.true_divide", "numpy.ones", "numpy.random.shuffle", "numpy.arange", "numpy.hstack", "numpy.floor" ] ]
udham2511/Python-Screen-Recorder
[ "419be068398f15128f1279bd68a64c285b60493b" ]
[ "recorder.py" ]
[ "from PIL import ImageGrab\r\nimport pyautogui\r\nimport numpy\r\nimport time\r\nimport cv2\r\nimport os\r\n\r\n\r\ntimeA = time.time()\r\nfourcc = cv2.VideoWriter_fourcc(*\"XVID\")\r\nname = f\"Recording{len(os.listdir())-2}.mp4\"\r\nout = cv2.VideoWriter(name, fourcc, 14.0, (1920, 1080))\r\n\r\nwhite = (255, 255, 255)\r\nblack = (0, 0, 0)\r\n\r\nwhile True:\r\n frame = ImageGrab.grab()\r\n data = frame.load()\r\n\r\n (x, y) = pyautogui.position()\r\n\r\n mouseFrame = numpy.array(frame)\r\n finalFrame = cv2.cvtColor(mouseFrame, 4)\r\n\r\n cv2.circle(finalFrame, (x, y), 7, (0, 0, 0), -1)\r\n cv2.circle(finalFrame, (x, y), 6, (255, 255, 255), -1)\r\n\r\n cv2.imshow(\"Recoding\", finalFrame)\r\n out.write(finalFrame)\r\n\r\n if (cv2.waitKey(1) & 0xFF == ord(\"q\")):\r\n break\r\n\r\nout.release()\r\ncv2.destroyAllWindows()\r\nprint(\"Time:\", str(time.time() - timeA)[:4]+\"s\")" ]
[ [ "numpy.array" ] ]
Jiaming1999/ChainConsumer
[ "5606696525d91f11d8093085934fa352b98ce97c", "5606696525d91f11d8093085934fa352b98ce97c" ]
[ "examples/customisations/plot_rainbow_serif_bins.py", "chainconsumer/diagnostic.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n=======================\nCmap and Custom Bins\n=======================\n\nInvoke the cmap colour scheme and choose how many bins to use with your data.\n\nBy default, the cmap colour scheme is used if you have many, many chains. You can\nenable it before that point if you wish and pass in the cmap you want to use.\n\nYou can also pick how many bins you want to display your data with.\n\nYou can see that in this example, we pick too many bins and would not get good\nsummaries. If you simply want more (or less) bins than the default estimate,\nif you input a float instead of an integer, the number of bins will simply scale\nby that amount. For example, if the estimated picks 20 bins, and you set ``bins=1.5``\nyour plots and summaries would be calculated with 30 bins.\n\n\"\"\"\nimport numpy as np\nfrom numpy.random import normal, random, multivariate_normal\nfrom chainconsumer import ChainConsumer\n\n\nnp.random.seed(0)\ncov = 0.3 * random(size=(3, 3)) + np.identity(3)\ndata = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)\ncov = 0.3 * random(size=(3, 3)) + np.identity(3)\ndata2 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)\ncov = 0.3 * random(size=(3, 3)) + np.identity(3)\ndata3 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)\ncov = 0.3 * random(size=(3, 3)) + np.identity(3)\ndata4 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)\n\nc = ChainConsumer()\nc.add_chain(data, name=\"A\")\nc.add_chain(data2, name=\"B\")\nc.add_chain(data3, name=\"C\")\nc.add_chain(data4, name=\"D\")\nc.configure(bins=50, cmap=\"plasma\")\nfig = c.plotter.plot(figsize=0.75) # Also making the figure 75% of its original size, for fun\n\nfig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.\n", "# -*- coding: utf-8 -*-\nimport numpy as np\nimport logging\nfrom scipy.stats import normaltest\n\n\nclass Diagnostic(object):\n def __init__(self, parent):\n self.parent = parent\n self._logger = logging.getLogger(\"chainconsumer\")\n\n def gelman_rubin(self, chain=None, threshold=0.05):\n r\"\"\" Runs the Gelman Rubin diagnostic on the supplied chains.\n\n Parameters\n ----------\n chain : int|str, optional\n Which chain to run the diagnostic on. By default, this is `None`,\n which will run the diagnostic on all chains. You can also\n supply and integer (the chain index) or a string, for the chain\n name (if you set one).\n threshold : float, optional\n The maximum deviation permitted from 1 for the final value\n :math:`\\hat{R}`\n\n Returns\n -------\n float\n whether or not the chains pass the test\n\n Notes\n -----\n\n I follow PyMC in calculating the Gelman-Rubin statistic, where,\n having :math:`m` chains of length :math:`n`, we compute\n\n .. math::\n\n B = \\frac{n}{m-1} \\sum_{j=1}^{m} \\left(\\bar{\\theta}_{.j} - \\bar{\\theta}_{..}\\right)^2\n\n W = \\frac{1}{m} \\sum_{j=1}^{m} \\left[ \\frac{1}{n-1} \\sum_{i=1}^{n} \\left( \\theta_{ij} - \\bar{\\theta_{.j}}\\right)^2 \\right]\n\n where :math:`\\theta` represents each model parameter. We then compute\n :math:`\\hat{V} = \\frac{n_1}{n}W + \\frac{1}{n}B`, and have our convergence ratio\n :math:`\\hat{R} = \\sqrt{\\frac{\\hat{V}}{W}}`. 
We check that for all parameters,\n this ratio deviates from unity by less than the supplied threshold.\n \"\"\"\n if chain is None:\n return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))])\n\n index = self.parent._get_chain(chain)\n assert len(index) == 1, \"Please specify only one chain, have %d chains\" % len(index)\n chain = self.parent.chains[index[0]]\n\n num_walkers = chain.walkers\n parameters = chain.parameters\n name = chain.name\n data = chain.chain\n chains = np.split(data, num_walkers)\n assert num_walkers > 1, \"Cannot run Gelman-Rubin statistic with only one walker\"\n m = 1.0 * len(chains)\n n = 1.0 * chains[0].shape[0]\n all_mean = np.mean(data, axis=0)\n chain_means = np.array([np.mean(c, axis=0) for c in chains])\n chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])\n b = n / (m - 1) * ((chain_means - all_mean) ** 2).sum(axis=0)\n w = (1 / m) * chain_var.sum(axis=0)\n var = (n - 1) * w / n + b / n\n v = var + b / (n * m)\n R = np.sqrt(v / w)\n\n passed = np.abs(R - 1) < threshold\n print(\"Gelman-Rubin Statistic values for chain %s\" % name)\n for p, v, pas in zip(parameters, R, passed):\n param = \"Param %d\" % p if isinstance(p, int) else p\n print(\"%s: %7.5f (%s)\" % (param, v, \"Passed\" if pas else \"Failed\"))\n return np.all(passed)\n\n def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05):\n \"\"\" Runs the Geweke diagnostic on the supplied chains.\n\n Parameters\n ----------\n chain : int|str, optional\n Which chain to run the diagnostic on. By default, this is `None`,\n which will run the diagnostic on all chains. You can also\n supply and integer (the chain index) or a string, for the chain\n name (if you set one).\n first : float, optional\n The amount of the start of the chain to use\n last : float, optional\n The end amount of the chain to use\n threshold : float, optional\n The p-value to use when testing for normality.\n\n Returns\n -------\n float\n whether or not the chains pass the test\n\n \"\"\"\n if chain is None:\n return np.all([self.geweke(k, threshold=threshold) for k in range(len(self.parent.chains))])\n\n index = self.parent._get_chain(chain)\n assert len(index) == 1, \"Please specify only one chain, have %d chains\" % len(index)\n chain = self.parent.chains[index[0]]\n\n num_walkers = chain.walkers\n assert num_walkers is not None and num_walkers > 0, \"You need to specify the number of walkers to use the Geweke diagnostic.\"\n name = chain.name\n data = chain.chain\n chains = np.split(data, num_walkers)\n n = 1.0 * chains[0].shape[0]\n n_start = int(np.floor(first * n))\n n_end = int(np.floor((1 - last) * n))\n mean_start = np.array([np.mean(c[:n_start, i]) for c in chains for i in range(c.shape[1])])\n var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size for c in chains for i in range(c.shape[1])])\n mean_end = np.array([np.mean(c[n_end:, i]) for c in chains for i in range(c.shape[1])])\n var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size for c in chains for i in range(c.shape[1])])\n zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))\n _, pvalue = normaltest(zs)\n print(\"Gweke Statistic for chain %s has p-value %e\" % (name, pvalue))\n return pvalue > threshold\n\n # Method of estimating spectral density following PyMC.\n # See https://github.com/pymc-devs/pymc/blob/master/pymc/diagnostics.py\n def _spec(self, x, order=2):\n from statsmodels.regression.linear_model import yule_walker\n\n beta, sigma = yule_walker(x, order)\n 
return sigma ** 2 / (1.0 - np.sum(beta)) ** 2\n" ]
[ [ "numpy.random.normal", "numpy.dot", "numpy.random.seed", "numpy.identity", "numpy.random.random" ], [ "scipy.stats.normaltest", "numpy.sum", "numpy.mean", "numpy.split", "numpy.sqrt", "numpy.abs", "numpy.all", "numpy.var", "numpy.floor" ] ]
hakimakbarmaulana/dtaidistance
[ "ddf4a8111732d4429686d96c9195a81151be1dd8" ]
[ "tests/test_clustering.py" ]
[ "import os\nimport sys\nimport tempfile\nimport pytest\nimport logging\nfrom pathlib import Path\n\nfrom dtaidistance import dtw, dtw_ndim, clustering, util_numpy\nimport dtaidistance.dtw_visualisation as dtwvis\nfrom dtaidistance.exceptions import PyClusteringException\n\n\nlogger = logging.getLogger(\"be.kuleuven.dtai.distance\")\ndirectory = None\nnumpyonly = pytest.mark.skipif(\"util_numpy.test_without_numpy()\")\nscipyonly = pytest.mark.skipif(\"util_numpy.test_without_scipy()\")\n\n\n@numpyonly\ndef test_clustering():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1]])\n\n def test_hook(from_idx, to_idx, distance):\n assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]\n model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,\n show_progress=False)\n cluster_idx = model.fit(s)\n assert cluster_idx[0] == {0, 1, 3, 4}\n assert cluster_idx[2] == {2, 5}\n\n\n@numpyonly\ndef test_clustering_tree():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [1., 2, 0, 0, 0, 0, 0, 1, 1]])\n\n def test_hook(from_idx, to_idx, distance):\n assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]\n model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,\n show_progress=False)\n modelw = clustering.HierarchicalTree(model)\n cluster_idx = modelw.fit(s)\n assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}\n\n if directory:\n hierarchy_fn = os.path.join(directory, \"hierarchy.png\")\n graphviz_fn = os.path.join(directory, \"hierarchy.dot\")\n else:\n file = tempfile.NamedTemporaryFile()\n hierarchy_fn = file.name + \"_hierarchy.png\"\n graphviz_fn = file.name + \"_hierarchy.dot\"\n\n if not dtwvis.test_without_visualization():\n modelw.plot(hierarchy_fn)\n print(\"Figure saved to\", hierarchy_fn)\n\n with open(graphviz_fn, \"w\") as ofile:\n print(modelw.to_dot(), file=ofile)\n print(\"Dot saved to\", graphviz_fn)\n\n\n@numpyonly\ndef test_clustering_tree_ndim():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [[0.,0.], [0,0], [1,0], [2,0], [1,0], [0,0], [1,0], [0,0], [0,0]],\n [[0.,0.], [1,0], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [0,0]],\n [[1.,0.], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [1,0], [1,0]]])\n\n model = clustering.Hierarchical(dtw_ndim.distance_matrix_fast, {'ndim':2},\n show_progress=False)\n cluster_idx = model.fit(s)\n assert cluster_idx[0] == {0, 1, 2}\n\n\n@numpyonly\ndef test_clustering_tree_maxdist():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [1., 2, 0, 0, 0, 0, 0, 1, 1]])\n\n def test_hook(from_idx, to_idx, distance):\n assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]\n model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,\n show_progress=False, max_dist=0.1)\n modelw = clustering.HierarchicalTree(model)\n cluster_idx = modelw.fit(s)\n assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}\n\n if directory:\n hierarchy_fn = 
os.path.join(directory, \"hierarchy.png\")\n graphviz_fn = os.path.join(directory, \"hierarchy.dot\")\n else:\n file = tempfile.NamedTemporaryFile()\n hierarchy_fn = file.name + \"_hierarchy.png\"\n graphviz_fn = file.name + \"_hierarchy.dot\"\n\n if not dtwvis.test_without_visualization():\n modelw.plot(hierarchy_fn)\n print(\"Figure saved to\", hierarchy_fn)\n\n with open(graphviz_fn, \"w\") as ofile:\n print(modelw.to_dot(), file=ofile)\n print(\"Dot saved to\", graphviz_fn)\n\n\n@scipyonly\n@numpyonly\ndef test_linkage_tree():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [1., 2, 0, 0, 0, 0, 0, 1, 1]])\n\n model = clustering.LinkageTree(dtw.distance_matrix_fast, {})\n cluster_idx = model.fit(s)\n\n if directory:\n hierarchy_fn = os.path.join(directory, \"hierarchy.png\")\n graphviz_fn = os.path.join(directory, \"hierarchy.dot\")\n else:\n file = tempfile.NamedTemporaryFile()\n hierarchy_fn = file.name + \"_hierarchy.png\"\n graphviz_fn = file.name + \"_hierarchy.dot\"\n if not dtwvis.test_without_visualization():\n model.plot(hierarchy_fn)\n print(\"Figure saved to\", hierarchy_fn)\n with open(graphviz_fn, \"w\") as ofile:\n print(model.to_dot(), file=ofile)\n print(\"Dot saved to\", graphviz_fn)\n\n\n@scipyonly\n@numpyonly\ndef test_controlchart():\n with util_numpy.test_uses_numpy() as np:\n series = np.zeros((600, 60))\n rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')\n with open(rsrc_fn, 'r') as ifile:\n for idx, line in enumerate(ifile.readlines()):\n series[idx, :] = line.split()\n s = []\n for idx in range(0, 600, 20):\n s.append(series[idx, :])\n\n model = clustering.LinkageTree(dtw.distance_matrix_fast, {'parallel': True})\n cluster_idx = model.fit(s)\n\n if not dtwvis.test_without_visualization():\n import matplotlib.pyplot as plt\n if directory:\n hierarchy_fn = os.path.join(directory, \"hierarchy.png\")\n else:\n file = tempfile.NamedTemporaryFile()\n hierarchy_fn = file.name + \"_hierarchy.png\"\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))\n show_ts_label = lambda idx: \"ts-\" + str(idx)\n # show_ts_label = list(range(len(s)))\n\n def curcmap(idx):\n if idx % 2 == 0:\n return 'r'\n return 'g'\n\n model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,\n show_tr_label=True, ts_label_margin=-10,\n ts_left_margin=10, ts_sample_length=1, ts_color=curcmap)\n print(\"Figure saved to\", hierarchy_fn)\n\n\n@scipyonly\n@numpyonly\ndef test_plotbug1():\n with util_numpy.test_uses_numpy() as np:\n s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])\n s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])\n\n series = s1, s2\n\n m = clustering.LinkageTree(dtw.distance_matrix, {})\n m.fit(series)\n\n if not dtwvis.test_without_visualization():\n if directory:\n hierarchy_fn = os.path.join(directory, \"clustering.png\")\n else:\n file = tempfile.NamedTemporaryFile()\n hierarchy_fn = file.name + \"_clustering.png\"\n m.plot(hierarchy_fn)\n print(\"Figure save to\", hierarchy_fn)\n\n\n@numpyonly\ndef test_clustering_centroid():\n with util_numpy.test_uses_numpy() as np:\n s = np.array([\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [0., 0, 1, 2, 1, 0, 1, 0, 0],\n [0., 1, 2, 0, 0, 0, 0, 0, 0],\n [1., 2, 0, 0, 0, 0, 0, 1, 1],\n [1., 2, 0, 0, 0, 0, 0, 1, 
1]])\n\n # def test_hook(from_idx, to_idx, distance):\n # assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]\n model = clustering.KMedoids(dtw.distance_matrix_fast, {}, k=3,\n show_progress=False)\n try:\n cluster_idx = model.fit(s)\n except PyClusteringException:\n return\n # assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}\n\n if not dtwvis.test_without_visualization():\n if directory:\n png_fn = os.path.join(directory, \"centroid.png\")\n else:\n file = tempfile.NamedTemporaryFile()\n png_fn = file.name + \"_centroid.png\"\n model.plot(png_fn)\n print(\"Figure saved to\", png_fn)\n\n\nif __name__ == \"__main__\":\n logger.setLevel(logging.DEBUG)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))\n print(f\"Saving files to {directory}\")\n # test_clustering_tree()\n test_clustering_tree_ndim()\n # test_clustering_tree_maxdist()\n # test_linkage_tree()\n # test_controlchart()\n # test_plotbug1()\n # test_clustering_centroid()\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
pei223/deepext_with_lightning
[ "e40ac19844a05864f803431d8ef4a534286a0950" ]
[ "deepext_with_lightning/metrics/object_detection.py" ]
[ "from typing import List, Tuple, Union\n\nimport numpy as np\nimport torch\n\nimport pytorch_lightning as pl\n\n\ndef calc_area(bbox: np.ndarray):\n return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n\n\ndef calc_bbox_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:\n \"\"\"\n :param pred: ndarray (4, )\n :param teacher: ndarray (4, )\n :return: overlap, union, iou\n \"\"\"\n teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])\n if pred is None:\n return 0.0, teacher_area, 0.0\n\n pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])\n\n intersection_width = np.maximum(np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0]), 0)\n intersection_height = np.maximum(np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1]), 0)\n\n overlap = intersection_width * intersection_height\n union = teacher_area + pred_area - overlap\n iou = overlap / union\n return overlap, union, iou\n\n\nclass DetectionIoU(pl.metrics.Metric):\n def __init__(self, n_classes: int, by_classes: bool = False):\n super().__init__(compute_on_step=False)\n self._n_classes = n_classes\n self._by_classes = by_classes\n self.add_state(\"image_count_by_classes\", default=torch.tensor([0. for _ in range(n_classes)]),\n dist_reduce_fx=\"sum\")\n self.add_state(\"total_iou_by_classes\", default=torch.tensor([0. for _ in range(n_classes)]),\n dist_reduce_fx=\"sum\")\n\n def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:\n \"\"\"\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n \"\"\"\n targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets\n # 全探索だと遅いのでクラスごとにまとめておく\n preds_by_class = []\n for pred_bboxes in preds:\n pred_by_class = [[] for _ in range(self._n_classes)]\n for pred_bbox in pred_bboxes:\n pred_by_class[int(pred_bbox[4])].append(pred_bbox)\n preds_by_class.append(pred_by_class)\n\n for i in range(targets.shape[0]): # Explore every batch.\n bbox_annotations = targets[i, :, :]\n # Exclude invalid label annotation.\n bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]\n\n pred_by_class = preds_by_class[i]\n\n \"\"\"\n 1画像でラベルごとに計算.\n ラベルごとの面積合計/overlapを計算\n 1画像ごとにIoU算出、最終的に画像平均を算出\n \"\"\"\n\n total_area_by_classes = [0 for _ in range(self._n_classes)]\n total_overlap_by_classes = [0 for _ in range(self._n_classes)]\n is_label_appeared = [False for _ in range(self._n_classes)]\n for bbox_annotation in bbox_annotations:\n\n label = int(bbox_annotation[4])\n total_area_by_classes[label] += calc_area(bbox_annotation)\n pred_bboxes = pred_by_class[label]\n\n if pred_bboxes is None or len(pred_bboxes) == 0:\n continue\n\n # Calculate area and overlap by class.\n for pred_bbox in pred_bboxes:\n overlap, _, _ = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)\n total_overlap_by_classes[label] += overlap\n if is_label_appeared[label]:\n continue\n total_area_by_classes[label] += calc_area(pred_bbox)\n is_label_appeared[label] = True\n\n for label in range(self._n_classes):\n # Not exist label in this data.\n if total_area_by_classes[label] <= 0:\n continue\n self.total_iou_by_classes[label] += total_overlap_by_classes[label] / (\n total_area_by_classes[label] - total_overlap_by_classes[label])\n self.image_count_by_classes[label] += 1\n\n def compute(self):\n epsilon = 1e-8\n iou_by_classes = 
self.total_iou_by_classes / (self.image_count_by_classes + epsilon)\n if self._by_classes:\n return iou_by_classes\n return torch.mean(iou_by_classes)\n\n\nclass RecallPrecision(pl.metrics.Metric):\n def __init__(self, n_classes: int, by_classes: bool = False):\n super().__init__(compute_on_step=False)\n self._n_classes = n_classes\n self._by_classes = by_classes\n self.add_state(\"tp_by_classes\", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx=\"sum\")\n self.add_state(\"fp_by_classes\", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx=\"sum\")\n self.add_state(\"fn_by_classes\", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx=\"sum\")\n\n def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:\n \"\"\"\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n \"\"\"\n targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets\n # 全探索だと遅いのでクラスごとにまとめておく\n preds_by_class = []\n for pred_bboxes in preds:\n pred_by_class = [[] for _ in range(self._n_classes)]\n for pred_bbox in pred_bboxes:\n pred_by_class[int(pred_bbox[4])].append(pred_bbox)\n preds_by_class.append(pred_by_class)\n\n for i in range(targets.shape[0]):\n bbox_annotations = targets[i, :, :]\n # Exclude invalid label annotation.\n bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]\n\n pred_by_class = preds_by_class[i]\n\n applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]\n for bbox_annotation in bbox_annotations:\n label = int(bbox_annotation[4])\n pred_bboxes = pred_by_class[label]\n\n if pred_bboxes is None or len(pred_bboxes) == 0:\n self.fn_by_classes[label] += 1\n continue\n # Explore max iou of bbox_annotation\n is_matched = False\n for pred_bbox in pred_bboxes:\n overlap, union, iou = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)\n if iou >= 0.5:\n applied_bbox_count_by_classes[label] += 1\n self.tp_by_classes[label] += 1\n is_matched = True\n break\n if not is_matched:\n self.fn_by_classes[label] += 1\n\n for label in range(self._n_classes):\n self.fp_by_classes[label] += len(pred_by_class[label]) - applied_bbox_count_by_classes[label]\n\n def compute(self):\n epsilon = 1e-8\n recall = self.tp_by_classes / (self.tp_by_classes + self.fn_by_classes + epsilon)\n precision = self.tp_by_classes / (self.tp_by_classes + self.fp_by_classes + epsilon)\n f_score = 2. 
* recall * precision / (recall + precision + epsilon)\n if self._by_classes:\n return recall, precision, f_score\n return torch.mean(recall), torch.mean(precision), torch.mean(f_score)\n\n\nclass MeanAveragePrecision(pl.metrics.Metric):\n def __init__(self, n_classes: int, by_classes=False):\n super().__init__(compute_on_step=False)\n self._n_classes = n_classes\n # TODO want to implement using add_state\n self.fp_list_by_classes = [[] for _ in range(n_classes)]\n self.tp_list_by_classes = [[] for _ in range(n_classes)]\n self.score_list_by_classes = [[] for _ in range(n_classes)]\n self.num_annotations_by_classes = [0 for _ in range(n_classes)]\n # self.add_state(\"fp_list_by_classes\", default=[[] for _ in range(n_classes)], dist_reduce_fx=\"cat\")\n # self.add_state(\"tp_list_by_classes\", default=[[] for _ in range(n_classes)], dist_reduce_fx=\"cat\")\n # self.add_state(\"score_list_by_classes\", default=[[] for _ in range(n_classes)], dist_reduce_fx=\"cat\")\n # self.add_state(\"num_annotations_by_classes\", default=[0 for _ in range(n_classes)], dist_reduce_fx=\"cat\")\n self._by_classes = by_classes\n\n def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:\n \"\"\"\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n \"\"\"\n targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets\n for i in range(len(preds)):\n pred_bboxes, target_bboxes = preds[i], targets[i]\n # exclude invalid annotations.\n target_bboxes = target_bboxes[target_bboxes[:, 4] >= 0]\n self._update_num_annotations(target_bboxes)\n self._update_tp_fp_score(pred_bboxes, target_bboxes)\n\n def compute(self):\n ap_by_classes = [0 for _ in range(self._n_classes)]\n for label in range(self._n_classes):\n num_annotations = self.num_annotations_by_classes[label]\n tp_list, fp_list = np.array(self.tp_list_by_classes[label]), np.array(self.fp_list_by_classes[label])\n scores = np.array(self.score_list_by_classes[label])\n indices = np.argsort(-scores)\n # sort by score\n tp_list, fp_list = tp_list[indices], fp_list[indices]\n # cumulative sum\n tp_list, fp_list = np.cumsum(tp_list), np.cumsum(fp_list)\n\n if num_annotations == 0:\n ap_by_classes[label] = 0\n continue\n recall_curve = tp_list / num_annotations\n precision_curve = tp_list / np.maximum(tp_list + fp_list, np.finfo(np.float64).eps)\n ap_by_classes[label] = self._compute_average_precision(recall_curve, precision_curve)\n return ap_by_classes if self._by_classes else sum(ap_by_classes) / len(ap_by_classes)\n\n def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):\n \"\"\"\n :param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))\n :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))\n \"\"\"\n detected_indices = []\n for i in range(pred_bboxes.shape[0]):\n pred_label, pred_score = int(pred_bboxes[i][4]), pred_bboxes[i][5]\n matched = False\n for j in filter(lambda k: int(target_bboxes[k][4]) == pred_label and k not in detected_indices,\n range(target_bboxes.shape[0])):\n overlap, union, iou = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])\n if iou >= 0.5:\n detected_indices.append(j)\n self.fp_list_by_classes[pred_label].append(0)\n self.tp_list_by_classes[pred_label].append(1)\n matched = True\n break\n if not matched:\n self.fp_list_by_classes[pred_label].append(1)\n 
self.tp_list_by_classes[pred_label].append(0)\n self.score_list_by_classes[pred_label].append(pred_score)\n\n def _update_num_annotations(self, target_bboxes: np.ndarray):\n \"\"\"\n :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))\n \"\"\"\n counts = list(map(lambda i: np.count_nonzero(target_bboxes[:, 4] == i), range(self._n_classes)))\n self.num_annotations_by_classes = list(\n map(lambda i: counts[i] + self.num_annotations_by_classes[i], range(self._n_classes)))\n\n def _compute_average_precision(self, recall_curve: np.ndarray, precision_curve: np.ndarray):\n # Reference by https://github.com/toandaominh1997/EfficientDet.Pytorch/blob/master/eval.py\n assert recall_curve.ndim == 1 and precision_curve.ndim == 1\n # correct AP calculation\n # first append sentinel values at the end\n mean_recall = np.concatenate(([0.], recall_curve, [1.]))\n mean_precision = np.concatenate(([0.], precision_curve, [0.]))\n\n # compute the precision envelope\n for i in range(mean_precision.size - 1, 0, -1):\n mean_precision[i - 1] = np.maximum(mean_precision[i - 1], mean_precision[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mean_recall[1:] != mean_recall[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mean_recall[i + 1] - mean_recall[i]) * mean_precision[i + 1])\n return ap\n\n def reset(self):\n self.fp_list_by_classes = [[] for _ in range(self._n_classes)]\n self.tp_list_by_classes = [[] for _ in range(self._n_classes)]\n self.score_list_by_classes = [[] for _ in range(self._n_classes)]\n self.num_annotations_by_classes = [0 for _ in range(self._n_classes)]\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.count_nonzero", "numpy.minimum", "numpy.sum", "numpy.where", "numpy.finfo", "numpy.argsort", "numpy.cumsum", "torch.mean", "numpy.maximum" ] ]
uw-ipd/numba
[ "26dde2b28cadda403a5549a84dc1698900b23f74", "26dde2b28cadda403a5549a84dc1698900b23f74", "26dde2b28cadda403a5549a84dc1698900b23f74", "26dde2b28cadda403a5549a84dc1698900b23f74" ]
[ "examples/linear_regression/linear_regression_numba.py", "numba/tests/test_pycc.py", "numba/typing/arraydecl.py", "numba/cuda/tests/cudapy/test_smart_array.py" ]
[ "#\n# Copyright (c) 2017 Intel Corporation\n# SPDX-License-Identifier: BSD-2-Clause\n#\n\nimport numba\nimport numpy as np\nimport argparse\nimport time\n\n@numba.njit()\ndef linear_regression(Y, X, w, iterations, alphaN):\n for i in range(iterations):\n w -= alphaN * np.dot(X.T, np.dot(X,w)-Y)\n return w\n\ndef main():\n parser = argparse.ArgumentParser(description='Linear Regression.')\n parser.add_argument('--samples', dest='samples', type=int, default=200000)\n parser.add_argument('--features', dest='features', type=int, default=10)\n parser.add_argument('--functions', dest='functions', type=int, default=4)\n parser.add_argument('--iterations', dest='iterations', type=int, default=20)\n args = parser.parse_args()\n N = args.samples\n D = args.features\n p = args.functions\n iterations = args.iterations\n alphaN = 0.01/N\n w = np.zeros((D,p))\n np.random.seed(0)\n points = np.random.random((N,D))\n labels = np.random.random((N,p))\n t1 = time.time()\n w = linear_regression(labels, points, w, iterations, alphaN)\n selftimed = time.time()-t1\n print(\"SELFTIMED \", selftimed)\n print(\"checksum: \", np.sum(w))\n\nif __name__ == '__main__':\n main()\n", "from __future__ import print_function\n\nimport contextlib\nimport imp\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom unittest import skip\nfrom ctypes import *\n\nimport numpy as np\ntry:\n import setuptools\nexcept ImportError:\n setuptools = None\n\nimport llvmlite.binding as ll\n\nfrom numba import unittest_support as unittest\nfrom numba.pycc import main\nfrom numba.pycc.decorators import clear_export_registry\nfrom numba.pycc.platform import find_shared_ending, find_pyext_ending\nfrom numba.pycc.platform import _external_compiler_ok\n\n# if suitable compilers are not present then skip.\n_skip_reason = 'AOT compatible compilers missing'\n_skip_missing_compilers = unittest.skipIf(not _external_compiler_ok,\n _skip_reason)\n\nfrom .matmul_usecase import has_blas\nfrom .support import TestCase, tag, import_dynamic, temp_directory\n\n\nbase_path = os.path.dirname(os.path.abspath(__file__))\n\n\ndef unset_macosx_deployment_target():\n \"\"\"Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable\n libraries\n \"\"\"\n if 'MACOSX_DEPLOYMENT_TARGET' in os.environ:\n del os.environ['MACOSX_DEPLOYMENT_TARGET']\n\n\nclass BasePYCCTest(TestCase):\n\n def setUp(self):\n unset_macosx_deployment_target()\n\n self.tmpdir = temp_directory('test_pycc')\n # Make sure temporary files and directories created by\n # distutils don't clutter the top-level /tmp\n tempfile.tempdir = self.tmpdir\n\n def tearDown(self):\n tempfile.tempdir = None\n # Since we're executing the module-under-test several times\n # from the same process, we must clear the exports registry\n # between invocations.\n clear_export_registry()\n\n @contextlib.contextmanager\n def check_c_ext(self, extdir, name):\n sys.path.append(extdir)\n try:\n lib = import_dynamic(name)\n yield lib\n finally:\n sys.path.remove(extdir)\n sys.modules.pop(name, None)\n\n\n@_skip_missing_compilers\nclass TestLegacyAPI(BasePYCCTest):\n\n def test_pycc_ctypes_lib(self):\n \"\"\"\n Test creating a C shared library object using pycc.\n \"\"\"\n source = os.path.join(base_path, 'compile_with_pycc.py')\n cdll_modulename = 'test_dll_legacy' + find_shared_ending()\n cdll_path = os.path.join(self.tmpdir, cdll_modulename)\n if os.path.exists(cdll_path):\n os.unlink(cdll_path)\n\n main(args=['--debug', '-o', cdll_path, source])\n lib = CDLL(cdll_path)\n 
lib.mult.argtypes = [POINTER(c_double), c_void_p,\n c_double, c_double]\n lib.mult.restype = c_int\n\n lib.multf.argtypes = [POINTER(c_float), c_void_p,\n c_float, c_float]\n lib.multf.restype = c_int\n\n res = c_double()\n lib.mult(byref(res), None, 123, 321)\n self.assertEqual(res.value, 123 * 321)\n\n res = c_float()\n lib.multf(byref(res), None, 987, 321)\n self.assertEqual(res.value, 987 * 321)\n\n def test_pycc_pymodule(self):\n \"\"\"\n Test creating a CPython extension module using pycc.\n \"\"\"\n self.skipTest(\"lack of environment can make the extension crash\")\n\n source = os.path.join(base_path, 'compile_with_pycc.py')\n modulename = 'test_pyext_legacy'\n out_modulename = os.path.join(self.tmpdir,\n modulename + find_pyext_ending())\n if os.path.exists(out_modulename):\n os.unlink(out_modulename)\n\n main(args=['--debug', '--python', '-o', out_modulename, source])\n\n with self.check_c_ext(self.tmpdir, modulename) as lib:\n res = lib.multi(123, 321)\n self.assertPreciseEqual(res, 123 * 321)\n res = lib.multf(987, 321)\n self.assertPreciseEqual(res, 987.0 * 321.0)\n\n def test_pycc_bitcode(self):\n \"\"\"\n Test creating a LLVM bitcode file using pycc.\n \"\"\"\n modulename = os.path.join(base_path, 'compile_with_pycc')\n bitcode_modulename = os.path.join(self.tmpdir, 'test_bitcode_legacy.bc')\n if os.path.exists(bitcode_modulename):\n os.unlink(bitcode_modulename)\n\n main(args=['--debug', '--llvm', '-o', bitcode_modulename,\n modulename + '.py'])\n\n # Sanity check bitcode file contents\n with open(bitcode_modulename, \"rb\") as f:\n bc = f.read()\n\n bitcode_wrapper_magic = b'\\xde\\xc0\\x17\\x0b'\n bitcode_magic = b'BC\\xc0\\xde'\n self.assertTrue(bc.startswith((bitcode_magic, bitcode_wrapper_magic)), bc)\n\n\n@_skip_missing_compilers\nclass TestCC(BasePYCCTest):\n\n def setUp(self):\n super(TestCC, self).setUp()\n from . 
import compile_with_pycc\n self._test_module = compile_with_pycc\n imp.reload(self._test_module)\n\n @contextlib.contextmanager\n def check_cc_compiled(self, cc):\n #cc.verbose = True\n cc.output_dir = self.tmpdir\n cc.compile()\n\n with self.check_c_ext(self.tmpdir, cc.name) as lib:\n yield lib\n\n def check_cc_compiled_in_subprocess(self, lib, code):\n prolog = \"\"\"if 1:\n import sys\n sys.path.insert(0, %(path)r)\n import %(name)s as lib\n \"\"\" % {'name': lib.__name__,\n 'path': os.path.dirname(lib.__file__)}\n code = prolog.strip(' ') + code\n subprocess.check_call([sys.executable, '-c', code])\n\n def test_cc_properties(self):\n cc = self._test_module.cc\n self.assertEqual(cc.name, 'pycc_test_simple')\n\n # Inferred output directory\n d = self._test_module.cc.output_dir\n self.assertTrue(os.path.isdir(d), d)\n\n # Inferred output filename\n f = self._test_module.cc.output_file\n self.assertFalse(os.path.exists(f), f)\n self.assertTrue(os.path.basename(f).startswith('pycc_test_simple.'), f)\n if sys.platform.startswith('linux'):\n self.assertTrue(f.endswith('.so'), f)\n if sys.version_info >= (3,):\n self.assertIn('.cpython', f)\n\n def test_compile(self):\n with self.check_cc_compiled(self._test_module.cc) as lib:\n res = lib.multi(123, 321)\n self.assertPreciseEqual(res, 123 * 321)\n res = lib.multf(987, 321)\n self.assertPreciseEqual(res, 987.0 * 321.0)\n res = lib.square(5)\n self.assertPreciseEqual(res, 25)\n self.assertIs(lib.get_none(), None)\n with self.assertRaises(ZeroDivisionError):\n lib.div(1, 0)\n\n def check_compile_for_cpu(self, cpu_name):\n cc = self._test_module.cc\n cc.target_cpu = cpu_name\n\n with self.check_cc_compiled(cc) as lib:\n res = lib.multi(123, 321)\n self.assertPreciseEqual(res, 123 * 321)\n self.assertEqual(lib.multi.__module__, 'pycc_test_simple')\n\n def test_compile_for_cpu(self):\n # Compiling for the host CPU should always succeed\n self.check_compile_for_cpu(ll.get_host_cpu_name())\n\n def test_compile_for_cpu_host(self):\n # Compiling for the host CPU should always succeed\n self.check_compile_for_cpu(\"host\")\n\n @tag('important')\n def test_compile_helperlib(self):\n with self.check_cc_compiled(self._test_module.cc_helperlib) as lib:\n res = lib.power(2, 7)\n self.assertPreciseEqual(res, 128)\n for val in (-1, -1 + 0j, np.complex128(-1)):\n res = lib.sqrt(val)\n self.assertPreciseEqual(res, 1j)\n for val in (4, 4.0, np.float64(4)):\n res = lib.np_sqrt(val)\n self.assertPreciseEqual(res, 2.0)\n res = lib.spacing(1.0)\n self.assertPreciseEqual(res, 2**-52)\n # Implicit seeding at startup should guarantee a non-pathological\n # start state.\n self.assertNotEqual(lib.random(-1), lib.random(-1))\n res = lib.random(42)\n expected = np.random.RandomState(42).random_sample()\n self.assertPreciseEqual(res, expected)\n res = lib.size(np.float64([0] * 3))\n self.assertPreciseEqual(res, 3)\n\n code = \"\"\"if 1:\n from numpy.testing import assert_equal, assert_allclose\n res = lib.power(2, 7)\n assert res == 128\n res = lib.random(42)\n assert_allclose(res, %(expected)s)\n res = lib.spacing(1.0)\n assert_allclose(res, 2**-52)\n \"\"\" % {'expected': expected}\n self.check_cc_compiled_in_subprocess(lib, code)\n\n @tag('important')\n def test_compile_nrt(self):\n with self.check_cc_compiled(self._test_module.cc_nrt) as lib:\n # Sanity check\n self.assertPreciseEqual(lib.zero_scalar(1), 0.0)\n res = lib.zeros(3)\n self.assertEqual(list(res), [0, 0, 0])\n if has_blas:\n res = lib.vector_dot(4)\n self.assertPreciseEqual(res, 30.0)\n\n code = \"\"\"if 1:\n 
res = lib.zero_scalar(1)\n assert res == 0.0\n res = lib.zeros(3)\n assert list(res) == [0, 0, 0]\n if %(has_blas)s:\n res = lib.vector_dot(4)\n assert res == 30.0\n \"\"\" % dict(has_blas=has_blas)\n self.check_cc_compiled_in_subprocess(lib, code)\n\n\n@_skip_missing_compilers\nclass TestDistutilsSupport(TestCase):\n\n def setUp(self):\n unset_macosx_deployment_target()\n\n # Copy the test project into a temp directory to avoid\n # keeping any build leftovers in the source tree\n self.tmpdir = temp_directory('test_pycc_distutils')\n source_dir = os.path.join(base_path, 'pycc_distutils_usecase')\n self.usecase_dir = os.path.join(self.tmpdir, 'work')\n shutil.copytree(source_dir, self.usecase_dir)\n\n def check_setup_py(self, setup_py_file):\n # Compute PYTHONPATH to ensure the child processes see this Numba\n import numba\n numba_path = os.path.abspath(os.path.dirname(\n os.path.dirname(numba.__file__)))\n env = dict(os.environ)\n if env.get('PYTHONPATH', ''):\n env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH']\n else:\n env['PYTHONPATH'] = numba_path\n\n def run_python(args):\n p = subprocess.Popen([sys.executable] + args,\n cwd=self.usecase_dir,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=env)\n out, _ = p.communicate()\n rc = p.wait()\n if rc != 0:\n self.fail(\"python failed with the following output:\\n%s\"\n % out.decode('utf-8', 'ignore'))\n\n run_python([setup_py_file, \"build_ext\", \"--inplace\"])\n code = \"\"\"if 1:\n import pycc_compiled_module as lib\n assert lib.get_const() == 42\n res = lib.ones(3)\n assert list(res) == [1.0, 1.0, 1.0]\n \"\"\"\n run_python([\"-c\", code])\n\n def test_setup_py_distutils(self):\n if sys.version_info < (3,) and sys.platform == \"win32\":\n # See e.g. https://stackoverflow.com/questions/28931875/problems-finding-vcvarsall-bat-when-using-distutils\n self.skipTest(\"must use setuptools to build extensions for Python 2\")\n self.check_setup_py(\"setup_distutils.py\")\n\n @unittest.skipIf(setuptools is None, \"test needs setuptools\")\n def test_setup_py_setuptools(self):\n self.check_setup_py(\"setup_setuptools.py\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "from __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom numba import types, utils\nfrom numba.typing.templates import (AttributeTemplate, AbstractTemplate,\n infer, infer_getattr, signature,\n bound_function)\n# import time side effect: array operations requires typing support of sequence\n# defined in collections: e.g. 
array.shape[i]\nfrom numba.typing import collections\nfrom numba.errors import TypingError\n\nIndexing = namedtuple(\"Indexing\", (\"index\", \"result\", \"advanced\"))\n\n\ndef get_array_index_type(ary, idx):\n \"\"\"\n Returns None or a tuple-3 for the types of the input array, index, and\n resulting type of ``array[index]``.\n\n Note: This is shared logic for ndarray getitem and setitem.\n \"\"\"\n if not isinstance(ary, types.Buffer):\n return\n\n ndim = ary.ndim\n\n left_indices = []\n right_indices = []\n ellipsis_met = False\n advanced = False\n has_integer = False\n\n if not isinstance(idx, types.BaseTuple):\n idx = [idx]\n\n # Walk indices\n for ty in idx:\n if ty is types.ellipsis:\n if ellipsis_met:\n raise TypeError(\"only one ellipsis allowed in array index \"\n \"(got %s)\" % (idx,))\n ellipsis_met = True\n elif isinstance(ty, types.SliceType):\n pass\n elif isinstance(ty, types.Integer):\n # Normalize integer index\n ty = types.intp if ty.signed else types.uintp\n # Integer indexing removes the given dimension\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array) and ty.ndim == 0\n and isinstance(ty.dtype, types.Integer)):\n # 0-d array used as integer index\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array)\n and ty.ndim == 1\n and isinstance(ty.dtype, (types.Integer, types.Boolean))):\n if advanced or has_integer:\n # We don't support the complicated combination of\n # advanced indices (and integers are considered part\n # of them by Numpy).\n raise NotImplementedError(\"only one advanced index supported\")\n advanced = True\n else:\n raise TypeError(\"unsupported array index type %s in %s\"\n % (ty, idx))\n (right_indices if ellipsis_met else left_indices).append(ty)\n\n # Only Numpy arrays support advanced indexing\n if advanced and not isinstance(ary, types.Array):\n return\n\n # Check indices and result dimensionality\n all_indices = left_indices + right_indices\n if ellipsis_met:\n assert right_indices[0] is types.ellipsis\n del right_indices[0]\n\n n_indices = len(all_indices) - ellipsis_met\n if n_indices > ary.ndim:\n raise TypeError(\"cannot index %s with %d indices: %s\"\n % (ary, n_indices, idx))\n if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:\n # Full integer indexing => scalar result\n # (note if ellipsis is present, a 0-d view is returned instead)\n res = ary.dtype\n\n elif advanced:\n # Result is a copy\n res = ary.copy(ndim=ndim, layout='C', readonly=False)\n\n else:\n # Result is a view\n if ary.slice_is_copy:\n # Avoid view semantics when the original type creates a copy\n # when slicing.\n return\n\n # Infer layout\n layout = ary.layout\n\n def keeps_contiguity(ty, is_innermost):\n # A slice can only keep an array contiguous if it is the\n # innermost index and it is not strided\n return (ty is types.ellipsis or isinstance(ty, types.Integer)\n or (is_innermost and isinstance(ty, types.SliceType)\n and not ty.has_step))\n\n def check_contiguity(outer_indices):\n \"\"\"\n Whether indexing with the given indices (from outer to inner in\n physical layout order) can keep an array contiguous.\n \"\"\"\n for ty in outer_indices[:-1]:\n if not keeps_contiguity(ty, False):\n return False\n if outer_indices and not keeps_contiguity(outer_indices[-1], True):\n return False\n return True\n\n if layout == 'C':\n # Integer indexing on the left keeps the array C-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n left_indices = left_indices + right_indices\n right_indices 
= []\n if right_indices:\n layout = 'A'\n elif not check_contiguity(left_indices):\n layout = 'A'\n elif layout == 'F':\n # Integer indexing on the right keeps the array F-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n right_indices = left_indices + right_indices\n left_indices = []\n if left_indices:\n layout = 'A'\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n\n res = ary.copy(ndim=ndim, layout=layout)\n\n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n idx = types.BaseTuple.from_types(all_indices)\n else:\n idx, = all_indices\n\n return Indexing(idx, res, advanced)\n\n\n@infer\nclass GetItemBuffer(AbstractTemplate):\n key = \"getitem\"\n\n def generic(self, args, kws):\n assert not kws\n [ary, idx] = args\n out = get_array_index_type(ary, idx)\n if out is not None:\n return signature(out.result, ary, out.index)\n\n@infer\nclass SetItemBuffer(AbstractTemplate):\n key = \"setitem\"\n\n def generic(self, args, kws):\n assert not kws\n ary, idx, val = args\n if not isinstance(ary, types.Buffer):\n return\n if not ary.mutable:\n raise TypeError(\"Cannot modify value of type %s\" %(ary,))\n out = get_array_index_type(ary, idx)\n if out is None:\n return\n\n idx = out.index\n res = out.result\n if isinstance(res, types.Array):\n # Indexing produces an array\n if isinstance(val, types.Array):\n if not self.context.can_convert(val.dtype, res.dtype):\n # DType conversion not possible\n return\n else:\n res = val\n elif isinstance(val, types.Sequence):\n if (res.ndim == 1 and\n self.context.can_convert(val.dtype, res.dtype)):\n # Allow assignement of sequence to 1d array\n res = val\n else:\n # NOTE: sequence-to-array broadcasting is unsupported\n return\n else:\n # Allow scalar broadcasting\n if self.context.can_convert(val, res.dtype):\n res = res.dtype\n else:\n # Incompatible scalar type\n return\n elif not isinstance(val, types.Array):\n # Single item assignment\n if not self.context.can_convert(val, res):\n # if the array dtype is not yet defined\n if not res.is_precise():\n # set the array type to use the dtype of value (RHS)\n newary = ary.copy(dtype=val)\n return signature(types.none, newary, idx, res)\n else:\n return\n res = val\n else:\n return\n return signature(types.none, ary, idx, res)\n\n\ndef normalize_shape(shape):\n if isinstance(shape, types.UniTuple):\n if isinstance(shape.dtype, types.Integer):\n dimtype = types.intp if shape.dtype.signed else types.uintp\n return types.UniTuple(dimtype, len(shape))\n\n elif isinstance(shape, types.Tuple) and shape.count == 0:\n # Force (0 x intp) for consistency with other shapes\n return types.UniTuple(types.intp, 0)\n\n\n@infer_getattr\nclass ArrayAttribute(AttributeTemplate):\n key = types.Array\n\n def resolve_dtype(self, ary):\n return types.DType(ary.dtype)\n\n def resolve_itemsize(self, ary):\n return types.intp\n\n def resolve_shape(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_strides(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_ndim(self, ary):\n return types.intp\n\n def resolve_size(self, ary):\n return types.intp\n\n def resolve_flat(self, ary):\n return types.NumpyFlatType(ary)\n\n def resolve_ctypes(self, ary):\n return types.ArrayCTypes(ary)\n\n def resolve_flags(self, ary):\n return types.ArrayFlags(ary)\n\n def resolve_T(self, ary):\n if ary.ndim <= 1:\n retty = ary\n else:\n layout = {\"C\": \"F\", \"F\": \"C\"}.get(ary.layout, \"A\")\n retty = ary.copy(layout=layout)\n return 
retty\n\n def resolve_real(self, ary):\n return self._resolve_real_imag(ary, attr='real')\n\n def resolve_imag(self, ary):\n return self._resolve_real_imag(ary, attr='imag')\n\n def _resolve_real_imag(self, ary, attr):\n if ary.dtype in types.complex_domain:\n return ary.copy(dtype=ary.dtype.underlying_float, layout='A')\n elif ary.dtype in types.number_domain:\n res = ary.copy(dtype=ary.dtype)\n if attr == 'imag':\n res = res.copy(readonly=True)\n return res\n else:\n msg = \"cannot access .{} of array of {}\"\n raise TypingError(msg.format(attr, ary.dtype))\n\n @bound_function(\"array.transpose\")\n def resolve_transpose(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"transpose() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if len(args) == 0:\n return signature(self.resolve_T(ary))\n\n if len(args) == 1:\n shape, = args\n\n if sentry_shape_scalar(shape):\n assert ary.ndim == 1\n return signature(ary, *args)\n\n shape = normalize_shape(shape)\n if shape is None:\n return\n\n assert ary.ndim == shape.count\n return signature(self.resolve_T(ary), shape)\n\n else:\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"transpose({0}) is not supported\".format(\n ', '.join(args)))\n assert ary.ndim == len(args)\n return signature(self.resolve_T(ary), *args)\n\n @bound_function(\"array.copy\")\n def resolve_copy(self, ary, args, kws):\n assert not args\n assert not kws\n retty = ary.copy(layout=\"C\", readonly=False)\n return signature(retty)\n\n @bound_function(\"array.item\")\n def resolve_item(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if not args:\n return signature(ary.dtype)\n\n @bound_function(\"array.itemset\")\n def resolve_itemset(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. 
The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if len(args) == 1:\n return signature(types.none, ary.dtype)\n\n @bound_function(\"array.nonzero\")\n def resolve_nonzero(self, ary, args, kws):\n assert not args\n assert not kws\n # 0-dim arrays return one result array\n ndim = max(ary.ndim, 1)\n retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)\n return signature(retty)\n\n @bound_function(\"array.reshape\")\n def resolve_reshape(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"reshape() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if ary.layout not in 'CF':\n # only work for contiguous array\n raise TypeError(\"reshape() supports contiguous array only\")\n\n if len(args) == 1:\n # single arg\n shape, = args\n\n if sentry_shape_scalar(shape):\n ndim = 1\n else:\n shape = normalize_shape(shape)\n if shape is None:\n return\n ndim = shape.count\n retty = ary.copy(ndim=ndim)\n return signature(retty, shape)\n\n elif len(args) == 0:\n # no arg\n raise TypeError(\"reshape() take at least one arg\")\n\n else:\n # vararg case\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"reshape({0}) is not supported\".format(\n ', '.join(args)))\n\n retty = ary.copy(ndim=len(args))\n return signature(retty, *args)\n\n @bound_function(\"array.sort\")\n def resolve_sort(self, ary, args, kws):\n assert not args\n assert not kws\n if ary.ndim == 1:\n return signature(types.none)\n\n @bound_function(\"array.argsort\")\n def resolve_argsort(self, ary, args, kws):\n assert not args\n kwargs = dict(kws)\n kind = kwargs.pop('kind', types.Const('quicksort'))\n if kwargs:\n msg = \"Unsupported keywords: {!r}\"\n raise TypingError(msg.format([k for k in kwargs.keys()]))\n if ary.ndim == 1:\n def argsort_stub(kind='quicksort'):\n pass\n pysig = utils.pysignature(argsort_stub)\n sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)\n return sig\n\n @bound_function(\"array.view\")\n def resolve_view(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n retty = ary.copy(dtype=dtype)\n return signature(retty, *args)\n\n @bound_function(\"array.astype\")\n def resolve_astype(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n if not self.context.can_convert(ary.dtype, dtype):\n raise TypeError(\"astype(%s) not supported on %s: \"\n \"cannot convert from %s to %s\"\n % (dtype, ary, ary.dtype, dtype))\n layout = ary.layout if ary.layout in 'CF' else 'C'\n retty = ary.copy(dtype=dtype, layout=layout)\n return signature(retty, *args)\n\n @bound_function(\"array.ravel\")\n def resolve_ravel(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.flatten\")\n def resolve_flatten(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.take\")\n def resolve_take(self, ary, args, kws):\n assert not kws\n argty, = args\n if isinstance(argty, types.Integer):\n sig = signature(ary.dtype, 
*args)\n elif isinstance(argty, types.Array):\n sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)\n elif isinstance(argty, types.List): # 1d lists only\n sig = signature(types.Array(ary.dtype, 1, 'C'), *args)\n elif isinstance(argty, types.BaseTuple):\n sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)\n else:\n raise TypeError(\"take(%s) not supported for %s\" % argty)\n return sig\n\n def generic_resolve(self, ary, attr):\n # Resolution of other attributes, for record arrays\n if isinstance(ary.dtype, types.Record):\n if attr in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')\n\n\n@infer_getattr\nclass DTypeAttr(AttributeTemplate):\n key = types.DType\n\n def resolve_type(self, ary):\n # Wrap the numeric type in NumberClass\n return types.NumberClass(ary.dtype)\n\n def resolve_kind(self, ary):\n if isinstance(ary.key, types.scalars.Float):\n val = 'f'\n elif isinstance(ary.key, types.scalars.Integer):\n val = 'i'\n else:\n return None # other types not supported yet\n return types.Const(val)\n\n@infer\nclass StaticGetItemArray(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n ary, idx = args\n if (isinstance(ary, types.Array) and isinstance(idx, str) and\n isinstance(ary.dtype, types.Record)):\n if idx in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')\n\n\n@infer_getattr\nclass RecordAttribute(AttributeTemplate):\n key = types.Record\n\n def generic_resolve(self, record, attr):\n ret = record.typeof(attr)\n assert ret\n return ret\n\n@infer\nclass StaticGetItemRecord(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for records\n record, idx = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n ret = record.typeof(idx)\n assert ret\n return ret\n\n@infer\nclass StaticSetItemRecord(AbstractTemplate):\n key = \"static_setitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n record, idx, value = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n expectedty = record.typeof(idx)\n if self.context.can_convert(value, expectedty) is not None:\n return signature(types.void, record, types.Const(idx), value)\n\n\n@infer_getattr\nclass ArrayCTypesAttribute(AttributeTemplate):\n key = types.ArrayCTypes\n\n def resolve_data(self, ctinfo):\n return types.uintp\n\n\n@infer_getattr\nclass ArrayFlagsAttribute(AttributeTemplate):\n key = types.ArrayFlags\n\n def resolve_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_c_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_f_contiguous(self, ctflags):\n return types.boolean\n\n\n@infer_getattr\nclass NestedArrayAttribute(ArrayAttribute):\n key = types.NestedArray\n\n\ndef _expand_integer(ty):\n \"\"\"\n If *ty* is an integer, expand it to a machine int (like Numpy).\n \"\"\"\n if isinstance(ty, types.Integer):\n if ty.signed:\n return max(types.intp, ty)\n else:\n return max(types.uintp, ty)\n elif isinstance(ty, types.Boolean):\n return types.intp\n else:\n return ty\n\ndef generic_homog(self, args, kws):\n assert not args\n assert not kws\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_expand(self, args, kws):\n assert not args\n assert not kws\n return signature(_expand_integer(self.this.dtype), recvr=self.this)\n\ndef sum_expand(self, args, kws):\n \"\"\"\n sum can be called with or 
without an axis parameter.\n \"\"\"\n pysig = None\n if kws:\n def sum_stub(axis):\n pass\n pysig = utils.pysignature(sum_stub)\n # rewrite args\n args = list(args) + [kws['axis']]\n kws = None\n args_len = len(args)\n assert args_len <= 1\n if args_len == 0:\n # No axis parameter so the return type of the summation is a scalar\n # of the type of the array.\n out = signature(_expand_integer(self.this.dtype), *args,\n recvr=self.this)\n else:\n # There is an axis paramter so the return type of this summation is\n # an array of dimension one less than the input array.\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=self.this.ndim-1, layout='C')\n out = signature(return_type, *args, recvr=self.this)\n return out.replace(pysig=pysig)\n\ndef generic_expand_cumulative(self, args, kws):\n assert not args\n assert not kws\n assert isinstance(self.this, types.Array)\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=1, layout='C')\n return signature(return_type, recvr=self.this)\n\ndef generic_hetero_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_index(self, args, kws):\n assert not args\n assert not kws\n return signature(types.intp, recvr=self.this)\n\ndef install_array_method(name, generic, support_literals=False):\n my_attr = {\"key\": \"array.\" + name, \"generic\": generic}\n temp_class = type(\"Array_\" + name, (AbstractTemplate,), my_attr)\n if support_literals:\n temp_class.support_literals = support_literals\n def array_attribute_attachment(self, ary):\n return types.BoundFunction(temp_class, ary)\n\n setattr(ArrayAttribute, \"resolve_\" + name, array_attribute_attachment)\n\n# Functions that return the same type as the array\nfor fname in [\"min\", \"max\"]:\n install_array_method(fname, generic_homog)\n\n# Functions that return a machine-width type, to avoid overflows\ninstall_array_method(\"prod\", generic_expand)\ninstall_array_method(\"sum\", sum_expand, support_literals=True)\n\n# Functions that return a machine-width type, to avoid overflows\nfor fname in [\"cumsum\", \"cumprod\"]:\n install_array_method(fname, generic_expand_cumulative)\n\n# Functions that require integer arrays get promoted to float64 return\nfor fName in [\"mean\", \"var\", \"std\"]:\n install_array_method(fName, generic_hetero_real)\n\n# Functions that return an index (intp)\ninstall_array_method(\"argmin\", generic_index)\ninstall_array_method(\"argmax\", generic_index)\n\n\n@infer\nclass CmpOpEqArray(AbstractTemplate):\n key = '=='\n\n def generic(self, args, kws):\n assert not kws\n [va, vb] = args\n if isinstance(va, types.Array) and va == vb:\n return signature(va.copy(dtype=types.boolean), va, vb)\n", "from __future__ import print_function, division, absolute_import\n\nimport sys\n\nimport numpy as np\n\nfrom numba import unittest_support as unittest\nfrom numba import types\nfrom numba.extending import typeof_impl\nfrom numba.cuda.kernels.transpose import transpose\nfrom numba.tracing import event\nfrom numba import SmartArray\nfrom numba.cuda.testing import skip_on_cudasim, SerialMixin\n\n@skip_on_cudasim('Simulator does not support Device arrays')\nclass TestJIT(SerialMixin, unittest.TestCase):\n \"\"\"Test handling of numba.SmartArray\"\"\"\n\n def test_transpose(self):\n\n # To verify non-redundant data movement run this test with NUMBA_TRACE=1\n a = 
SmartArray(np.arange(16, dtype=float).reshape(4,4))\n b = SmartArray(where='gpu', shape=(4,4), dtype=float)\n c = SmartArray(where='gpu', shape=(4,4), dtype=float)\n event(\"initialization done\")\n transpose(a, b)\n event(\"checkpoint\")\n transpose(b, c)\n event(\"done\")\n self.assertTrue((c.get('host') == a.get('host')).all())\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.random.seed", "numpy.sum", "numpy.random.random" ], [ "numpy.float64", "numpy.complex128", "numpy.random.RandomState" ], [ "numpy.ndim" ], [ "numpy.arange" ] ]
brusic/nlpia
[ "e239074eaa1fd51eb1c9a35d53a69e3b15343f57" ]
[ "nlpia/book/examples/ch09.py" ]
[ "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nimport re\nimport tarfile\n\nimport requests\n\nfrom pugnlp.futil import path_status, find_files\n\n\n# In[ ]:\n\n\n# From the nlpia package for downloading data too big for the repo\n\nBIG_URLS = {\n 'w2v': (\n 'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',\n 1647046227,\n ),\n 'slang': (\n 'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',\n 117633024,\n ),\n 'tweets': (\n 'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',\n 311725313,\n ),\n 'lsa_tweets': (\n 'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',\n 3112841563, # 3112841312,\n ),\n 'imdb': (\n 'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',\n 3112841563, # 3112841312,\n ),\n}\n\n\n# In[ ]:\n\n\n# These functions are part of the nlpia package which can be pip installed and run from there.\ndef dropbox_basename(url):\n filename = os.path.basename(url)\n match = re.findall(r'\\?dl=[0-9]$', filename)\n if match:\n return filename[:-len(match[0])]\n return filename\n\ndef download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):\n \"\"\"Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https\"\"\"\n if filename is None:\n filename = dropbox_basename(url)\n file_path = os.path.join(data_path, filename)\n if url.endswith('?dl=0'):\n url = url[:-1] + '1' # noninteractive download\n if verbose:\n tqdm_prog = tqdm\n print('requesting URL: {}'.format(url))\n else:\n tqdm_prog = no_tqdm\n r = requests.get(url, stream=True, allow_redirects=True)\n size = r.headers.get('Content-Length', None) if size is None else size\n print('remote size: {}'.format(size))\n\n stat = path_status(file_path)\n print('local size: {}'.format(stat.get('size', None)))\n if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file\n r.close()\n return file_path\n\n print('Downloading to {}'.format(file_path))\n\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk: # filter out keep-alive chunks\n f.write(chunk)\n\n r.close()\n return file_path\n\ndef untar(fname):\n if fname.endswith(\"tar.gz\"):\n with tarfile.open(fname) as tf:\n tf.extractall()\n else:\n print(\"Not a tar.gz file: {}\".format(fname))\n\n\n# In[ ]:\n\n\n# UNCOMMENT these 2 lines if you haven't already download the word2vec model and the imdb dataset\n# download_file(BIG_URLS['w2v'][0])\n# untar(download_file(BIG_URLS['imdb'][0]))\n\n\n# In[ ]:\n\n\nmaxlen = 400\nbatch_size = 32\nembedding_dims = 300\nepochs = 2\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, LSTM\n\nnum_neurons = 50\n\nprint('Build model...')\nmodel = Sequential()\n\nmodel.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))\nmodel.add(Dropout(.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])\nprint(model.summary())\n\n\n# In[ ]:\n\n\nimport glob\nimport os\n\nfrom random import shuffle\n\ndef pre_process_data(filepath):\n \"\"\"\n This is dependent on your training data source but we will try to generalize it as best as possible.\n \"\"\"\n positive_path = os.path.join(filepath, 'pos')\n negative_path = os.path.join(filepath, 'neg')\n \n pos_label = 1\n neg_label = 0\n \n dataset = []\n \n for filename in 
glob.glob(os.path.join(positive_path, '*.txt')):\n with open(filename, 'r') as f:\n dataset.append((pos_label, f.read()))\n \n for filename in glob.glob(os.path.join(negative_path, '*.txt')):\n with open(filename, 'r') as f:\n dataset.append((neg_label, f.read()))\n \n shuffle(dataset)\n \n return dataset\n\n\n# In[ ]:\n\n\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom gensim.models.keyedvectors import KeyedVectors\nword_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)\n\ndef tokenize_and_vectorize(dataset):\n tokenizer = TreebankWordTokenizer()\n vectorized_data = []\n expected = []\n for sample in dataset:\n tokens = tokenizer.tokenize(sample[1])\n sample_vecs = []\n for token in tokens:\n try:\n sample_vecs.append(word_vectors[token])\n\n except KeyError:\n pass # No matching token in the Google w2v vocab\n \n vectorized_data.append(sample_vecs)\n\n return vectorized_data\n\n\n# In[ ]:\n\n\ndef collect_expected(dataset):\n \"\"\" Peel of the target values from the dataset \"\"\"\n expected = []\n for sample in dataset:\n expected.append(sample[0])\n return expected\n\n\n# In[ ]:\n\n\ndef pad_trunc(data, maxlen):\n \"\"\" For a given dataset pad with zero vectors or truncate to maxlen \"\"\"\n new_data = []\n\n # Create a vector of 0's the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n \n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = sample\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\n# In[ ]:\n\n\nimport numpy as np\n\ndataset = pre_process_data('./aclImdb_v1/train')\nvectorized_data = tokenize_and_vectorize(dataset)\nexpected = collect_expected(dataset)\n\nsplit_point = int(len(vectorized_data)*.8)\n\nx_train = vectorized_data[:split_point]\ny_train = expected[:split_point]\nx_test = vectorized_data[split_point:]\ny_test = expected[split_point:]\n\nmaxlen = 400\nbatch_size = 32 # How many samples to show the net before backpropogating the error and updating the weights\nembedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet\nepochs = 2\n\nx_train = pad_trunc(x_train, maxlen)\nx_test = pad_trunc(x_test, maxlen)\n\nx_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))\ny_train = np.array(y_train)\nx_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))\ny_test = np.array(y_test)\n\n\n# In[ ]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, LSTM\n\nnum_neurons = 50\n\nprint('Build model...')\nmodel = Sequential()\n\nmodel.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))\nmodel.add(Dropout(.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])\nprint(model.summary())\n\n\n# In[ ]:\n\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\nmodel_structure = model.to_json()\nwith open(\"lstm_model1.json\", \"w\") as json_file:\n json_file.write(model_structure)\n\nmodel.save_weights(\"lstm_weights1.h5\")\nprint('Model saved.')\n\n\n# In[ ]:\n\n\nfrom keras.models import model_from_json\nwith open(\"lstm_model1.json\", \"r\") as json_file:\n json_string = json_file.read()\nmodel = 
model_from_json(json_string)\n\nmodel.load_weights('lstm_weights1.h5')\n\n\n# In[ ]:\n\n\nsample_1 = \"I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend.\"\n\n# We pass a dummy value in the first element of the tuple just because our helper expects it from the way processed the initial data. That value won't ever see the network, so it can be whatever.\nvec_list = tokenize_and_vectorize([(1, sample_1)])\n\n# Tokenize returns a list of the data (length 1 here)\ntest_vec_list = pad_trunc(vec_list, maxlen)\n\ntest_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))\n\nprint(\"Sample's sentiment, 1 - pos, 2 - neg : {}\".format(model.predict_classes(test_vec)))\nprint(\"Raw output of sigmoid function: {}\".format(model.predict(test_vec)))\n\n\n# In[ ]:\n\n\ndef test_len(data, maxlen):\n total_len = truncated = exact = padded = 0\n for sample in data:\n total_len += len(sample)\n if len(sample) > maxlen:\n truncated += 1\n elif len(sample) < maxlen:\n padded += 1\n else:\n exact +=1 \n print('Padded: {}'.format(padded))\n print('Equal: {}'.format(exact))\n print('Truncated: {}'.format(truncated))\n print('Avg length: {}'.format(total_len/len(data)))\n\ndataset = pre_process_data('./aclImdb_v1/train')\nvectorized_data = tokenize_and_vectorize(dataset)\ntest_len(vectorized_data, 400)\n\n\n# In[ ]:\n\n\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, LSTM\n\n\nmaxlen = 200\nbatch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights\nembedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet\n\nepochs = 2\n\ndataset = pre_process_data('./aclImdb_v1/train')\nvectorized_data = tokenize_and_vectorize(dataset)\nexpected = collect_expected(dataset)\n\nsplit_point = int(len(vectorized_data)*.8)\n\nx_train = vectorized_data[:split_point]\ny_train = expected[:split_point]\nx_test = vectorized_data[split_point:]\ny_test = expected[split_point:]\n\nx_train = pad_trunc(x_train, maxlen)\nx_test = pad_trunc(x_test, maxlen)\n\nx_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))\ny_train = np.array(y_train)\nx_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))\ny_test = np.array(y_test)\n\nnum_neurons = 50\n\nprint('Build model...')\nmodel = Sequential()\n\nmodel.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))\nmodel.add(Dropout(.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])\nprint(model.summary())\n\n\n# In[ ]:\n\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\nmodel_structure = model.to_json()\nwith open(\"lstm_model7.json\", \"w\") as json_file:\n json_file.write(model_structure)\n\nmodel.save_weights(\"lstm_weights7.h5\")\nprint('Model saved.')\n\n\n# In[ ]:\n\n\ndataset = pre_process_data('./aclImdb_v1/train')\nexpected = collect_expected(dataset)\n\n\n# In[ ]:\n\n\ndef avg_len(data):\n total_len = 0\n for sample in data:\n total_len += len(sample[1])\n print(total_len/len(data))\n\nprint(avg_len(dataset))\n\n\n# In[ ]:\n\n\ndef clean_data(data):\n \"\"\" Shift to lower case, replace unknowns with UNK, and listify \"\"\"\n new_data = []\n VALID = 
'abcdefghijklmnopqrstuvwxyz123456789\"\\'?!.,:; '\n for sample in data:\n new_sample = []\n for char in sample[1].lower(): # Just grab the string, not the label\n if char in VALID:\n new_sample.append(char)\n else:\n new_sample.append('UNK')\n \n new_data.append(new_sample)\n return new_data\n\nlistified_data = clean_data(dataset)\n\n\n# In[ ]:\n\n\ndef char_pad_trunc(data, maxlen):\n \"\"\" We truncate to maxlen or add in PAD tokens \"\"\"\n new_dataset = []\n for sample in data:\n if len(sample) > maxlen:\n new_data = sample[:maxlen]\n elif len(sample) < maxlen:\n pads = maxlen - len(sample)\n new_data = sample + ['PAD'] * pads\n else:\n new_data = sample\n new_dataset.append(new_data)\n return new_dataset\n\nmaxlen = 1500\n\n\n# In[ ]:\n\n\ndef create_dicts(data):\n \"\"\" Modified from Keras LSTM example\"\"\"\n chars = set()\n for sample in data:\n chars.update(set(sample))\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n return char_indices, indices_char\n\n\n# In[ ]:\n\n\nimport numpy as np\n\ndef onehot_encode(dataset, char_indices, maxlen):\n \"\"\" \n One hot encode the tokens\n \n Args:\n dataset list of lists of tokens\n char_indices dictionary of {key=character, value=index to use encoding vector}\n maxlen int Length of each sample\n Return:\n np array of shape (samples, tokens, encoding length)\n \"\"\"\n X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))\n for i, sentence in enumerate(dataset):\n for t, char in enumerate(sentence):\n X[i, t, char_indices[char]] = 1\n return X\n\n\n# In[ ]:\n\n\ndataset = pre_process_data('./aclImdb_v1/train')\nexpected = collect_expected(dataset)\nlistified_data = clean_data(dataset)\n\nmaxlen = 1500\ncommon_length_data = char_pad_trunc(listified_data, maxlen)\n\nchar_indices, indices_char = create_dicts(common_length_data)\nencoded_data = onehot_encode(common_length_data, char_indices, maxlen)\n\n\n# In[ ]:\n\n\nsplit_point = int(len(encoded_data)*.8)\n\nx_train = encoded_data[:split_point]\ny_train = expected[:split_point]\nx_test = encoded_data[split_point:]\ny_test = expected[split_point:]\n\n\n# In[ ]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, Flatten, LSTM\n\n\nnum_neurons = 40\n\nprint('Build model...')\nmodel = Sequential()\n\nmodel.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))\nmodel.add(Dropout(.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])\nprint(model.summary())\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nbatch_size = 32\nepochs = 10\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\nmodel_structure = model.to_json()\nwith open(\"char_lstm_model3.json\", \"w\") as json_file:\n json_file.write(model_structure)\n\nmodel.save_weights(\"char_lstm_weights3.h5\")\nprint('Model saved.')\n\n\n# In[ ]:\n\n\nfrom nltk.corpus import gutenberg\n\nprint(gutenberg.fileids())\n\n\n# In[ ]:\n\n\ntext = ''\nfor txt in gutenberg.fileids():\n if 'shakespeare' in txt:\n text += gutenberg.raw(txt).lower()\n\nprint('corpus length:', len(text))\n\nchars = sorted(list(set(text)))\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\n\n# In[ ]:\n\n\nprint(text[:500])\n\n\n# In[ ]:\n\n\n# cut the text in semi-redundant sequences 
of maxlen characters\nmaxlen = 40\nstep = 3\nsentences = []\nnext_chars = []\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\nprint('nb sequences:', len(sentences))\n\n\n# In[ ]:\n\n\nprint('Vectorization...')\nX = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n X[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1\n\n\n# In[ ]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\n\n# build the model: a single LSTM\nprint('Build model...')\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(Dense(len(chars)))\nmodel.add(Activation('softmax'))\n\noptimizer = RMSprop(lr=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\nprint(model.summary())\n\n\n# In[ ]:\n\n\nepochs = 6\nbatch_size = 128\n\nmodel_structure = model.to_json()\nwith open(\"shakes_lstm_model.json\", \"w\") as json_file:\n json_file.write(model_structure)\n\nfor i in range(5):\n model.fit(X, y,\n batch_size=batch_size,\n epochs=epochs)\n\n model.save_weights(\"shakes_lstm_weights_{}.h5\".format(i+1))\n print('Model saved.')\n\n\n# In[ ]:\n\n\n### NOT IN CHAPTER, Just to reproduce output\n\nfrom keras.models import model_from_json\n\nwith open('shakes_lstm_model.json', 'r') as f:\n model_json = f.read()\n \nmodel = model_from_json(model_json)\nmodel.load_weights('shakes_lstm_weights_4.h5')\n\n\n# In[ ]:\n\n\nimport random\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\n# In[ ]:\n\n\nimport sys\n\nstart_index = random.randint(0, len(text) - maxlen - 1)\n\nfor diversity in [0.2, 0.5, 1.0]:\n print()\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(400):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n\n\n# In[ ]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import GRU\n\nmodel = Sequential()\nmodel.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))\n\n\n# In[ ]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\n\nmodel = Sequential()\nmodel.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))\nmodel.add(LSTM(num_neurons_2, return_sequences=True))\n\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.log", "numpy.sum", "numpy.exp", "numpy.argmax", "numpy.random.multinomial" ] ]
lbg251/ClusterTrellis
[ "06e9c8cd3f04f606e185b95f4d68703a34cc86ef" ]
[ "src/ClusterTrellis/utils.py" ]
[ "import os\nimport pickle\nimport string\nimport time\nimport logging\nimport numpy as np\n\n\n\ndef get_logger(name=__file__, level=logging.INFO):\n logger = logging.getLogger(name)\n\n if getattr(logger, \"_init_done__\", None):\n logger.setLevel(level)\n return logger\n\n logger._init_done__ = True\n logger.propagate = False\n logger.setLevel(level)\n\n formatter = logging.Formatter(\"%(asctime)s:%(levelname)s::%(message)s\")\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(0)\n\n del logger.handlers[:]\n logger.addHandler(handler)\n\n return logger\n\n\n## Utils\ndef load_jets():\n root_dir = \"data/\"\n filename = os.path.join(root_dir, \"TruthBS_10\")\n with open(filename + \".pkl\", \"rb\") as fd:\n Truth10, BS10 = pickle.load(fd, encoding='latin-1')\n return Truth10, BS10\n\ndef sumLogLH(jetList):\n for jet in jetList:\n jet[\"totLogLH\"] = np.sum(jet[\"logLH\"])\n\ndef getConstituents(jet, node_id, outers_list):\n \"\"\"\n Recursive function to get a list of the tree leaves\n \"\"\"\n if jet[\"tree\"][node_id, 0] == -1:\n\n outers_list.append(jet[\"content\"][node_id])\n\n else:\n getConstituents(\n jet,\n jet[\"tree\"][node_id, 0],\n outers_list,)\n\n getConstituents(\n jet,\n jet[\"tree\"][node_id, 1],\n outers_list,)\n\n return outers_list\n\ndef get_leaves(jet):\n return getConstituents(jet, jet[\"root_id\"], [])" ]
[ [ "numpy.sum" ] ]
matsuken92/optuna
[ "79ce70c24c2150ffc8a4fd243e664976c6e1acef" ]
[ "examples/xgboost_simple.py" ]
[ "\"\"\"\nOptuna example that optimizes a classifier configuration for cancer dataset\nusing XGBoost.\n\nIn this example, we optimize the validation accuracy of cancer detection\nusing XGBoost. We optimize both the choice of booster model and their hyper\nparameters.\n\nWe have following two ways to execute this example:\n\n(1) Execute this code directly.\n $ python xgboost_simple.py\n\n\n(2) Execute through CLI.\n $ STUDY_NAME=`optuna create-study --storage sqlite:///example.db`\n $ optuna study optimize xgboost_simple.py objective --n-trials=100 --study $STUDY_NAME \\\n --storage sqlite:///example.db\n\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.metrics\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\n\nimport optuna\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n (data, target) = sklearn.datasets.load_breast_cancer(return_X_y=True)\n train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)\n dtrain = xgb.DMatrix(train_x, label=train_y)\n dtest = xgb.DMatrix(test_x, label=test_y)\n\n param = {\n 'silent': 1,\n 'objective': 'binary:logistic',\n 'booster': trial.suggest_categorical('booster', ['gbtree', 'gblinear', 'dart']),\n 'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0),\n 'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)\n }\n\n if param['booster'] == 'gbtree' or param['booster'] == 'dart':\n param['max_depth'] = trial.suggest_int('max_depth', 1, 9)\n param['eta'] = trial.suggest_loguniform('eta', 1e-8, 1.0)\n param['gamma'] = trial.suggest_loguniform('gamma', 1e-8, 1.0)\n param['grow_policy'] = trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide'])\n if param['booster'] == 'dart':\n param['sample_type'] = trial.suggest_categorical('sample_type', ['uniform', 'weighted'])\n param['normalize_type'] = trial.suggest_categorical('normalize_type', ['tree', 'forest'])\n param['rate_drop'] = trial.suggest_loguniform('rate_drop', 1e-8, 1.0)\n param['skip_drop'] = trial.suggest_loguniform('skip_drop', 1e-8, 1.0)\n\n bst = xgb.train(param, dtrain)\n preds = bst.predict(dtest)\n pred_labels = np.rint(preds)\n accuracy = sklearn.metrics.accuracy_score(test_y, pred_labels)\n return 1.0 - accuracy\n\n\nif __name__ == '__main__':\n study = optuna.create_study()\n study.optimize(objective, n_trials=100)\n print(study.best_trial)\n" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.rint" ] ]
J-Moravec/pairtree
[ "91cbba628b78aea31034efb080976fdb47d83976", "91cbba628b78aea31034efb080976fdb47d83976" ]
[ "comparison/pwgs/convert_inputs.py", "comparison/pairtree/convert_simdata_to_results.py" ]
[ "import sys\nimport os\nimport argparse\nimport json\nimport numpy as np\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))\nimport inputparser\nimport clustermaker\n\ndef write_ssms(variants, outfn):\n _stringify = lambda A: ','.join([str(V) for V in A])\n mu_r = 0.999\n cols = ('id', 'gene', 'a', 'd', 'mu_r', 'mu_v')\n\n with open(outfn, 'w') as outf:\n print(*cols, sep='\\t', file=outf)\n for V in variants.values():\n assert len(set(V['omega_v'])) == 1\n\n variant = {\n 'id': 's%s' % int(V['id'][1:]),\n 'gene': V['name'],\n 'a': _stringify(V['ref_reads']),\n 'd': _stringify(V['total_reads']),\n 'mu_r': mu_r,\n 'mu_v': np.mean(1 - V['omega_v']),\n }\n print(*[variant[K] for K in cols], sep='\\t', file=outf)\n\ndef write_params(sampnames, outfn):\n with open(outfn, 'w') as outf:\n json.dump({'samples': sampnames}, outf)\n\ndef main():\n parser = argparse.ArgumentParser(\n description='LOL HI THERE',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('--use-supervars', dest='use_supervars', action='store_true')\n parser.add_argument('ssm_fn')\n parser.add_argument('params_fn')\n parser.add_argument('pwgs_ssm_fn')\n parser.add_argument('pwgs_params_fn')\n args = parser.parse_args()\n\n variants = inputparser.load_ssms(args.ssm_fn)\n params = inputparser.load_params(args.params_fn)\n\n if args.use_supervars:\n variants = clustermaker.make_cluster_supervars(params['clusters'], variants)\n write_ssms(variants, args.pwgs_ssm_fn)\n write_params(params['samples'], args.pwgs_params_fn)\n\nif __name__ == '__main__':\n main()\n", "import pickle\nimport numpy as np\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser(\n description='LOL HI THERE',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('pickle_fn')\n parser.add_argument('results_fn')\n args = parser.parse_args()\n\n with open(args.pickle_fn, 'rb') as F:\n truth = pickle.load(F)\n np.savez_compressed(\n args.results_fn, \n clusters = truth['clusters'],\n garbage = truth['vids_garbage'],\n phi = [truth['phi']],\n adjm = [truth['adjm']],\n llh = [0],\n )\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean" ], [ "numpy.savez_compressed" ] ]
tbarford/bg_streamlit_demo
[ "72e54d2c9cff278edf1852ae04893fdb80d8ed4d" ]
[ "streamlit_app.py" ]
[ "##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\r\n##python3 script created by tBarford on 20220205\r\n##\r\n##\r\n##File Description: This is the streamlit webapp MVP for BG Golf EI Profile Database Demo\r\n## run in term w/ : streamlit run streamlit_app.py\r\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~## \r\n\r\nimport streamlit as st\r\nimport firestoreservice as fs\r\nfrom matplotlib import pyplot as plt\r\nimport PIL as img\r\n\r\ndef main():\r\n firestore = fs.FirestoreService()\r\n\r\n ## Sidebar\r\n with st.sidebar:\r\n st.subheader('Shaft Selection Tools:')\r\n shaftType = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type')\r\n shaft = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType), key = 'shaft')\r\n stiffness = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType, shaft), key = 'stiff')\r\n compare = st.radio('Compare another shaft?', options = ['No', 'Yes'])\r\n if compare == 'Yes':\r\n shaftType_compare = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type2')\r\n shaft_compare = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType_compare), key = 'shaft2')\r\n stiffness_compare = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType_compare, shaft_compare), key = 'stiff2')\r\n else:\r\n shaftType_compare, shaft_compare, stiffness_compare = None, None, None\r\n \r\n\r\n ## Main Content\r\n st.image(img.Image.open('./assets/bg_logo_horz.png'), use_column_width=True)\r\n st.header('Shaft Profile Demo')\r\n\r\n #manage shafts to plot\r\n if stiffness is not None:\r\n dataToPlot = {f'{shaft} {stiffness}':firestore.getEI(shaftType, shaft, stiffness)}\r\n if stiffness_compare is not None:\r\n dataToPlot[f'{shaft_compare} {stiffness_compare}'] = firestore.getEI(shaftType_compare, shaft_compare, stiffness_compare)\r\n\r\n\r\n if st.button('Update Plot'):\r\n \r\n fig, ax = plt.subplots()\r\n for each in dataToPlot.keys():\r\n ax.plot(dataToPlot[each][0], dataToPlot[each][1], label = each)\r\n \r\n ax.set(xlabel='Length From Tip (in.)', ylabel='EI',\r\n title='BG Measured EI Curve')\r\n ax.grid()\r\n ax.legend()\r\n st.pyplot(fig)\r\n\r\nif __name__ == '__main__':\r\n main()" ]
[ [ "matplotlib.pyplot.subplots" ] ]
Yudonggeun/PySC2-Tutorial
[ "80449c3b5774a58e8ee6490379890e9abd60a11a" ]
[ "7. Using Reward for Agent/reward_agent.py" ]
[ "import random\nimport math\nimport os.path\n\nimport numpy as np\nimport pandas as pd\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id\n_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id\n_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index\n_PLAYER_ID = features.SCREEN_FEATURES.player_id.index\n\n_PLAYER_SELF = 1\n_PLAYER_HOSTILE = 4\n_ARMY_SUPPLY = 5\n\n_TERRAN_COMMANDCENTER = 18\n_TERRAN_SCV = 45\n_TERRAN_SUPPLY_DEPOT = 19\n_TERRAN_BARRACKS = 21\n_NEUTRAL_MINERAL_FIELD = 341\n\n_NOT_QUEUED = [0]\n_QUEUED = [1]\n_SELECT_ALL = [2]\n\nDATA_FILE = 'sparse_agent_data'\n\nACTION_DO_NOTHING = 'donothing'\nACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'\nACTION_BUILD_BARRACKS = 'buildbarracks'\nACTION_BUILD_MARINE = 'buildmarine'\nACTION_ATTACK = 'attack'\n\nsmart_actions = [\n ACTION_DO_NOTHING,\n ACTION_BUILD_SUPPLY_DEPOT,\n ACTION_BUILD_BARRACKS,\n ACTION_BUILD_MARINE,\n]\n\nfor mm_x in range(0, 64):\n for mm_y in range(0, 64):\n if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:\n smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))\n\n\n# Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow\nclass QLearningTable:\n def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n self.actions = actions # a list\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon = e_greedy\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n def choose_action(self, observation):\n self.check_state_exist(observation)\n\n if np.random.uniform() < self.epsilon:\n # choose best action\n state_action = self.q_table.ix[observation, :]\n\n # some actions have the same value\n state_action = state_action.reindex(np.random.permutation(state_action.index))\n\n action = state_action.idxmax()\n else:\n # choose random action\n action = np.random.choice(self.actions)\n\n return action\n\n def learn(self, s, a, r, s_):\n self.check_state_exist(s_)\n self.check_state_exist(s)\n\n q_predict = self.q_table.ix[s, a]\n\n if s_ != 'terminal':\n q_target = r + self.gamma * self.q_table.ix[s_, :].max()\n else:\n q_target = r # next state is terminal\n\n # update\n self.q_table.ix[s, a] += self.lr * (q_target - q_predict)\n\n def check_state_exist(self, state):\n if state not in self.q_table.index:\n # append new state to q table\n self.q_table = self.q_table.append(\n pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))\n\n\nclass SparseAgent(base_agent.BaseAgent):\n def __init__(self):\n super(SparseAgent, self).__init__()\n\n self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))\n\n self.previous_action = None\n self.previous_state = None\n\n self.cc_y = None\n self.cc_x = None\n\n self.move_number = 0\n\n if os.path.isfile(DATA_FILE + '.gz'):\n self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')\n\n def transformDistance(self, x, x_distance, y, y_distance):\n if not self.base_top_left:\n return [x - x_distance, y - y_distance]\n\n return [x + 
x_distance, y + y_distance]\n\n def transformLocation(self, x, y):\n if not self.base_top_left:\n return [64 - x, 64 - y]\n\n return [x, y]\n\n def splitAction(self, action_id):\n smart_action = smart_actions[action_id]\n\n x = 0\n y = 0\n if '_' in smart_action:\n smart_action, x, y = smart_action.split('_')\n\n return (smart_action, x, y)\n\n def step(self, obs):\n super(SparseAgent, self).step(obs)\n\n if obs.last():\n reward = obs.reward\n\n self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')\n\n self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')\n\n self.previous_action = None\n self.previous_state = None\n\n self.move_number = 0\n\n return actions.FunctionCall(_NO_OP, [])\n\n unit_type = obs.observation['screen'][_UNIT_TYPE]\n\n if obs.first():\n player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()\n self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0\n\n self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n\n cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n cc_count = 1 if cc_y.any() else 0\n\n depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()\n supply_depot_count = int(round(len(depot_y) / 69))\n\n barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()\n barracks_count = int(round(len(barracks_y) / 137))\n\n if self.move_number == 0:\n self.move_number += 1\n\n current_state = np.zeros(8)\n current_state[0] = cc_count\n current_state[1] = supply_depot_count\n current_state[2] = barracks_count\n current_state[3] = obs.observation['player'][_ARMY_SUPPLY]\n\n hot_squares = np.zeros(4)\n enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()\n for i in range(0, len(enemy_y)):\n y = int(math.ceil((enemy_y[i] + 1) / 32))\n x = int(math.ceil((enemy_x[i] + 1) / 32))\n\n hot_squares[((y - 1) * 2) + (x - 1)] = 1\n\n if not self.base_top_left:\n hot_squares = hot_squares[::-1]\n\n for i in range(0, 4):\n current_state[i + 4] = hot_squares[i]\n\n if self.previous_action is not None:\n self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))\n\n rl_action = self.qlearn.choose_action(str(current_state))\n\n self.previous_state = current_state\n self.previous_action = rl_action\n\n smart_action, x, y = self.splitAction(self.previous_action)\n\n if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()\n\n if unit_y.any():\n i = random.randint(0, len(unit_y) - 1)\n target = [unit_x[i], unit_y[i]]\n\n return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n\n elif smart_action == ACTION_BUILD_MARINE:\n if barracks_y.any():\n i = random.randint(0, len(barracks_y) - 1)\n target = [barracks_x[i], barracks_y[i]]\n\n return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])\n\n elif smart_action == ACTION_ATTACK:\n if _SELECT_ARMY in obs.observation['available_actions']:\n return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])\n\n elif self.move_number == 1:\n self.move_number += 1\n\n smart_action, x, y = self.splitAction(self.previous_action)\n\n if smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:\n if self.cc_y.any():\n if supply_depot_count == 0:\n target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)\n elif supply_depot_count == 
1:\n target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)\n\n return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])\n\n elif smart_action == ACTION_BUILD_BARRACKS:\n if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']:\n if self.cc_y.any():\n if barracks_count == 0:\n target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9)\n elif barracks_count == 1:\n target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)\n\n return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])\n\n elif smart_action == ACTION_BUILD_MARINE:\n if _TRAIN_MARINE in obs.observation['available_actions']:\n return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])\n\n elif smart_action == ACTION_ATTACK:\n do_it = True\n\n if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:\n do_it = False\n\n if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:\n do_it = False\n\n if do_it and _ATTACK_MINIMAP in obs.observation[\"available_actions\"]:\n x_offset = random.randint(-1, 1)\n y_offset = random.randint(-1, 1)\n\n return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED,\n self.transformLocation(int(x) + (x_offset * 8),\n int(y) + (y_offset * 8))])\n\n elif self.move_number == 2:\n self.move_number = 0\n\n smart_action, x, y = self.splitAction(self.previous_action)\n\n if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n if _HARVEST_GATHER in obs.observation['available_actions']:\n unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()\n\n if unit_y.any():\n i = random.randint(0, len(unit_y) - 1)\n\n m_x = unit_x[i]\n m_y = unit_y[i]\n\n target = [int(m_x), int(m_y)]\n\n return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])\n\n return actions.FunctionCall(_NO_OP, [])" ]
[ [ "pandas.read_pickle", "numpy.random.choice", "numpy.zeros", "pandas.DataFrame", "numpy.random.permutation", "numpy.random.uniform" ] ]
UpSea/midProjects
[ "ed6086e74f68b1b89f725abe0b270e67cf8993a8" ]
[ "BasicOperations/05_Pandas/05_Pandas_02_groupby.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndf = pd.DataFrame(np.random.randn(10,3),columns=['a','b','c'],index=list('abcdefghij'))\nprint(df)\ndf.ix[::2,0] = np.nan; df.ix[::4,1] = np.nan; df.ix[::3,2] = np.nan;\n\ndf = df.dropna(subset=['a','b']) #mid drop rows where column 'a' or 'b' is NaN\nbins = np.arange(-3,3,0.1)\nbins = [-100,0,100]\nindices = np.digitize(df.a,bins)\n'''\nbins represents a set of consecutive intervals, e.g. 0:[-1,2), 1:[2,7), 2:[7,9), 3:[9,10), written as the array [-1,2,7,9,10].\nnp.digitize() returns a sequence of numbers; the value at each position is the index of the interval in bins that the corresponding element of the first argument falls into.\n'''\ngroups = df.groupby(indices)\nprint('#'*20)\nfor i,group in groups:\n print(i,len(group))\n print(group)\nprint('#'*20)\nprint(groups.mean())\n" ]
[ [ "numpy.random.randn", "numpy.arange", "numpy.digitize" ] ]
rexxxx1234/SAUNet-demo
[ "20e968e1d42217c89cdf4fc304ed2d8717697eec" ]
[ "data/crop_and_pad_augmentations.py" ]
[ "# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import range\nimport numpy as np\nfrom batchgenerators.augmentations.utils import pad_nd_image\n\n\ndef center_crop(data, crop_size, seg=None):\n return crop(data, seg, crop_size, 0, 'center')\n\n\ndef get_lbs_for_random_crop(crop_size, data_shape, margins):\n \"\"\"\n :param crop_size:\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :param margins:\n :return:\n \"\"\"\n lbs = []\n for i in range(len(data_shape) - 2):\n if data_shape[i+2] - crop_size[i] - margins[i] > margins[i]:\n lbs.append(np.random.randint(margins[i], data_shape[i+2] - crop_size[i] - margins[i]))\n else:\n lbs.append((data_shape[i+2] - crop_size[i]) // 2)\n return lbs\n\n\ndef get_lbs_for_center_crop(crop_size, data_shape):\n \"\"\"\n :param crop_size:\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :return:\n \"\"\"\n lbs = []\n for i in range(len(data_shape) - 2):\n lbs.append((data_shape[i + 2] - crop_size[i]) // 2)\n return lbs\n\n\ndef crop(data, seg=None, crop_size=128, margins=(0, 0, 0), crop_type=\"center\",\n pad_mode='constant', pad_kwargs={'constant_values': 0},\n pad_mode_seg='constant', pad_kwargs_seg={'constant_values': 0}):\n \"\"\"\n crops data and seg (seg may be None) to crop_size. Whether this will be achieved via center or random crop is\n determined by crop_type. Margin will be respected only for random_crop and will prevent the crops form being closer\n than margin to the respective image border. crop_size can be larger than data_shape - margin -> data/seg will be\n padded with zeros in that case. margins can be negative -> results in padding of data/seg followed by cropping with\n margin=0 for the appropriate axes\n :param data: b, c, x, y(, z)\n :param seg:\n :param crop_size:\n :param margins: distance from each border, can be int or list/tuple of ints (one element for each dimension).\n Can be negative (data/seg will be padded if needed)\n :param crop_type: random or center\n :return:\n \"\"\"\n if not isinstance(data, (list, tuple, np.ndarray)):\n raise TypeError(\"data has to be either a numpy array or a list\")\n\n data_shape = tuple([len(data)] + list(data[0].shape))\n data_dtype = data[0].dtype\n dim = len(data_shape) - 2\n\n if seg is not None:\n seg_shape = tuple([len(seg)] + list(seg[0].shape))\n seg_dtype = seg[0].dtype\n\n if not isinstance(seg, (list, tuple, np.ndarray)):\n raise TypeError(\"data has to be either a numpy array or a list\")\n\n assert all([i == j for i, j in zip(seg_shape[2:], data_shape[2:])]), \"data and seg must have the same spatial \" \\\n \"dimensions. 
Data: %s, seg: %s\" % \\\n (str(data_shape), str(seg_shape))\n\n if type(crop_size) not in (tuple, list, np.ndarray):\n crop_size = [crop_size] * dim\n else:\n assert len(crop_size) == len(\n data_shape) - 2, \"If you provide a list/tuple as center crop make sure it has the same dimension as your \" \\\n \"data (2d/3d)\"\n\n if not isinstance(margins, (np.ndarray, tuple, list)):\n margins = [margins] * dim\n\n data_return = np.zeros([data_shape[0], data_shape[1]] + list(crop_size), dtype=data_dtype)\n if seg is not None:\n seg_return = np.zeros([seg_shape[0], seg_shape[1]] + list(crop_size), dtype=seg_dtype)\n else:\n seg_return = None\n\n for b in range(data_shape[0]):\n data_shape_here = [data_shape[0]] + list(data[b].shape)\n if seg is not None:\n seg_shape_here = [seg_shape[0]] + list(seg[b].shape)\n\n if crop_type == \"center\":\n lbs = get_lbs_for_center_crop(crop_size, data_shape_here)\n elif crop_type == \"random\":\n lbs = get_lbs_for_random_crop(crop_size, data_shape_here, margins)\n else:\n raise NotImplementedError(\"crop_type must be either center or random\")\n\n need_to_pad = [[0, 0]] + [[abs(min(0, lbs[d])),\n abs(min(0, data_shape_here[d + 2] - (lbs[d] + crop_size[d])))]\n for d in range(dim)]\n\n # we should crop first, then pad -> reduces i/o for memmaps, reduces RAM usage and improves speed\n ubs = [min(lbs[d] + crop_size[d], data_shape_here[d+2]) for d in range(dim)]\n lbs = [max(0, lbs[d]) for d in range(dim)]\n\n slicer_data = [slice(0, data_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]\n data_cropped = data[b][tuple(slicer_data)]\n\n if seg_return is not None:\n slicer_seg = [slice(0, seg_shape_here[1])] + [slice(lbs[d], ubs[d]) for d in range(dim)]\n seg_cropped = seg[b][tuple(slicer_seg)]\n\n if any([i > 0 for j in need_to_pad for i in j]):\n data_return[b] = np.pad(data_cropped, need_to_pad, pad_mode, **pad_kwargs)\n if seg_return is not None:\n seg_return[b] = np.pad(seg_cropped, need_to_pad, pad_mode_seg, **pad_kwargs_seg)\n else:\n data_return[b] = data_cropped\n if seg_return is not None:\n seg_return[b] = seg_cropped\n\n return data_return, seg_return\n\n\ndef random_crop(data, seg=None, crop_size=128, margins=[0, 0, 0]):\n return crop(data, seg, crop_size, margins, 'random')\n\n\ndef pad_nd_image_and_seg(data, seg, new_shape=None, must_be_divisible_by=None, pad_mode_data='constant',\n np_pad_kwargs_data=None, pad_mode_seg='constant', np_pad_kwargs_seg=None):\n \"\"\"\n Pads data and seg to new_shape. new_shape is thereby understood as min_shape (if data/seg is already larger then\n new_shape the shape stays the same for the dimensions this applies)\n :param data:\n :param seg:\n :param new_shape: if none then only must_be_divisible_by is applied\n :param must_be_divisible_by: UNet like architectures sometimes require the input to be divisibly by some number. 
This\n will modify new_shape if new_shape is not divisibly by this (by increasing it accordingly).\n must_be_divisible_by should be a list of int (one for each spatial dimension) and this list must have the same\n length as new_shape\n :param pad_mode_data: see np.pad\n :param np_pad_kwargs_data:see np.pad\n :param pad_mode_seg:see np.pad\n :param np_pad_kwargs_seg:see np.pad\n :return:\n \"\"\"\n sample_data = pad_nd_image(data, new_shape, mode=pad_mode_data, kwargs=np_pad_kwargs_data,\n return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)\n if seg is not None:\n sample_seg = pad_nd_image(seg, new_shape, mode=pad_mode_seg, kwargs=np_pad_kwargs_seg,\n return_slicer=False, shape_must_be_divisible_by=must_be_divisible_by)\n else:\n sample_seg = None\n return sample_data, sample_seg\n" ]
[ [ "numpy.pad", "numpy.random.randint" ] ]
861934367/genecast
[ "b4c5710aef526f4e3bdf0ba3594dab583068eca3" ]
[ "genecast_package/core.py" ]
[ "## this tool is the core function of cnv and snv analysis\n## author: taozhou\n## email: zhou.tao@genecast.com.cn\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport itertools\nimport seaborn as sns\nimport matplotlib.pylab as plt\nimport matplotlib.colors as mc\nfrom genecast_package.svm_analysis import feature_select, evaluate_model\nfrom sklearn.decomposition import PCA\nfrom collections import OrderedDict\nfrom collections import defaultdict\nimport datetime\nimport pandas as pd\nfrom scipy.stats import ranksums\nimport os\nimport sh\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef z_score(data, axis):\n if axis == 3:\n return data\n if axis == 1:\n z_scored = data\n else:\n z_scored = data.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n\ndef pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, color=None, name=None, args=None):\n data = z_score(data, axis=args.z_score)\n if len(data.columns) > 30:\n xticklabels = False\n if len(data) > 80:\n yticklabels = False\n vmin, vmax = data.unstack().quantile([.05, .95])\n if args.z_score == 3:\n vmin, vmax = 0, 4\n re = sns.clustermap(data, cmap=args.cmp, row_cluster=True, method=args.cluster_method, col_cluster=col_cluster, figsize=(13, 10), \\\n xticklabels=True, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)\n re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\n if col_cluster == False:\n for group, number in length.items():\n re.ax_col_colors.text((number[0] + number[1])/2 + 1.5 - len(group)/2, 1.2, group, size=30)\n re.savefig(name + \".\" + args.save)\n else:\n re.savefig(name + \"_col_cluster.\" + args.save)\n plt.close()\n\n\ndef make_col_color_heatmap(group_dic, args=None):\n common_color = [\"blue\", \"red\", \"green\", \"grey\"]\n color = {}; length = {}\n temp = 0\n i = 0\n for name, group in group_dic.items():\n length[name] = [temp, temp + len(group)]\n temp += len(group)\n for sample in group:\n color[sample] = common_color[i]\n i += 1\n if args.ac and args.bc:\n color[group1] = args.ac\n color[group2] = args.bc\n color = pd.Series(color)\n color.name = \"group\"\n return color, length\n\n\ndef pca(data, group_dic, n=None, args=None):\n pca = PCA(n_components=2)\n group = []\n length = OrderedDict()\n temp = 0\n for name, g in group_dic.items():\n length[name] = [temp, temp + len(g)]\n temp += len(g)\n group += g\n data = data[group]\n newData = pca.fit_transform(data.T)\n colors = {}\n colors1 = [\"blue\", \"red\", \"green\", 'turquoise', \"grey\"]\n i = 0\n for name, number in length.items():\n colors[name] = colors1[i]\n i += 1\n if args.ac and args.bc:\n colors[group1] = args.ac\n colors[group2] = args.bc\n for name, number in length.items():\n plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[name])\n plt.title(\"PCA analysis\", size=20)\n pc1 = 100*pca.explained_variance_ratio_[0]\n pc2 = 100*pca.explained_variance_ratio_[1]\n plt.xlabel(\"PC1(%.1f)\" % pc1, size=15)\n plt.ylabel(\"PC1(%.1f)\" % pc2, size=15)\n plt.legend()\n plt.savefig(\"PCA_%s.png\" % n)\n plt.close()\n\n\ndef plot_box(data, which, outname, palette, regulation, group, args=None):\n fig, ax1 = plt.subplots(figsize=(8,12))\n box_data = defaultdict(list)\n names = []\n if which == \"cnv\":\n 
how = \"mean\"\n for name, g in group.items():\n names.append(name)\n box_data[name] = data[g]\n else:\n how = \"sum\"\n for name, g in group.items():\n names.append(name)\n box_data[name] = data[g]\n z, p = ranksums(box_data[names[0]], box_data[names[1]])\n if p >= 0.05:\n plt.close()\n return\n data.to_csv(outname + \"_box_data_%s\" % (regulation) + \".txt\", sep=\"\\t\")\n if args.ac and args.bc:\n group1 = list(group.keys())[0]\n group2 = list(group.keys())[1]\n palette[group1] = args.ac\n palette[group2] = args.bc\n sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)\n ax1.set_title(\"Difference of %s (p = %f)\" % (which, p), size=30)\n ax1.set_ylabel('%s value' % (which), size=30)\n fig.autofmt_xdate(ha='center', rotation=0)\n plt.xticks(rotation=0, size=30)\n plt.legend()\n fig.savefig(r'%s_box_data_%s_%s_Boxplot.%s' % (outname, regulation, how, args.save), dpi=600, size=0.5)\n plt.close()\n\n\ndef databox(raw, which, outname=None, group=None, args=None):\n palette_up = {}; palette_down = {}\n up = []; down = []\n group1_data = raw[list(group.values())[0]]; group1 = list(group.keys())[0]\n group2_data = raw[list(group.values())[1]]; group2 = list(group.keys())[1]\n for gene in raw.index:\n if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:\n up.append(gene); palette_up[group1] = \"red\"; palette_up[group2] = \"blue\"\n else:\n down.append(gene); palette_down[group1] = \"blue\"; palette_down[group2] = \"red\"\n if len(palette_up) > 0:\n for i in up:\n plot_box(raw.ix[i], which, i, palette_up, \"up\", group, args=args)\n if len(palette_down) > 0:\n for i in down:\n plot_box(raw.ix[i], which, i, palette_down, \"down\", group, args=args)\n\n\ndef save_data_pdf(data, name, length, color, group_dic, which, args=None):\n data.to_csv(\"%s.txt\" % name, sep=\"\\t\")\n length = {key.split(\"/\")[-1]: value for key, value in length.items()}\n group_dic = {key.split(\"/\")[-1]: value for key, value in group_dic.items()}\n try:\n pheatmap(data, length, col_cluster=True, color=color, name=name, args=args)\n pheatmap(data, length, col_cluster=False, color=color, name=name, args=args)\n except MemoryError:\n print(\"you gene need too much MemoryError and i, so pass and do next\")\n pca(data, group_dic, n=name, args=args)\n databox(data, which, outname=name, group=group_dic, args=args)\n\n\ndef save_parameters(args=None):\n f = open(\"parameters.txt\", \"w\")\n for arg in dir(args):\n if not arg.startswith(\"_\"):\n f.write(arg + \": \" + str(getattr(args, arg)) + \"\\n\")\n f.close()\n\n\ndef make_result_folder(args=None, which=\"cnv\", fun=None):\n feature_genes = []; gene_lists = {}; color_length = {}\n os.chdir(args.outdir)\n i = datetime.datetime.now()\n # for two_group in itertools.combinations([args.group1, args.group2], 2):\n two_group = [args.group1[0].split(\"/\")[-2], args.group2[0].split(\"/\")[-2]]\n target = args.group1[0].split(\"/\")[-2] + \"_VS_\" + args.group2[0].split(\"/\")[-2] + \"_%s%s%s_%s%s\" % (i.year, i.month, i.day, i.hour, i.minute)\n try:\n os.mkdir(target)\n except FileExistsError:\n sh.rm(\"-rf\",target)\n os.mkdir(target)\n if which == \"cnv\":\n name = \"cnv_median_\" + args.data_type\n gene_list, a_group, b_group = fun(args=args)\n else:\n if args.cal_type == \"num\":\n name = \"snv_number\"\n else:\n name = \"snv_mean\"\n gene_list, a_group, b_group = fun(args=args)\n # feature_gene = feature_select(gene_list, a_group, b_group, pval=args.pval, method=args.feature_selection_method,\\\n # criterion=args.criterion, 
penalty=args.penalty, C=args.C, threshold=args.threshold)\n feature_gene = feature_select(gene_list, a_group, b_group, args=args)\n feature_genes.append(feature_gene)\n gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]\n os.chdir(target)\n save_parameters(args=args)\n group_dic = {two_group[0]: a_group, two_group[1]: b_group}\n color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group\n color, length = make_col_color_heatmap(group_dic, args=args)\n save_data_pdf(gene_list, \"host_gene_%s\" % name, length, color, group_dic, which, args=args)\n pd.DataFrame({\"gene\":feature_gene}).to_csv(\"feature_gene_pval%0.2f.txt\" % args.pval, sep=\"\\t\", index=False)\n feature_gene_cnv = gene_list.ix[feature_gene]\n evaluate_model(gene_list, a_group, b_group, feature_gene, name=\"feature_gene_%s\" % name, args=args)\n save_data_pdf(feature_gene_cnv, \"feature_gene_%s\" % name, length, color, group_dic, which, args=args)\n os.chdir(args.outdir)\n # if len(args.group1 + args.group2) > 2:\n # try:\n # os.mkdir(\"intersection\")\n # except FileExistsError:\n # pass\n # os.chdir(\"intersection\")\n # color, length = make_col_color_heatmap(color_length)\n # intersection_feature_gene = list(set(feature_genes[0]).intersection(*feature_genes[1:]))\n # intersection_feature_gene_cnv = pd.concat([data.ix[intersection_feature_gene] for [args.group1, args.group2], data in gene_lists.items()], axis=1)\n # try:\n # save_data_pdf(intersection_feature_gene_cnv, \"intersection\", length, color, color_length)\n # except Exception:\n # print(\"no intersection\\njob finish...\")\n # os.chdir(args.outdir)\n" ]
[ [ "matplotlib.use", "matplotlib.pylab.savefig", "matplotlib.pylab.ylabel", "pandas.DataFrame", "matplotlib.pylab.legend", "matplotlib.pylab.close", "matplotlib.pylab.xlabel", "matplotlib.pylab.subplots", "scipy.stats.ranksums", "pandas.Series", "matplotlib.pylab.title", "matplotlib.pylab.xticks", "sklearn.decomposition.PCA", "matplotlib.pylab.scatter" ] ]
hoangphucITJP/tpu
[ "e4ce0d8eb61a828d4b5fe09effd082356e88545c" ]
[ "models/official/detection/modeling/architecture/resnet.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the post-activation form of Residual Networks.\n\nResidual networks (ResNets) were proposed in:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\n\nfrom modeling.architecture import nn_blocks\nfrom modeling.architecture import nn_ops\n\n\ndef get_drop_connect_rate(init_rate, block_num, total_blocks):\n \"\"\"Get drop connect rate for the ith block.\"\"\"\n if init_rate is not None:\n return init_rate * float(block_num) / total_blocks\n else:\n return None\n\n\ndef block_group(inputs,\n filters,\n strides,\n use_projection,\n block_fn,\n block_repeats,\n batch_norm_relu=nn_ops.BatchNormRelu(),\n dropblock=nn_ops.Dropblock(),\n drop_connect_rate=None,\n data_format='channels_last',\n name=None,\n is_training=False):\n \"\"\"Builds one group of blocks.\n\n Args:\n inputs: a `Tensor` of size `[batch, channels, height, width]`.\n filters: an `int` number of filters for the first two convolutions.\n strides: an `int` block stride. If greater than 1, this block will\n ultimately downsample the input.\n use_projection: a `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n block_fn: the `function` for the block to use within the model\n block_repeats: an `int` number of blocks to repeat in the group.\n batch_norm_relu: an operation that is added after convolutions, including a\n batch norm layer and an optional relu activation.\n dropblock: a drop block layer that is added after convluations. Note that\n the default implementation does not apply any drop block.\n drop_connect_rate: a 'float' number that specifies the drop connection rate\n of the block. 
Note that the default `None` means no drop connection is\n applied.\n data_format: a `str` that specifies the data format.\n name: a `str` name for the Tensor output of the block layer.\n is_training: a `bool` if True, the model is in training mode.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(\n inputs,\n filters,\n strides,\n use_projection=use_projection,\n batch_norm_relu=batch_norm_relu,\n dropblock=dropblock,\n drop_connect_rate=drop_connect_rate,\n data_format=data_format,\n is_training=is_training)\n for _ in range(1, block_repeats):\n inputs = block_fn(\n inputs,\n filters,\n 1,\n use_projection=False,\n batch_norm_relu=batch_norm_relu,\n dropblock=dropblock,\n drop_connect_rate=drop_connect_rate,\n data_format=data_format,\n is_training=is_training)\n return tf.identity(inputs, name)\n\n\nclass Resnet(object):\n \"\"\"Class to build ResNet family model.\"\"\"\n\n def __init__(self,\n resnet_depth,\n dropblock=nn_ops.Dropblock(),\n batch_norm_relu=nn_ops.BatchNormRelu(),\n init_drop_connect_rate=None,\n data_format='channels_last'):\n \"\"\"ResNet initialization function.\n\n Args:\n resnet_depth: `int` depth of ResNet backbone model.\n dropblock: a dropblock layer.\n batch_norm_relu: an operation that includes a batch normalization layer\n followed by a relu layer(optional).\n init_drop_connect_rate: a 'float' number that specifies the initial drop\n connection rate. Note that the default `None` means no drop connection\n is applied.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n \"\"\"\n self._resnet_depth = resnet_depth\n\n self._dropblock = dropblock\n self._batch_norm_relu = batch_norm_relu\n self._init_drop_connect_rate = init_drop_connect_rate\n\n self._data_format = data_format\n\n model_params = {\n 10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},\n 18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},\n 34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},\n 50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},\n 101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},\n 152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},\n 200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}\n }\n\n if resnet_depth not in model_params:\n valid_resnet_depths = ', '.join(\n [str(depth) for depth in sorted(model_params.keys())])\n raise ValueError(\n 'The resnet_depth should be in [%s]. 
Not a valid resnet_depth:'%(\n valid_resnet_depths), self._resnet_depth)\n params = model_params[resnet_depth]\n self._resnet_fn = self.resnet_v1_generator(\n params['block'], params['layers'])\n\n def __call__(self, inputs, is_training=False):\n \"\"\"Returns the ResNet model for a given size and number of output classes.\n\n Args:\n inputs: a `Tesnor` with shape [batch_size, height, width, 3] representing\n a batch of images.\n is_training: `bool` if True, the model is in training mode.\n\n Returns:\n a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].\n The values are corresponding feature hierarchy in ResNet with shape\n [batch_size, height_l, width_l, num_filters].\n \"\"\"\n with tf.variable_scope('resnet%s' % self._resnet_depth):\n return self._resnet_fn(inputs, is_training)\n\n def resnet_v1_generator(self, block_fn, layers):\n \"\"\"Generator for ResNet v1 models.\n\n Args:\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layers: list of 4 `int`s denoting the number of blocks to include in each\n of the 4 block groups. Each group consists of blocks that take inputs of\n the same resolution.\n\n Returns:\n Model `function` that takes in `inputs` and `is_training` and returns the\n output `Tensor` of the ResNet model.\n \"\"\"\n def model(inputs, is_training=False):\n \"\"\"Creation of the model graph.\"\"\"\n inputs = nn_ops.conv2d_fixed_padding(\n inputs=inputs, filters=64, kernel_size=7, strides=2,\n data_format=self._data_format)\n inputs = tf.identity(inputs, 'initial_conv')\n inputs = self._batch_norm_relu(inputs, is_training=is_training)\n\n inputs = tf.layers.max_pooling2d(\n inputs=inputs, pool_size=3, strides=2, padding='SAME',\n data_format=self._data_format)\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n c2 = block_group(\n inputs=inputs,\n filters=64,\n strides=1,\n use_projection=True,\n block_fn=block_fn,\n block_repeats=layers[0],\n batch_norm_relu=self._batch_norm_relu,\n dropblock=self._dropblock,\n drop_connect_rate=get_drop_connect_rate(\n self._init_drop_connect_rate, 2, 5),\n name='block_group1',\n is_training=is_training)\n c3 = block_group(\n inputs=c2,\n filters=128,\n strides=2,\n use_projection=True,\n block_fn=block_fn,\n block_repeats=layers[1],\n batch_norm_relu=self._batch_norm_relu,\n dropblock=self._dropblock,\n drop_connect_rate=get_drop_connect_rate(\n self._init_drop_connect_rate, 3, 5),\n name='block_group2',\n is_training=is_training)\n c4 = block_group(\n inputs=c3,\n filters=256,\n strides=2,\n use_projection=True,\n block_fn=block_fn,\n block_repeats=layers[2],\n batch_norm_relu=self._batch_norm_relu,\n dropblock=self._dropblock,\n drop_connect_rate=get_drop_connect_rate(\n self._init_drop_connect_rate, 4, 5),\n name='block_group3',\n is_training=is_training)\n c5 = block_group(\n inputs=c4,\n filters=512,\n strides=2,\n use_projection=True,\n block_fn=block_fn,\n block_repeats=layers[3],\n batch_norm_relu=self._batch_norm_relu,\n dropblock=self._dropblock,\n drop_connect_rate=get_drop_connect_rate(\n self._init_drop_connect_rate, 5, 5),\n name='block_group4',\n is_training=is_training)\n return {2: c2, 3: c3, 4: c4, 5: c5}\n\n return model\n" ]
[ [ "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.layers.max_pooling2d" ] ]
LJeub/Local2Global_embedding
[ "22e1818639043444f97655d944997a171b992745" ]
[ "local2global_embedding/run.py" ]
[ "\"\"\"Training run script\"\"\"\n\nimport argparse\nimport json\nfrom pathlib import Path\nfrom bisect import bisect_left\n\nimport torch\nimport torch_geometric as tg\nimport matplotlib.pyplot as plt\nimport local2global as l2g\n\nfrom local2global_embedding.embedding import speye, train, embedding, VGAE_model, VGAE_loss, reconstruction_auc\nfrom local2global_embedding.network import largest_connected_component, TGraph\nfrom local2global_embedding.patches import create_patch_data\nfrom local2global_embedding.clustering import distributed_clustering, fennel_clustering, louvain_clustering, metis_clustering\n\n\nclass ResultsDict:\n \"\"\"\n Class for keeping track of results\n \"\"\"\n @classmethod\n def load(cls, filename, replace=False):\n \"\"\"\n restore results from file\n\n Args:\n filename: input json file\n replace: set the replace attribute\n\n Returns:\n populated ResultsDict\n\n \"\"\"\n self = cls(replace=replace)\n with open(filename) as f:\n self._data.update(json.load(f))\n return self\n\n def save(self, filename):\n \"\"\"\n dump contents to json file\n\n Args:\n filename: output file path\n\n \"\"\"\n with open(filename, 'w') as f:\n json.dump(self._data, f)\n\n def __init__(self, replace=False):\n \"\"\"\n initialise empty ResultsDict\n Args:\n replace: set the replace attribute (default: ``False``)\n \"\"\"\n self._data = {'dims': [], 'auc': [], 'args': []}\n self.replace = replace #: if ``True``, updates replace existing data, if ``False``, updates append data\n\n def __getitem__(self, item):\n return self._data[item]\n\n def _update_index(self, index, aucs: list, args=None):\n \"\"\"\n update data for a given index\n\n Args:\n index: integer index into data lists\n aucs: new auc values (should be a list)\n args: new args data (optional)\n\n \"\"\"\n if self.replace:\n self['auc'][index] = aucs\n self['args'][index] = args\n else:\n self['auc'][index].extend(aucs)\n self['args'][index].extend([args] * len(aucs))\n\n def _insert_index(self, index: int, dim: int, aucs: list, args=None):\n \"\"\"\n insert new data at index\n\n Args:\n index: integer index into data lists\n dim: data dimension for index\n aucs: new auc values\n args: new args data (optional)\n \"\"\"\n self['auc'].insert(index, aucs)\n self['dims'].insert(index, dim)\n self['args'].insert(index, [args] * len(aucs))\n\n def update_dim(self, dim, aucs, args=None):\n \"\"\"\n update data for given dimension\n\n Args:\n dim: dimension to update\n aucs: new auc values\n args: new args data (optional)\n\n if ``self.contains_dim(dim) == True``, behaviour depends on the value of\n ``self.replace``\n\n \"\"\"\n index = bisect_left(self['dims'], dim)\n if index < len(self['dims']) and self['dims'][index] == dim:\n self._update_index(index, aucs, args)\n else:\n self._insert_index(index, dim, aucs, args)\n\n def max_auc(self, dim=None):\n \"\"\"\n return maximum auc values\n\n Args:\n dim: if ``dim=None``, return list of values for all dimension, else only return maximum value for ``dim``.\n\n \"\"\"\n if dim is None:\n return [max(aucs) for aucs in self['auc']]\n else:\n index = bisect_left(self['dims'], dim)\n if index < len(self['dims']) and self['dims'][index] == dim:\n return max(self['auc'][index])\n else:\n return 0.\n\n def contains_dim(self, dim):\n \"\"\"\n equivalent to ``dim in self['dims']``\n\n \"\"\"\n index = bisect_left(self['dims'], dim)\n return index < len(self['dims']) and self['dims'][index] == dim\n\n def reduce_to_dims(self, dims):\n \"\"\"\n remove all data for dimensions not in ``dims``\n 
Args:\n dims: list of dimensions to keep\n\n \"\"\"\n index = [i for i, d in enumerate(dims) if self.contains_dim(d)]\n for key1 in self._data:\n if isinstance(self._data[key1], list):\n self._data[key1] = [self[key1][i] for i in index]\n return self\n\n def runs(self, dim=None):\n \"\"\"\n return the number of runs\n\n Args:\n dim: if ``dim is None``, return list of number of runs for all dimension, else return number of\n runs for dimension ``dim``.\n\n \"\"\"\n if dim is None:\n return [len(x) for x in self['auc']]\n else:\n index = bisect_left(self['dims'], dim)\n if index < len(self['dims']) and self['dims'][index] == dim:\n return len(self['auc'][index])\n else:\n return 0\n\n\n_dataloaders = {} #: dataloaders\n\n\ndef dataloader(name):\n \"\"\"\n decorator for registering dataloader functions\n\n Args:\n name: data set name\n\n \"\"\"\n def loader(func):\n _dataloaders[name] = func\n return func\n return loader\n\n\n@dataloader('Cora')\ndef _load_cora():\n return tg.datasets.Planetoid(name='Cora', root='/tmp/cora')[0]\n\n\n@dataloader('PubMed')\ndef _load_pubmed():\n return tg.datasets.Planetoid(name='PubMed', root='/tmp/pubmed')[0]\n\n\n@dataloader('AMZ_computers')\ndef _load_amazon_computers():\n return tg.datasets.Amazon(root='/tmp/amazon', name='Computers')[0]\n\n\n@dataloader('AMZ_photo')\ndef _load_amazon_photos():\n return tg.datasets.Amazon(root='/tmp/amazon', name='photo')[0]\n\n\ndef load_data(name):\n \"\"\"\n load data set\n\n Args:\n name: name of data set (one of {names})\n\n Returns:\n largest connected component of data set\n\n \"\"\"\n data = _dataloaders[name]()\n data = largest_connected_component(data=data)\n data.num_nodes = data.x.shape[0]\n return data\n\n\nload_data.__doc__ = load_data.__doc__.format(names=list(_dataloaders.keys()))\n\n\ndef prepare_patches(output_folder, **kwargs):\n \"\"\"\n initialise patch data if ``output_folder`` does not exist, else load existing patch data\n\n Args:\n output_folder: folder for storing patch data\n **kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`\n\n Returns:\n patch_data, patch_graph\n \"\"\"\n output_folder = Path(output_folder)\n if output_folder.is_dir():\n patch_graph = torch.load(output_folder / 'patch_graph.pt')\n patch_data = [torch.load(output_folder / f\"patch{i}.pt\") for i in range(patch_graph.num_nodes)]\n else:\n patch_data, patch_graph = create_patch_data(**kwargs)\n output_folder.mkdir(parents=True)\n torch.save(patch_graph, output_folder / 'patch_graph.pt')\n for i, data in enumerate(patch_data):\n torch.save(data, output_folder / f'patch{i}.pt')\n return patch_data, patch_graph\n\n\ndef csvlist(input_type=str):\n \"\"\"\n Create an argparse type that parses comma separated lists of type ``input_type``\n\n Args:\n input_type: type of list elements\n\n Returns:\n list parser\n\n \"\"\"\n def make_list(input_str):\n return [input_type(s) for s in input_str.split(',')]\n make_list.__doc__ = f\"\"\"\n argparse type that parses comma separated list of type {input_type}\n \n Args:\n input_str: string to be parsed\n \n Returns:\n list of elements of type {input_type}\n \"\"\"\n return make_list\n\n\n_parser = argparse.ArgumentParser(description=\"Run training example.\")\n_parser.add_argument('--data', default='Cora', choices=_dataloaders.keys(), help='Dataset to load')\n_parser.add_argument('--no_features', action='store_true', help='Discard features and use node identity.')\n_parser.add_argument('--num_epochs', type=int, default=200, help='Number of training 
epochs')\n_parser.add_argument('--runs', type=int, default=10, help='Number of training runs (keep best result)')\n_parser.add_argument('--dims', type=csvlist(int), default=[2], help='Embedding dimensions (comma-separated)')\n_parser.add_argument('--hidden_multiplier', type=int, default=2, help='Hidden dim is `hidden_multiplier` * `dim`')\n_parser.add_argument('--target_patch_degree', type=float, default=4.0, help='Target patch degree for sparsification.')\n_parser.add_argument('--min_overlap', type=int, default=None, help='Minimum target patch overlap (defaults to `max(dims) + 1`)')\n_parser.add_argument('--target_overlap', type=int, default=None, help='Target patch overlap (defaults to twice `min_overlap`)')\n_parser.add_argument('--gamma', type=float, default=0.0, help=\"Value of 'gamma' for RMST sparsification.\")\n_parser.add_argument('--sparsify', default='resistance', help=\"Sparsification method to use.\",\n choices={'resistance', 'rmst', 'none'})\n_parser.add_argument('--cluster', default='metis', choices={'louvain', 'distributed', 'fennel', 'metis'}, help=\"Clustering method to use\")\n_parser.add_argument('--num_clusters', default=10, type=int, help=\"Target number of clusters for fennel, or metis.\")\n_parser.add_argument('--beta', default=0.1, type=float, help=\"Beta value for distributed\")\n_parser.add_argument('--num_iters', default=None, type=int, help=\"Maximum iterations for distributed or fennel (default depends on method choice)\")\n_parser.add_argument('--lr', default=0.01, type=float, help='Learning rate')\n_parser.add_argument('--dist', action='store_true', help='use distance decoder instead of inner product decoder')\n_parser.add_argument('--output',\n default='.',\n help='output folder')\n_parser.add_argument('--device', default=None, help=\"Device used for training e.g., 'cpu', 'cuda'\")\n_parser.add_argument('--plot', action='store_true', help='Plot embedding performance')\n_parser.add_argument('--verbose', action='store_true', help='Show progress info')\n\n\ndef run(**kwargs):\n \"\"\"\n Run training example.\n\n By default this function writes results to the current working directory. To override this use the ``output``\n keyword argument.\n\n This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.\n\n\n Keyword Args:\n data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)\n no_features: If ``True``, discard features and use node identity. (default: ``False``)\n num_epochs: Number of training epochs (default: ``200``)\n runs: Number of training runs (keep best result) (default: ``1``)\n dims: list of embedding dimensions (default: ``[2]``)\n hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``\n target_patch_degree: Target patch degree for resistance sparsification. 
(default: ``4``)\n min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)\n target_overlap: Target patch overlap (default: ``2 * max(dims)``)\n gamma: Value of 'gamma' for RMST sparsification (default: ``0``)\n sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})\n (default: ``'resistance'``)\n cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})\n (default: ``'metis'``)\n num_clusters: Target number of clusters for distributed, fennel, or metis.\n num_iters: Maximum iterations for distributed or fennel\n lr: Learning rate\n dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)\n output: output folder (default: ``'.'``)\n device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)\n plot: If ``True``, plot embedding performance (default: ``False``)\n verbose: If ``True``, show progress info (default: ``False``)\n\n This function only accepts keyword arguments and is also exposed as a command-line interface.\n\n .. rubric:: References\n\n .. [#l2g] L. G. S. Jeub et al.\n “Local2Global: Scaling global representation learning on graphs via local training”.\n DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.\n\n \"\"\"\n\n # support calling this as a python function with keyword arguments\n args = _parser.parse_args([])\n for key, value in kwargs.items():\n if key in args:\n setattr(args, key, value)\n else:\n raise TypeError(f'Unknown argument {key}')\n\n output_folder = Path(args.output)\n data = load_data(args.data)\n neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)\n graph = TGraph(data.edge_index, data.edge_attr)\n basename = args.data\n dims = args.dims\n num_epochs = args.num_epochs\n runs = args.runs\n min_overlap = args.min_overlap if args.min_overlap is not None else max(dims) + 1\n target_overlap = args.target_overlap if args.target_overlap is not None else 2 * max(dims)\n\n if args.no_features:\n data.x = None # remove node features (trained with identity)\n basename += '_no_features'\n\n if args.dist:\n basename += '_dist'\n\n if args.sparsify == 'resistance':\n sp_string = f\"resistance_deg{args.target_patch_degree}\"\n elif args.sparsify == 'rmst':\n sp_string = f\"rmst_gamma{args.gamma}\"\n elif args.sparsify == 'none':\n sp_string = \"no_sparsify\"\n else:\n raise RuntimeError(f\"Unknown sparsification method '{args.sparsify}'.\")\n if args.cluster == 'louvain':\n cluster_fun = lambda: louvain_clustering(graph)\n cluster_string = 'louvain'\n elif args.cluster == 'distributed':\n cluster_fun = lambda: distributed_clustering(graph, args.beta, rounds=args.num_iters)\n cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'\n elif args.cluster == 'fennel':\n cluster_fun = lambda: fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True,\n num_iters=args.num_iters)\n cluster_string = f\"fennel_n{args.num_clusters}_it{args.num_iters}\"\n elif args.cluster == 'metis':\n cluster_fun = lambda: metis_clustering(graph, num_clusters=args.num_clusters)\n cluster_string = f\"metis_n{args.num_clusters}\"\n else:\n raise RuntimeError(f\"Unknown cluster method '{args.cluster}'.\")\n\n cluster_file = output_folder / f\"{args.data}_{cluster_string}_clusters.pt\"\n if cluster_file.is_file():\n clusters = torch.load(cluster_file)\n else:\n clusters = cluster_fun()\n torch.save(clusters, cluster_file)\n\n patch_folder = 
output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches'\n patch_data, patch_graph = prepare_patches(\n output_folder=patch_folder,\n data=data,\n partition_tensor=clusters,\n min_overlap=min_overlap,\n target_overlap=target_overlap,\n sparsify_method=args.sparsify,\n gamma=args.gamma,\n target_patch_degree=args.target_patch_degree,\n verbose=args.verbose)\n if args.verbose:\n print(f'total edges: {data.num_edges}')\n print(f'total patch edges: {sum(c.num_edges for c in patch_data)}')\n\n if args.no_features:\n data.x = speye(data.num_nodes) # add identity as node features for training full model\n\n # compute baseline full model if necessary\n baseline_file = output_folder / f'{basename}_full_info.json'\n training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}\n if baseline_file.is_file():\n baseline_data = ResultsDict.load(baseline_file)\n else:\n baseline_data = ResultsDict()\n\n for d in dims:\n r = baseline_data.runs(d)\n if r < runs:\n if args.verbose:\n print(f'training full model for {runs-r} runs and d={d}')\n for r_it in range(r, runs):\n if args.verbose:\n print(f\"full model (d={d}) run {r_it + 1} of {runs}\")\n data = data.to(args.device)\n model = train(data,\n VGAE_model(d, d * args.hidden_multiplier, data.num_features, dist=args.dist).to(args.device),\n loss_fun=VGAE_loss,\n num_epochs=num_epochs,\n lr=args.lr,\n verbose=args.verbose,\n )\n coords = embedding(model, data)\n auc = reconstruction_auc(coords, data, dist=args.dist)\n if auc > baseline_data.max_auc(d):\n if args.verbose:\n print(f\"new best (auc={auc})\")\n torch.save(model.state_dict(), output_folder / f'{basename}_full_d{d}_best_model.pt')\n torch.save(coords, output_folder / f'{basename}_full_d{d}_best_coords.pt')\n baseline_data.update_dim(d, [auc], training_args)\n baseline_data.save(baseline_file)\n\n results_file = patch_folder / f'{basename}_l2g_info.json'\n nt_results_file = patch_folder / f'{basename}_nt_info.json'\n if results_file.is_file():\n results = ResultsDict.load(results_file, replace=True)\n else:\n results = ResultsDict(replace=True)\n if nt_results_file.is_file():\n nt_results = ResultsDict.load(nt_results_file, replace=True)\n else:\n nt_results = ResultsDict(replace=True)\n\n for d in dims:\n patch_list = []\n update_aligned_embedding = False\n for p_ind, patch in enumerate(patch_data):\n patch_result_file = patch_folder / f'{basename}_patch{p_ind}_info.json'\n if patch_result_file.is_file():\n patch_results = ResultsDict.load(patch_result_file)\n else:\n patch_results = ResultsDict()\n coords_file = patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt'\n if coords_file.is_file():\n best_coords = torch.load(coords_file)\n\n r = patch_results.runs(d)\n if args.no_features:\n patch.x = speye(patch.num_nodes)\n if r < runs:\n if args.verbose:\n print(f'training patch{p_ind} for {runs-r} runs and d={d}')\n patch = patch.to(args.device)\n for r_it in range(r, runs):\n if args.verbose:\n print(f\"patch{p_ind} (d={d}) run {r_it+1} of {runs}\")\n model = train(patch,\n VGAE_model(d, d * args.hidden_multiplier, patch.num_features, dist=args.dist).to(args.device),\n loss_fun=VGAE_loss,\n num_epochs=num_epochs,\n lr=args.lr,\n )\n coords = embedding(model, patch)\n auc = reconstruction_auc(coords, patch, dist=args.dist)\n if auc > patch_results.max_auc(d):\n if args.verbose:\n print(f\"new best (auc={auc})\")\n best_coords = coords\n torch.save(model.state_dict(), patch_folder / 
f'{basename}_patch{p_ind}_d{d}_best_model.pt')\n torch.save(best_coords, coords_file)\n update_aligned_embedding = True\n patch_results.update_dim(d, [auc], training_args)\n patch_results.save(patch_result_file)\n patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))\n\n\n patched_embedding_file = patch_folder / f'{basename}_d{d}_coords.pt'\n patched_embedding_file_nt = patch_folder / f'{basename}_d{d}_ntcoords.pt'\n if update_aligned_embedding or not patched_embedding_file.is_file():\n prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())\n ntcoords = prob.mean_embedding()\n coords = prob.get_aligned_embedding()\n torch.save(coords, patched_embedding_file)\n torch.save(ntcoords, patched_embedding_file_nt)\n\n results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])\n nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])\n results.save(results_file)\n nt_results.save(nt_results_file)\n\n baseline_data = baseline_data.reduce_to_dims(dims)\n results = results.reduce_to_dims(dims)\n nt_results = nt_results.reduce_to_dims(dims)\n\n if args.plot:\n plt.figure()\n plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o',\n color='tab:blue')\n plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')\n plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue',\n linewidth=1)\n\n plt.xscale('log')\n plt.xticks(dims, dims)\n plt.minorticks_off()\n plt.xlabel('embedding dimension')\n plt.ylabel('AUC')\n plt.legend()\n oversampling_ratio = sum(p.num_edges for p in patch_data) / data.num_edges\n plt.title(f\"oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}\")\n plt.savefig(output_folder / f\"{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf\")\n plt.show()\n\n\nif __name__ == '__main__':\n # run main script\n args = _parser.parse_args()\n run(**vars(args))\n" ]
[ [ "matplotlib.pyplot.xscale", "matplotlib.pyplot.xlabel", "torch.save", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "torch.as_tensor", "torch.load", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.minorticks_off" ] ]
KarhouTam/FedLab-benchmarks
[ "6de0ca56f645794ca7eae0f19c6b0117165d3404" ]
[ "fedlab_benchmarks/fedmgda+/standalone.py" ]
[ "from json import load\nimport os\nimport argparse\nimport random\nfrom copy import deepcopy\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch import nn\nimport sys\nimport torch\nimport numpy as np\nimport cvxopt\ntorch.manual_seed(0)\n\nfrom fedlab.core.client.serial_trainer import SubsetSerialTrainer\nfrom fedlab.utils.aggregator import Aggregators\nfrom fedlab.utils.serialization import SerializationTool\nfrom fedlab.utils.functional import evaluate\nfrom fedlab.utils.functional import get_best_gpu, load_dict\n\nsys.path.append(\"../\")\nfrom models.cnn import CNN_MNIST\n\ndef quadprog(Q, q, G, h, A, b):\n \"\"\"\n Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html\n Output: Numpy array of the solution\n \"\"\"\n Q = cvxopt.matrix(Q.tolist())\n q = cvxopt.matrix(q.tolist(), tc='d')\n G = cvxopt.matrix(G.tolist())\n h = cvxopt.matrix(h.tolist())\n A = cvxopt.matrix(A.tolist())\n b = cvxopt.matrix(b.tolist(), tc='d')\n sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)\n return np.array(sol['x'])\n\ndef optim_lambdas(gradients, lambda0):\n epsilon = 0.5\n n = len(gradients)\n J_t = [grad.numpy() for grad in gradients]\n J_t = np.array(J_t)\n # target function\n Q = 2 * np.dot(J_t, J_t.T)\n q = np.array([[0] for i in range(n)])\n # equality constrint\n A = np.ones(n).T\n b = np.array([1])\n # boundary\n lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])\n ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])\n G = np.zeros((2 * n, n))\n for i in range(n):\n G[i][i] = -1\n G[n + i][i] = 1\n h = np.zeros((2 * n, 1))\n for i in range(n):\n h[i] = -lb[i]\n h[n + i] = ub[i]\n res = quadprog(Q, q, G, h, A, b)\n return res\n\n# python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --partition iid\n# configuration\nparser = argparse.ArgumentParser(description=\"Standalone training example\")\nparser.add_argument(\"--total_client\", type=int, default=10)\nparser.add_argument(\"--com_round\", type=int, default=5)\n\nparser.add_argument(\"--sample_ratio\", type=float)\nparser.add_argument(\"--batch_size\", type=int)\nparser.add_argument(\"--lr\", type=float)\nparser.add_argument(\"--epochs\", type=int)\n\nargs = parser.parse_args()\n\n# get raw dataset\nroot = \"../datasets/mnist/\"\ntrainset = torchvision.datasets.MNIST(root=root,\n train=True,\n download=True,\n transform=transforms.ToTensor())\n\ntestset = torchvision.datasets.MNIST(root=root,\n train=False,\n download=True,\n transform=transforms.ToTensor())\n\ntest_loader = torch.utils.data.DataLoader(testset,\n batch_size=len(testset),\n drop_last=False,\n shuffle=False)\n\n# setup\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\ngpu = get_best_gpu()\nmodel = CNN_MNIST().cuda(gpu)\n\n# FL settings\nnum_per_round = int(args.total_client * args.sample_ratio)\naggregator = Aggregators.fedavg_aggregate\ntotal_client_num = args.total_client # client总数\ndata_indices = load_dict(\"./mnist_noniid.pkl\")\n\n\n# fedlab setup\nlocal_model = deepcopy(model)\n\ntrainer = SubsetSerialTrainer(model=local_model,\n dataset=trainset,\n data_slices=data_indices,\n aggregator=aggregator,\n args={\n \"batch_size\": args.batch_size,\n \"epochs\": args.epochs,\n \"lr\": args.lr\n })\n\ndynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round\n\n# train procedure\nto_select = [i for i in range(total_client_num)]\nfor round in range(args.com_round):\n model_parameters = SerializationTool.serialize_model(model)\n selection = 
random.sample(to_select, num_per_round)\n parameters = trainer.train(model_parameters=model_parameters,\n id_list=selection,\n aggregate=False)\n\n gradients = [model_parameters - model for model in parameters]\n for i, grad in enumerate(gradients):\n gradients[i] = grad / grad.norm()\n print(len(gradients))\n print(gradients[0].shape)\n # calculate lamda\n lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]\n dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)\n dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)\n serialized_parameters = model_parameters - dt * args.lr\n SerializationTool.deserialize_model(model, serialized_parameters)\n\n criterion = nn.CrossEntropyLoss()\n loss, acc = evaluate(model, criterion, test_loader)\n print(\"loss: {:.4f}, acc: {:.2f}\".format(loss, acc))\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "numpy.ones", "torch.manual_seed", "torch.nn.CrossEntropyLoss" ] ]
BrunoKM/rhoana_graph_tools
[ "7150f4bc6337ecf51dd9123cf03561a57d655160" ]
[ "utils/graph_utils.py" ]
[ "import numpy as np\nimport networkx as nx\n\nif __name__ == '__main__':\n from ged4py.algorithm import graph_edit_dist\nelse:\n from .ged4py.algorithm import graph_edit_dist\n\ndef rearrange_adj_matrix(matrix, ordering):\n assert matrix.ndim == 2\n # Check that matrix is square\n assert matrix.shape[0] == matrix.shape[1]\n num_nodes = matrix.shape[0]\n assert len(ordering) == num_nodes\n\n # Swap rows into correct ordering\n matrix = matrix[ordering, :]\n # Swap columns into correct ordering\n matrix = matrix[:, ordering]\n return matrix\n\n\ndef rand_permute_adj_matrix(matrix):\n \"\"\"Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity\n between them.\"\"\"\n num_vertices = matrix.shape[0]\n rand_order = np.arange(num_vertices)\n np.random.shuffle(rand_order)\n matrix_permuted = rearrange_adj_matrix(matrix, rand_order)\n return matrix_permuted\n\n\ndef ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):\n \"\"\"Calculate the graph edit distance between two graphs\"\"\"\n if directed:\n create_using = nx.DiGraph\n else:\n create_using = nx.Graph\n g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())\n g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())\n return ged_function(g1, g2)\n\n\ndef ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):\n \"\"\"Calculate the graph edit distance between two graphs using the networkx implementation\"\"\"\n return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)\n\n\ndef ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):\n \"\"\"Calculate the graph edit distance between two graphs using the ged4py implementation\"\"\"\n return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)\n\n\ndef is_isomorphic_from_adj(adj_mat_1, adj_mat_2):\n \"\"\"Checks whether two graphs are isomorphic taking adjacency matrices as inputs\"\"\"\n g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())\n g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())\n\n return nx.is_isomorphic(g1, g2)\n\n\ndef adj_matrix_to_edge_list(adj_matrix, directed=True, first_id=0, weighted=False):\n num_nodes = adj_matrix.shape[0]\n\n if directed:\n num_edges = np.sum(adj_matrix)\n else:\n num_edges = int(np.sum(adj_matrix) / 2)\n if weighted:\n edge_list = np.zeros([num_edges, 3], dtype=np.int32)\n else:\n edge_list = np.zeros([num_edges, 2], dtype=np.int32)\n\n i = 0\n for node_in in range(num_nodes):\n if directed:\n range_2 = range(num_nodes)\n else:\n range_2 = range(node_in + 1, num_nodes)\n for node_out in range_2:\n edge_val = adj_matrix[node_in, node_out]\n if edge_val > 0:\n # If there is a connection\n if weighted:\n edge_list[i] = (node_in + first_id, node_out + first_id, edge_val)\n else:\n edge_list[i] = (node_in + first_id, node_out + first_id)\n i += 1\n\n return edge_list\n\n\ndef edge_list_to_textfile(edge_list, filepath, weighted=False):\n with open(filepath, 'w') as file:\n if weighted:\n for i, j, weight in edge_list:\n file.write(f\"{i} {j} {weight}\\n\")\n else:\n for i, j in edge_list:\n file.write(f\"{i} {j}\\n\")\n return\n" ]
[ [ "numpy.sum", "numpy.arange", "numpy.zeros", "numpy.random.shuffle" ] ]
webclinic017/qf-lib
[ "96463876719bba8a76c8269cef76addf3a2d836d", "96463876719bba8a76c8269cef76addf3a2d836d" ]
[ "qf_lib_tests/integration_tests/backtesting/alpha_model_strategy_testers/test_alpha_model_strategy_for_stop_losses_intraday.py", "qf_lib/plotting/charts/surface_chart_3d.py" ]
[ "# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pandas as pd\nfrom numpy.testing import assert_equal, assert_almost_equal\n\nfrom qf_lib.backtesting.events.time_event.regular_time_event.market_close_event import MarketCloseEvent\nfrom qf_lib.backtesting.events.time_event.regular_time_event.market_open_event import MarketOpenEvent\nfrom qf_lib.common.enums.frequency import Frequency\nfrom qf_lib.common.enums.price_field import PriceField\nfrom qf_lib.common.utils.dateutils.date_format import DateFormat\nfrom qf_lib.common.utils.dateutils.string_to_date import str_to_date\nfrom qf_lib.containers.qf_data_array import QFDataArray\nfrom qf_lib_tests.integration_tests.backtesting.alpha_model_strategy_testers.test_alpha_model_strategy_for_stop_losses import \\\n TestAlphaModelStrategy\n\n\nclass TestAlphaModelIntradayStrategy(TestAlphaModelStrategy):\n data_start_date = str_to_date(\"2014-12-25 00:00:00.00\", DateFormat.FULL_ISO)\n data_end_date = str_to_date(\"2015-02-28 23:59:59.00\", DateFormat.FULL_ISO)\n end_date = str_to_date(\"2015-02-28 13:30:00.00\", DateFormat.FULL_ISO)\n\n frequency = Frequency.MIN_1\n\n def test_stop_losses(self):\n expected_transactions_quantities = \\\n [8130, -127, 1, -8004, 7454, -58, -7396, 6900, -6900, 6390, -44, -6346, 5718, -36]\n\n result_transactions_quantities = [t.quantity for t in self.transactions]\n assert_equal(expected_transactions_quantities, result_transactions_quantities)\n\n expected_transactions_prices = [125, 130, 135, 235.6, 255, 260, 259.35, 280, 264.1, 285, 290, 282, 315, 320]\n result_transactions_prices = [t.price for t in self.transactions]\n assert_almost_equal(expected_transactions_prices, result_transactions_prices)\n\n expected_portfolio_values = [1024390, 1064659, 1064659, 1064659, 1104677, 1144697, 1184717, 1224737, 1264757,\n 1264757, 1264757, 1304777, 1344797, 1384817, 1424837, 1464857, 1464857, 1464857,\n 1504877, 1544897, 1584917, 1624937, 1664957, 1664957, 1664957, 1704977, 1744997,\n 1785017, 1825037, 1865057, 1865057, 1865057, 1905077, 1945097, 1985117, 1885867.4,\n 1908229.4, 1908229.4, 1908229.4, 1945325.4, 1982305.4, 2019285.4, 1918330, 1808620,\n 1808620, 1808620, 1827790, 1859608, 1891338, 1923068, 1954798, 1954798, 1954798,\n 1789802, 1806956, 1835438, 1863848, 1892258, 1892258]\n assert_almost_equal(expected_portfolio_values, list(self.portfolio.portfolio_eod_series()))\n\n def _make_mock_data_array(self, tickers, fields):\n all_dates_market_open = pd.date_range(start=self.data_start_date + MarketOpenEvent.trigger_time(),\n end=self.data_end_date + MarketOpenEvent.trigger_time(), freq=\"B\")\n all_dates_market_close = pd.date_range(start=self.data_start_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(),\n end=self.data_end_date + MarketCloseEvent.trigger_time() - Frequency.MIN_1.time_delta(), freq=\"B\")\n\n num_of_dates = len(all_dates_market_open)\n num_of_tickers = len(tickers)\n num_of_fields 
= len(fields)\n\n start_value = 100.0\n values = np.arange(start_value, num_of_dates * num_of_tickers * num_of_fields + start_value)\n reshaped_values = np.reshape(values, (num_of_dates, num_of_tickers, num_of_fields))\n\n mocked_result_market_open = QFDataArray.create(all_dates_market_open, tickers, fields, data=reshaped_values)\n\n mocked_result_market_close = QFDataArray.create(all_dates_market_close, tickers, fields, data=reshaped_values)\n mocked_result_market_close.loc[:, :, PriceField.Low] -= 5.0\n mocked_result_market_close.loc[:, :, PriceField.High] += 5.0\n\n all_dates = all_dates_market_open.union(all_dates_market_close)\n\n mocked_result = QFDataArray.create(all_dates, tickers, fields)\n mocked_result.loc[all_dates_market_open, :, :] = mocked_result_market_open.loc[:, :, :]\n mocked_result.loc[all_dates_market_close, :, :] = mocked_result_market_close.loc[:, :, :]\n\n self._add_test_cases(mocked_result, tickers)\n return mocked_result\n\n def _add_test_cases(self, mocked_result, tickers):\n # single low price breaking the stop level\n mocked_result.loc[\n str_to_date('2015-02-05 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0\n # two consecutive low prices breaking the stop level\n mocked_result.loc[\n str_to_date('2015-02-12 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0\n mocked_result.loc[\n str_to_date('2015-02-13 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 15.0\n # single open price breaking the stop level\n mocked_result.loc[\n str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low] -= 25.0\n mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Open] = \\\n mocked_result.loc[str_to_date('2015-02-23 19:59:00.00', DateFormat.FULL_ISO), tickers[0], PriceField.Low]\n", "# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\nfrom collections import Sequence\nfrom typing import Tuple\n\nfrom mpl_toolkits.mplot3d import Axes3D # important to keep this line for figure.add_subplot(1, 1, 1, projection='3d')\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass SurfaceChart3D(object):\n \"\"\"\n Creates a 3D surface chart\n\n Parameters\n ----------\n x_vector: Sequence\n vector corresponding to points on X axis\n y_vector: Sequence\n vector corresponding to points on Y axis\n z_matrix: numpy.array\n matrix with values. 
The shape of the Z matrix has to be [len(Y), len(X)]\n X values correspond to COLUMNS\n Y values correspond to ROWS\n \"\"\"\n\n # Static lock used by all charts to ensure more than one chart isn't being plotted at the same time.\n plot_lock = threading.Lock()\n\n def __init__(self, x_vector: Sequence, y_vector: Sequence, z_matrix: np.array):\n # convert vectors into matrices (necessary operation in order to plot)\n assert matplotlib.get_backend() == \"TkAgg\"\n\n x = np.array(x_vector)\n y = np.array(y_vector)\n self.X, self.Y = np.meshgrid(x, y)\n self.Z = z_matrix\n\n self.axes = None\n self.figure = None\n\n # formatting specific fields\n self._x_label = None\n self._y_label = None\n self._z_label = None\n self._title_str = None\n\n def plot(self, figsize: Tuple[float, float] = None, include_contour: bool = False):\n \"\"\"\n Plots the chart. The underlying figure stays hidden until the show() method is called.\n\n Parameters\n ----------\n figsize: Tuple[float, float]\n The figure size to draw the chart at in inches. This is a tuple of (width, height) passed directly\n to matplotlib's ``plot`` function. The values are expressed in inches.\n include_contour: bool\n \"\"\"\n\n self._setup_axes_if_necessary(figsize)\n\n # Plot the surface.\n surf = self.axes.plot_surface(self.X, self.Y, self.Z, cmap='jet', rstride=1, cstride=1)\n\n if include_contour:\n self.axes.contour(self.X, self.Y, self.Z, zdir='x', offset=self.X.min(), cmap='coolwarm')\n self.axes.contour(self.X, self.Y, self.Z, zdir='y', offset=self.Y.max(), cmap='coolwarm')\n\n self.figure.colorbar(surf)\n self._apply_formatting()\n\n def show(self):\n \"\"\"\n Shows the chart. It is necessary to call the plot function first.\n \"\"\"\n self.figure.show()\n\n def close(self):\n \"\"\"\n Closes the window containing the figure.\n \"\"\"\n plt.close(self.figure)\n\n def set_axes_names(self, x_label: str = None, y_label: str = None, z_label: str = None):\n self._x_label = x_label\n self._y_label = y_label\n self._z_label = z_label\n\n def set_title(self, title_str: str):\n self._title_str = title_str\n\n def _apply_formatting(self):\n if self._x_label is not None:\n self.axes.set_xlabel(self._x_label)\n if self._y_label is not None:\n self.axes.set_ylabel(self._y_label)\n if self._z_label is not None:\n self.axes.set_zlabel(self._z_label)\n\n if self._title_str is not None:\n plt.title(self._title_str)\n\n def _setup_axes_if_necessary(self, figsize: Tuple[float, float] = None):\n if self.axes is None:\n figure = plt.figure(figsize=figsize)\n ax = figure.add_subplot(1, 1, 1, projection='3d') # (nrows, ncols, axnum)\n ax.grid(True)\n self.axes = ax\n self.figure = figure\n if figsize is not None:\n # Make sure the specified figsize is set.\n self.axes.figure.set_size_inches(figsize[0], figsize[1])\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.arange", "numpy.reshape", "numpy.testing.assert_equal" ], [ "numpy.array", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.meshgrid", "matplotlib.get_backend" ] ]
zhaowt96/models
[ "03182253673b0e2666ad9a33839759834c0acebd", "03182253673b0e2666ad9a33839759834c0acebd" ]
[ "research/object_detection/builders/calibration_builder_test.py", "research/slim/datasets/flowers.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for calibration_builder.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom scipy import interpolate\nfrom six.moves import zip\nimport tensorflow as tf\nfrom object_detection.builders import calibration_builder\nfrom object_detection.protos import calibration_pb2\nfrom object_detection.utils import test_case\n\n\nclass CalibrationBuilderTest(test_case.TestCase):\n\n def test_tf_linear_interp1d_map(self):\n \"\"\"Tests TF linear interpolation mapping to a single number.\"\"\"\n def graph_fn():\n tf_x = tf.constant([0., 0.5, 1.])\n tf_y = tf.constant([0.5, 0.5, 0.5])\n new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])\n tf_map_outputs = calibration_builder._tf_linear_interp1d(\n new_x, tf_x, tf_y)\n return tf_map_outputs\n tf_map_outputs_np = self.execute(graph_fn, [])\n self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])\n\n def test_tf_linear_interp1d_interpolate(self):\n \"\"\"Tests TF 1d linear interpolation not mapping to a single number.\"\"\"\n def graph_fn():\n tf_x = tf.constant([0., 0.5, 1.])\n tf_y = tf.constant([0.6, 0.7, 1.0])\n new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])\n tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(\n new_x, tf_x, tf_y)\n return tf_interpolate_outputs\n tf_interpolate_outputs_np = self.execute(graph_fn, [])\n self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])\n\n @staticmethod\n def _get_scipy_interp1d(new_x, x, y):\n \"\"\"Helper performing 1d linear interpolation using SciPy.\"\"\"\n interpolation1d_fn = interpolate.interp1d(x, y)\n return interpolation1d_fn(new_x)\n\n def _get_tf_interp1d(self, new_x, x, y):\n \"\"\"Helper performing 1d linear interpolation using Tensorflow.\"\"\"\n def graph_fn():\n tf_interp_outputs = calibration_builder._tf_linear_interp1d(\n tf.convert_to_tensor(new_x, dtype=tf.float32),\n tf.convert_to_tensor(x, dtype=tf.float32),\n tf.convert_to_tensor(y, dtype=tf.float32))\n return tf_interp_outputs\n np_tf_interp_outputs = self.execute(graph_fn, [])\n return np_tf_interp_outputs\n\n def test_tf_linear_interp1d_against_scipy_map(self):\n \"\"\"Tests parity of TF linear interpolation with SciPy for simple mapping.\"\"\"\n length = 10\n np_x = np.linspace(0, 1, length)\n\n # Mapping all numbers to 0.5\n np_y_map = np.repeat(0.5, length)\n\n # Scipy and TF interpolations\n test_data_np = np.linspace(0, 1, length * 10)\n scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)\n np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)\n self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)\n\n def test_tf_linear_interp1d_against_scipy_interpolate(self):\n \"\"\"Tests parity of TF linear interpolation with 
SciPy.\"\"\"\n length = 10\n np_x = np.linspace(0, 1, length)\n\n # Requires interpolation over 0.5 to 1 domain\n np_y_interp = np.linspace(0.5, 1, length)\n\n # Scipy interpolation for comparison\n test_data_np = np.linspace(0, 1, length * 10)\n scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,\n np_y_interp)\n np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,\n np_y_interp)\n self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)\n\n @staticmethod\n def _add_function_approximation_to_calibration_proto(calibration_proto,\n x_array, y_array,\n class_id):\n \"\"\"Adds a function approximation to calibration proto for a class id.\"\"\"\n # Per-class calibration.\n if class_id is not None:\n function_approximation = (\n calibration_proto.class_id_function_approximations\n .class_id_xy_pairs_map[class_id])\n # Class-agnostic calibration.\n else:\n function_approximation = (\n calibration_proto.function_approximation.x_y_pairs)\n\n for x, y in zip(x_array, y_array):\n x_y_pair_message = function_approximation.x_y_pair.add()\n x_y_pair_message.x = x\n x_y_pair_message.y = y\n\n def test_class_agnostic_function_approximation(self):\n \"\"\"Tests that calibration produces correct class-agnostic values.\"\"\"\n # Generate fake calibration proto. For this interpolation, any input on\n # [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have\n # 0.25 subtracted from it.\n class_agnostic_x = np.asarray([0.0, 0.5, 1.0])\n class_agnostic_y = np.asarray([0.0, 0.25, 0.75])\n calibration_config = calibration_pb2.CalibrationConfig()\n self._add_function_approximation_to_calibration_proto(\n calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)\n\n def graph_fn():\n calibration_fn = calibration_builder.build(calibration_config)\n # batch_size = 2, num_classes = 2, num_anchors = 2.\n class_predictions_with_background = tf.constant(\n [[[0.1, 0.2, 0.3],\n [0.4, 0.5, 0.0]],\n [[0.6, 0.7, 0.8],\n [0.9, 1.0, 1.0]]], dtype=tf.float32)\n\n # Everything should map to 0.5 if classes are ignored.\n calibrated_scores = calibration_fn(class_predictions_with_background)\n return calibrated_scores\n calibrated_scores_np = self.execute(graph_fn, [])\n self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],\n [0.2, 0.25, 0.0]],\n [[0.35, 0.45, 0.55],\n [0.65, 0.75, 0.75]]])\n\n def test_multiclass_function_approximations(self):\n \"\"\"Tests that calibration produces correct multiclass values.\"\"\"\n # Background class (0-index) maps all predictions to 0.5.\n class_0_x = np.asarray([0.0, 0.5, 1.0])\n class_0_y = np.asarray([0.5, 0.5, 0.5])\n calibration_config = calibration_pb2.CalibrationConfig()\n self._add_function_approximation_to_calibration_proto(\n calibration_config, class_0_x, class_0_y, class_id=0)\n\n # Class id 1 will interpolate using these values.\n class_1_x = np.asarray([0.0, 0.2, 1.0])\n class_1_y = np.asarray([0.0, 0.6, 1.0])\n self._add_function_approximation_to_calibration_proto(\n calibration_config, class_1_x, class_1_y, class_id=1)\n\n def graph_fn():\n calibration_fn = calibration_builder.build(calibration_config)\n # batch_size = 2, num_classes = 2, num_anchors = 2.\n class_predictions_with_background = tf.constant(\n [[[0.1, 0.2], [0.9, 0.1]],\n [[0.6, 0.4], [0.08, 0.92]]],\n dtype=tf.float32)\n calibrated_scores = calibration_fn(class_predictions_with_background)\n return calibrated_scores\n calibrated_scores_np = self.execute(graph_fn, [])\n self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],\n 
[[0.5, 0.7], [0.5, 0.96]]])\n\n def test_temperature_scaling(self):\n \"\"\"Tests that calibration produces correct temperature scaling values.\"\"\"\n calibration_config = calibration_pb2.CalibrationConfig()\n calibration_config.temperature_scaling_calibration.scaler = 2.0\n\n def graph_fn():\n calibration_fn = calibration_builder.build(calibration_config)\n # batch_size = 2, num_classes = 2, num_anchors = 2.\n class_predictions_with_background = tf.constant(\n [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],\n [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],\n dtype=tf.float32)\n calibrated_scores = calibration_fn(class_predictions_with_background)\n return calibrated_scores\n calibrated_scores_np = self.execute(graph_fn, [])\n self.assertAllClose(calibrated_scores_np,\n [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],\n [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])\n\n def test_temperature_scaling_incorrect_value_error(self):\n calibration_config = calibration_pb2.CalibrationConfig()\n calibration_config.temperature_scaling_calibration.scaler = 0\n\n calibration_fn = calibration_builder.build(calibration_config)\n class_predictions_with_background = tf.constant(\n [[[0.1, 0.2, 0.3]]], dtype=tf.float32)\n with self.assertRaises(ValueError):\n calibration_fn(class_predictions_with_background)\n\n def test_skips_class_when_calibration_parameters_not_present(self):\n \"\"\"Tests that graph fails when parameters not present for all classes.\"\"\"\n # Only adding calibration parameters for class id = 0, even though class id\n # 1 is present in the data.\n class_0_x = np.asarray([0.0, 0.5, 1.0])\n class_0_y = np.asarray([0.5, 0.5, 0.5])\n calibration_config = calibration_pb2.CalibrationConfig()\n self._add_function_approximation_to_calibration_proto(\n calibration_config, class_0_x, class_0_y, class_id=0)\n def graph_fn():\n calibration_fn = calibration_builder.build(calibration_config)\n # batch_size = 2, num_classes = 2, num_anchors = 2.\n class_predictions_with_background = tf.constant(\n [[[0.1, 0.2], [0.9, 0.1]],\n [[0.6, 0.4], [0.08, 0.92]]],\n dtype=tf.float32)\n calibrated_scores = calibration_fn(class_predictions_with_background)\n return calibrated_scores\n calibrated_scores_np = self.execute(graph_fn, [])\n self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],\n [[0.5, 0.4], [0.5, 0.92]]])\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides data for the flowers dataset.\n\nThe dataset scripts used to create the dataset can be found at:\ntensorflow/models/research/slim/datasets/download_and_convert_flowers.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nslim = tf.contrib.slim\n\nfrom datasets import dataset_utils\n\n_FILE_PATTERN = 'flowers_%s_*.tfrecord'\n\nSPLITS_TO_SIZES = {'train': 3320, 'validation': 350}\n\n_NUM_CLASSES = 5\n\n_ITEMS_TO_DESCRIPTIONS = {\n 'image': 'A color image of varying size.',\n 'label': 'A single integer between 0 and 4',\n}\n\n\ndef get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n \"\"\"Gets a dataset tuple with instructions for reading flowers.\n\n Args:\n split_name: A train/validation split name.\n dataset_dir: The base directory of the dataset sources.\n file_pattern: The file pattern to use when matching the dataset sources.\n It is assumed that the pattern contains a '%s' string so that the split\n name can be inserted.\n reader: The TensorFlow reader type.\n\n Returns:\n A `Dataset` namedtuple.\n\n Raises:\n ValueError: if `split_name` is not a valid train/validation split.\n \"\"\"\n if split_name not in SPLITS_TO_SIZES:\n raise ValueError('split name %s was not recognized.' % split_name)\n\n if not file_pattern:\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Allowing None in the signature so that dataset_factory can use the default.\n if reader is None:\n reader = tf.TFRecordReader\n\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'image/class/label': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n }\n\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image(),\n 'label': slim.tfexample_decoder.Tensor('image/class/label'),\n }\n\n decoder = slim.tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n labels_to_names = None\n if dataset_utils.has_labels(dataset_dir):\n labels_to_names = dataset_utils.read_label_file(dataset_dir)\n\n return slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=reader,\n decoder=decoder,\n num_samples=SPLITS_TO_SIZES[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n num_classes=_NUM_CLASSES,\n labels_to_names=labels_to_names)\n" ]
[ [ "scipy.interpolate.interp1d", "tensorflow.convert_to_tensor", "numpy.asarray", "tensorflow.constant", "tensorflow.test.main", "numpy.repeat", "numpy.linspace" ], [ "tensorflow.FixedLenFeature", "tensorflow.zeros" ] ]
e-mayo/mscreen
[ "2dd2316837bcb7c19384294443b2855e5ccd3e01" ]
[ "mscreen/autodocktools_prepare_py3k/AutoDockTools/Utilities24/rotate_molecule.py" ]
[ "#!/usr/bin/env python\n#$Id: rotate_molecule.py,v 1.2.10.1 2016/02/11 09:24:08 annao Exp $\nimport os \nfrom MolKit import Read\nfrom MolKit.pdbWriter import PdbWriter, PdbqsWriter, PdbqWriter, PdbqtWriter\nfrom mglutil.math.rotax import rotax\nimport numpy\n\n\nif __name__ == '__main__':\n import sys\n import getopt\n\n\n def usage():\n \"Print helpful, accurate usage statement to stdout.\"\n print(\"Usage: rotate_molecule.py -f filename\")\n print()\n print(\" Description of command...\")\n print(\" [-f] filename\")\n print(\" Optional parameters:\")\n print(\" [-o] alternative output filename\")\n print(\" (default is 'rotated_' +filename)\")\n print(\" [-y] rotate around the y axis\")\n print(\" (default is rotation around the z axis)\")\n print(\" [-x] rotate around the x axis\")\n print(\" (default is rotation around the z axis)\")\n print(\" [-u] user-defined axis of rotation '1.0,2.0,-6.2'\")\n print(\" (default is rotation around the z axis)\")\n print(\" [-a] angle for rotation about axis \")\n print(\" (default is rotation around the z axis)\")\n print(\" [-v] verbose output\")\n\n\n # process command arguments\n try:\n opt_list, args = getopt.getopt(sys.argv[1:], 'f:o:xyu:a:v')\n\n except getopt.GetoptError as msg:\n print('rotate_molecule.py: %s' %msg)\n usage()\n sys.exit(2)\n\n # initialize required parameters\n #-f: pdb_filename_stem\n filename = None\n\n # optional parameters\n verbose = None\n outputfilename = None\n rotation = 'z'\n #arbitrary axis angle for rotation\n axis = None\n angle = None\n\n #'f:o:v'\n for o, a in opt_list:\n print(\"o=\", o, \" a=\",a)\n if o in ('-f', '--f'):\n filename = a\n if verbose: print('set filename to ', filename)\n outputfilename = 'rotated_' + filename\n if o in ('-o', '--o'):\n outputfilename = a \n if verbose: \n print('set output outputfilename to ', a)\n if o in ('-x', '--x'):\n rotation = 'x'\n if verbose: print('set rotation to ', rotation)\n if o in ('-y', '--y'):\n rotation = 'y'\n if verbose: print('set rotation to ', rotation)\n if o in ('-u', '--u'):\n axis = a\n if verbose: print('set user-defined axis to ', axis)\n if o in ('-a', '--a'):\n angle = a\n if verbose: print('set angle for rotation to ', angle)\n if o in ('-v', '--v'):\n verbose = True\n if verbose: print('set verbose to ', True)\n if o in ('-h', '--'):\n usage()\n sys.exit()\n\n\n if not filename:\n print('rotate_molecule: filename must be specified.')\n usage()\n sys.exit()\n\n mol = Read(filename)[0]\n if verbose: print('read ', filename)\n filetype = os.path.splitext(os.path.basename(filename))[1]\n if verbose: print(\"filetype=\", filetype)\n writer = None\n if filetype=='.pdbqt':\n writer = PdbqtWriter()\n elif filetype=='.pdbq':\n writer = PdbqWriter()\n elif filetype=='.pdbqs':\n writer = PdbqsWriter()\n elif filetype=='.pdb':\n writer = PdbWriter()\n else:\n print('Sorry! 
Unable to write this filetype->', filetype)\n\n center = numpy.add.reduce(mol.allAtoms.coords)/len(mol.allAtoms)\n crds = numpy.array(mol.allAtoms.coords)\n center = numpy.add.reduce(crds)/len(mol.allAtoms)\n crds = crds - center\n crds = crds.tolist()\n mol.allAtoms.updateCoords(crds)\n lenCoords = len(crds)\n #rotate the atoms here\n if axis is not None and angle is not None:\n rot = (float(angle)* 3.14159/180.)%(2 * numpy.pi)\n x = numpy.array([0.,0.,0.])\n y = numpy.array(list(map(float,axis.split(','))))\n matrix = rotax(x,y, rot)\n _ones = numpy.ones(lenCoords, 'f')\n _ones.shape = (lenCoords,1)\n mov_coords = numpy.concatenate((crds, _ones),1)\n newcoords = numpy.dot(mov_coords, matrix)\n nc = newcoords[:,:3].astype('f')\n for i in range(lenCoords):\n mol.allAtoms[i]._coords[0] = nc[i].tolist()\n else:\n if rotation=='z':\n #for rotation around z-axis:\n for a in mol.allAtoms:\n a._coords[0][0] = -1.*a._coords[0][0]\n a._coords[0][1] = -1.*a._coords[0][1]\n elif rotation=='y':\n #for rotation around y-axis:\n for a in mol.allAtoms:\n a._coords[0][0] = -1.*a._coords[0][0]\n a._coords[0][2] = -1.*a._coords[0][2]\n elif rotation=='x':\n #for rotation around x-axis:\n for a in mol.allAtoms:\n a._coords[0][1] = -1.*a._coords[0][1]\n a._coords[0][2] = -1.*a._coords[0][2]\n ncrds = numpy.array(mol.allAtoms.coords)\n ncrds = ncrds + center\n ncrds = ncrds.tolist()\n mol.allAtoms.updateCoords(ncrds)\n\n if writer:\n outptr = open(outputfilename, 'w')\n liglines = mol.parser.allLines\n ctr = 0\n for l in liglines:\n if l.find(\"ATOM\")!=0 and l.find(\"HETATM\")!=0:\n outptr.write(l)\n else:\n writer.write_atom(outptr, mol.allAtoms[ctr])\n ctr += 1\n outptr.close()\n\n\n# To execute this command type:\n# rotate_molecule.py -f filename [-o outputfilename -u axis -a angle to rotate] -v\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.dot", "numpy.add.reduce", "numpy.ones" ] ]
ryanhammonds/nilearn
[ "f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2", "f33cd4e4685d9050e5bba0a8ece1b0b0f0ad1be2" ]
[ "nilearn/plotting/tests/test_html_connectome.py", "nilearn/input_data/tests/test_multi_nifti_masker.py" ]
[ "import warnings\n\nimport numpy as np\n\nfrom nilearn.plotting import cm\nfrom nilearn.plotting.js_plotting_utils import decode\nfrom nilearn.plotting import html_connectome\n\nfrom .test_js_plotting_utils import check_html\n\n\ndef test_prepare_line():\n e = np.asarray([0, 1, 2, 3], dtype=int)\n n = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)\n pe, pn = html_connectome._prepare_line(e, n)\n assert (pn == [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]).all()\n assert(pe == [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]).all()\n\n\ndef _make_connectome():\n adj = np.diag([1.5, .3, 2.5], 2)\n adj += adj.T\n adj += np.eye(5)\n\n coord = np.arange(5)\n coord = np.asarray([coord * 10, -coord, coord[::-1]]).T\n return adj, coord\n\n\ndef test_get_connectome():\n adj, coord = _make_connectome()\n connectome = html_connectome._get_connectome(adj, coord)\n con_x = decode(connectome['_con_x'], '<f4')\n expected_x = np.asarray(\n [0, 0, 0,\n 0, 20, 0,\n 10, 10, 0,\n 10, 30, 0,\n 20, 0, 0,\n 20, 20, 0,\n 20, 40, 0,\n 30, 10, 0,\n 30, 30, 0,\n 40, 20, 0,\n 40, 40, 0], dtype='<f4')\n assert (con_x == expected_x).all()\n assert {'_con_x', '_con_y', '_con_z', '_con_w', 'colorscale'\n }.issubset(connectome.keys())\n assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)\n adj[adj == 0] = np.nan\n connectome = html_connectome._get_connectome(adj, coord)\n con_x = decode(connectome['_con_x'], '<f4')\n assert (con_x == expected_x).all()\n assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)\n\n\ndef test_view_connectome():\n adj, coord = _make_connectome()\n html = html_connectome.view_connectome(adj, coord)\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_connectome(adj, coord, '85.3%',\n title=\"SOME_TITLE\")\n check_html(html, False, 'connectome-plot')\n assert \"SOME_TITLE\" in html.html\n html = html_connectome.view_connectome(adj, coord, '85.3%',\n linewidth=8.5, node_size=4.2)\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_connectome(\n adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord)))\n check_html(html, False, 'connectome-plot')\n\n\ndef test_params_deprecation_view_connectome():\n deprecated_params = {'coords': 'node_coords',\n 'threshold': 'edge_threshold',\n 'cmap': 'edge_cmap',\n 'marker_size': 'node_size',\n }\n deprecation_msg = (\n 'The parameter \"{}\" will be removed in 0.6.0 release of Nilearn. 
'\n 'Please use the parameter \"{}\" instead.'\n )\n warning_msgs = {old_: deprecation_msg.format(old_, new_)\n for old_, new_ in deprecated_params.items()\n }\n\n adj, coord = _make_connectome()\n with warnings.catch_warnings(record=True) as raised_warnings:\n html_connectome.view_connectome(adjacency_matrix=adj,\n coords=coord,\n edge_threshold='85.3%',\n edge_cmap=cm.cyan_orange,\n linewidth=8.5, node_size=4.2,\n )\n\n html_connectome.view_connectome(adjacency_matrix=adj,\n node_coords=coord,\n threshold='85.3%',\n edge_cmap=cm.cyan_orange,\n linewidth=8.5,\n node_size=4.2,\n )\n\n html_connectome.view_connectome(adjacency_matrix=adj,\n node_coords=coord,\n edge_threshold='85.3%',\n cmap=cm.cyan_orange,\n linewidth=8.5,\n node_size=4.2,\n )\n\n html_connectome.view_connectome(adjacency_matrix=adj,\n node_coords=coord,\n edge_threshold='85.3%',\n edge_cmap=cm.cyan_orange,\n linewidth=8.5,\n marker_size=4.2,\n )\n\n html_connectome.view_connectome(adjacency_matrix=adj,\n node_coords=coord,\n edge_threshold='85.3%',\n edge_cmap=cm.cyan_orange,\n linewidth=8.5,\n node_size=4.2,\n )\n\n html_connectome.view_connectome(adj,\n coord,\n '85.3%',\n cm.cyan_orange,\n 8.5,\n 4.2,\n )\n old_params = ['coords', 'threshold', 'cmap', 'marker_size']\n\n raised_warning_messages = ''.join(\n str(warning.message) for warning in raised_warnings)\n print(raised_warning_messages)\n for old_param_ in old_params:\n assert warning_msgs[old_param_] in raised_warning_messages\n\n\ndef test_get_markers():\n coords = np.arange(12).reshape((4, 3))\n colors = ['r', 'g', 'black', 'white']\n markers = html_connectome._get_markers(coords, colors)\n assert markers[\"marker_color\"] == [\n '#ff0000', '#007f00', '#000000', '#ffffff']\n assert markers['markers_only']\n con_x = decode(markers['_con_x'], '<f4')\n assert np.allclose(con_x, coords[:, 0])\n\n\ndef test_view_markers():\n coords = np.arange(12).reshape((4, 3))\n colors = ['r', 'g', 'black', 'white']\n html = html_connectome.view_markers(coords, colors)\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_markers(coords)\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_markers(coords, marker_size=15)\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_markers(\n coords, marker_size=np.arange(len(coords)))\n check_html(html, False, 'connectome-plot')\n html = html_connectome.view_markers(\n coords, marker_size=list(range(len(coords))))\n check_html(html, False, 'connectome-plot')\n\n\ndef test_params_deprecation_view_markers():\n \"\"\" Tests whether use of deprecated keyword parameters of view_markers\n raise corrrect warnings.\n \"\"\"\n deprecated_params = {'coords': 'marker_coords',\n 'colors': 'marker_color',\n }\n deprecation_msg = (\n 'The parameter \"{}\" will be removed in 0.6.0 release of Nilearn. 
'\n 'Please use the parameter \"{}\" instead.'\n )\n warning_msgs = {old_: deprecation_msg.format(old_, new_)\n for old_, new_ in deprecated_params.items()\n }\n coords = np.arange(12).reshape((4, 3))\n colors = ['r', 'g', 'black', 'white']\n with warnings.catch_warnings(record=True) as raised_warnings:\n html_connectome.view_markers(coords=coords,\n marker_color=colors,\n )\n html_connectome.view_markers(marker_coords=coords,\n colors=colors,\n )\n html_connectome.view_markers(marker_coords=coords,\n marker_color=colors,\n )\n html_connectome.view_markers(coords,\n colors,\n )\n old_params = ['coords', 'colors']\n assert len(raised_warnings) == 2\n for old_param_, raised_warning_ in zip(old_params, raised_warnings):\n assert warning_msgs[old_param_] == str(raised_warning_.message)\n assert raised_warning_.category is DeprecationWarning\n", "\"\"\"\nTest the multi_nifti_masker module\n\"\"\"\n# Author: Gael Varoquaux\n# License: simplified BSD\nimport shutil\nfrom distutils.version import LooseVersion\nfrom tempfile import mkdtemp\n\nimport nibabel\nimport numpy as np\nimport sklearn\nfrom nibabel import Nifti1Image\nfrom nose import SkipTest\nfrom nose.tools import assert_true, assert_false, assert_raises, assert_equal\nfrom numpy.testing import assert_array_equal\nfrom nilearn._utils.compat import Memory\n\nfrom nilearn._utils.exceptions import DimensionError\nfrom nilearn._utils.testing import assert_raises_regex, write_tmp_imgs\nfrom nilearn.input_data.multi_nifti_masker import MultiNiftiMasker\nfrom nilearn.image import get_data\n\n\ndef test_auto_mask():\n # This mostly a smoke test\n data = np.zeros((9, 9, 9))\n data[2:-2, 2:-2, 2:-2] = 10\n img = Nifti1Image(data, np.eye(4))\n masker = MultiNiftiMasker(mask_args=dict(opening=0))\n # Check that if we have not fit the masker we get a intelligible\n # error\n assert_raises(ValueError, masker.transform, [[img, ]])\n # Check error return due to bad data format\n assert_raises(ValueError, masker.fit, img)\n # Smoke test the fit\n masker.fit([[img]])\n\n # Test mask intersection\n data2 = np.zeros((9, 9, 9))\n data2[1:-3, 1:-3, 1:-3] = 10\n img2 = Nifti1Image(data2, np.eye(4))\n\n masker.fit([[img, img2]])\n assert_array_equal(get_data(masker.mask_img_),\n np.logical_or(data, data2))\n # Smoke test the transform\n masker.transform([[img, ]])\n # It should also work with a 3D image\n masker.transform(img)\n\n # check exception when transform() called without prior fit()\n masker2 = MultiNiftiMasker(mask_img=img)\n assert_raises_regex(\n ValueError,\n 'has not been fitted. 
', masker2.transform, img2)\n\n\ndef test_nan():\n data = np.ones((9, 9, 9))\n data[0] = np.nan\n data[:, 0] = np.nan\n data[:, :, 0] = np.nan\n data[-1] = np.nan\n data[:, -1] = np.nan\n data[:, :, -1] = np.nan\n data[3:-3, 3:-3, 3:-3] = 10\n img = Nifti1Image(data, np.eye(4))\n masker = MultiNiftiMasker(mask_args=dict(opening=0))\n masker.fit([img])\n mask = get_data(masker.mask_img_)\n assert_true(mask[1:-1, 1:-1, 1:-1].all())\n assert_false(mask[0].any())\n assert_false(mask[:, 0].any())\n assert_false(mask[:, :, 0].any())\n assert_false(mask[-1].any())\n assert_false(mask[:, -1].any())\n assert_false(mask[:, :, -1].any())\n\n\ndef test_different_affines():\n # Mask and EIP files with different affines\n mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),\n affine=np.diag((4, 4, 4, 1)))\n epi_img1 = Nifti1Image(np.ones((4, 4, 4, 3)),\n affine=np.diag((2, 2, 2, 1)))\n epi_img2 = Nifti1Image(np.ones((3, 3, 3, 3)),\n affine=np.diag((3, 3, 3, 1)))\n masker = MultiNiftiMasker(mask_img=mask_img)\n epis = masker.fit_transform([epi_img1, epi_img2])\n for this_epi in epis:\n masker.inverse_transform(this_epi)\n\n\ndef test_3d_images():\n # Test that the MultiNiftiMasker works with 3D images\n mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),\n affine=np.diag((4, 4, 4, 1)))\n epi_img1 = Nifti1Image(np.ones((2, 2, 2)),\n affine=np.diag((4, 4, 4, 1)))\n epi_img2 = Nifti1Image(np.ones((2, 2, 2)),\n affine=np.diag((2, 2, 2, 1)))\n masker = MultiNiftiMasker(mask_img=mask_img)\n epis = masker.fit_transform([epi_img1, epi_img2])\n # This is mostly a smoke test\n assert_equal(len(epis), 2)\n\n # verify that 4D mask arguments are refused\n mask_img_4d = Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),\n affine=np.diag((4, 4, 4, 1)))\n masker2 = MultiNiftiMasker(mask_img=mask_img_4d)\n assert_raises_regex(DimensionError,\n \"Input data has incompatible dimensionality: \"\n \"Expected dimension is 3D and you provided \"\n \"a 4D image.\",\n masker2.fit)\n\n\ndef test_joblib_cache():\n from nilearn._utils.compat import hash\n # Dummy mask\n mask = np.zeros((40, 40, 40))\n mask[20, 20, 20] = 1\n mask_img = Nifti1Image(mask, np.eye(4))\n\n with write_tmp_imgs(mask_img, create_files=True) as filename:\n masker = MultiNiftiMasker(mask_img=filename)\n masker.fit()\n mask_hash = hash(masker.mask_img_)\n get_data(masker.mask_img_)\n assert_true(mask_hash == hash(masker.mask_img_))\n # enables to delete \"filename\" on windows\n del masker\n\n\ndef test_shelving():\n\n mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),\n affine=np.diag((4, 4, 4, 1)))\n epi_img1 = Nifti1Image(np.ones((2, 2, 2)),\n affine=np.diag((4, 4, 4, 1)))\n epi_img2 = Nifti1Image(np.ones((2, 2, 2)),\n affine=np.diag((2, 2, 2, 1)))\n cachedir = mkdtemp()\n try:\n masker_shelved = MultiNiftiMasker(mask_img=mask_img,\n memory=Memory(cachedir=cachedir,\n mmap_mode='r',\n verbose=0))\n masker_shelved._shelving = True\n masker = MultiNiftiMasker(mask_img=mask_img)\n epis_shelved = masker_shelved.fit_transform([epi_img1, epi_img2])\n epis = masker.fit_transform([epi_img1, epi_img2])\n for epi_shelved, epi in zip(epis_shelved, epis):\n epi_shelved = epi_shelved.get()\n assert_array_equal(epi_shelved, epi)\n\n epi = masker.fit_transform(epi_img1)\n epi_shelved = masker_shelved.fit_transform(epi_img1)\n epi_shelved = epi_shelved.get()\n assert_array_equal(epi_shelved, epi)\n finally:\n # enables to delete \"filename\" on windows\n del masker\n shutil.rmtree(cachedir, ignore_errors=True)\n\n\ndef 
test_compute_multi_gray_matter_mask():\n # Check mask is correctly is correctly calculated\n imgs = [Nifti1Image(np.random.rand(9, 9, 5), np.eye(4)),\n Nifti1Image(np.random.rand(9, 9, 5), np.eye(4))]\n\n masker = MultiNiftiMasker(mask_strategy='template')\n masker.fit(imgs)\n\n # Check that the order of the images does not change the output\n masker2 = MultiNiftiMasker(mask_strategy='template')\n masker2.fit(imgs[::-1])\n\n mask = masker.mask_img_\n mask2 = masker2.mask_img_\n\n mask_ref = np.zeros((9, 9, 5))\n mask_ref[2:7, 2:7, 2] = 1\n\n np.testing.assert_array_equal(get_data(mask), mask_ref)\n np.testing.assert_array_equal(get_data(mask2), mask_ref)\n\n\ndef test_dtype():\n data = np.zeros((9, 9, 9), dtype=np.float64)\n data[2:-2, 2:-2, 2:-2] = 10\n img = Nifti1Image(data, np.eye(4))\n\n masker = MultiNiftiMasker(dtype='auto')\n masker.fit([[img]])\n\n masked_img = masker.transform([[img]])\n assert(masked_img[0].dtype == np.float32)\n\n\ndef test_standardization():\n data_shape = (9, 9, 5)\n n_samples = 500\n\n signals = np.random.randn(2, np.prod(data_shape), n_samples)\n means = np.random.randn(2, np.prod(data_shape), 1) * 50 + 1000\n signals += means\n\n img1 = Nifti1Image(signals[0].reshape(data_shape + (n_samples,)),\n np.eye(4))\n img2 = Nifti1Image(signals[1].reshape(data_shape + (n_samples,)),\n np.eye(4))\n\n mask = Nifti1Image(np.ones(data_shape), np.eye(4))\n\n # z-score\n masker = MultiNiftiMasker(mask, standardize='zscore')\n trans_signals = masker.fit_transform([img1, img2])\n\n for ts in trans_signals:\n np.testing.assert_almost_equal(ts.mean(0), 0)\n np.testing.assert_almost_equal(ts.std(0), 1)\n\n # psc\n masker = MultiNiftiMasker(mask, standardize='psc')\n trans_signals = masker.fit_transform([img1, img2])\n\n for ts, s in zip(trans_signals, signals):\n np.testing.assert_almost_equal(ts.mean(0), 0)\n np.testing.assert_almost_equal(ts,\n (s / s.mean(1)[:, np.newaxis] *\n 100 - 100).T)\n" ]
[ [ "numpy.asarray", "numpy.eye", "numpy.allclose", "numpy.arange", "numpy.diag" ], [ "numpy.logical_or", "numpy.random.rand", "numpy.zeros", "numpy.testing.assert_array_equal", "numpy.ones", "numpy.eye", "numpy.prod", "numpy.diag" ] ]
IkeoluwaStat/QFT
[ "fe36763e90e3601dfab2a78a08962113343efd0c" ]
[ "MetafierV2.py" ]
[ "# Metafier V2: writes directly to output.mc\r\n# Uses numpy and memoization to speed up a crap ton & compress data a bit\r\n# ===REQUIRES metatemplate11.mc===\r\n\r\nimport golly as g\r\nimport numpy as np\r\nfrom shutil import copyfile\r\n\r\n#Get the selection\r\nselection = g.getselrect()\r\nif not selection: g.exit(\"No selection.\")\r\n\r\n#Get the cells in the selection\r\ncells = g.getcells(selection)\r\nif not cells: g.exit(\"No pattern in selection\")\r\nif len(cells) % 3: cells = cells[:-1]\r\n\r\nselw = selection[2]\r\nselh = selection[3]\r\n\r\npatternsize = 1 << int(np.ceil(np.log2(selh | selw)))\r\n\r\nmetapattern = np.zeros((patternsize, patternsize))\r\n\r\n#Pseudo-convolution, to detect diagonal neighbors\r\n# +1 +0 +2\r\n# +0 *16 +0\r\n# +4 +0 +8\r\nfor cell in np.reshape(cells, (-1, 3)):\r\n selx = cell[0] - selection[0]\r\n sely = cell[1] - selection[1]\r\n\r\n metapattern[sely][selx] += 16 * cell[2]\r\n if sely:\r\n if selx:\r\n metapattern[sely - 1][selx - 1] += 8\r\n if selx + 1 < selw:\r\n metapattern[sely - 1][selx + 1] += 4\r\n if sely + 1 < selh:\r\n if selx:\r\n metapattern[sely + 1][selx - 1] += 2\r\n if selx + 1 < selw:\r\n metapattern[sely + 1][selx + 1] += 1\r\n\r\n#Remove all B/S cells\r\nmetapattern[metapattern < 32] = np.nan\r\nmetapattern += 5630 - 32 #5632 is starting point of 11s in template\r\nmetapattern[np.isnan(metapattern)] = 0\r\nmetapattern = metapattern.astype(int)\r\n\r\n#Using metatemplate11, memoization, and some recursion\r\ndef createLine(pattern, outfile, linenum = [5726], memo = {}): #linenum and memo are mutable function arguments, which are only initialized during function definition\r\n if tuple(pattern.ravel().tolist()) not in memo: #If we haven't seen this type of pattern before, let's remember it\r\n if pattern.shape[0] == 2: #Pattern is a leaf, write leaf line\r\n outfile.write('{} {} {} {} {}\\n'.format(pattern.shape[0].bit_length() + 10,\r\n pattern[0, 0],\r\n pattern[0, 1],\r\n pattern[1, 0],\r\n pattern[1, 1]))\r\n else: #Pattern is a branch, keep going down quadtree\r\n subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[0] >> 1).swapaxes(1,2)\r\n outfile.write('{} {} {} {} {}\\n'.format(pattern.shape[0].bit_length() + 10,\r\n createLine(subpatterns[0, 0], outfile),\r\n createLine(subpatterns[0, 1], outfile),\r\n createLine(subpatterns[1, 0], outfile),\r\n createLine(subpatterns[1, 1], outfile)))\r\n memo[tuple(pattern.ravel().tolist())] = linenum[0]\r\n linenum[0] += 1\r\n return memo[tuple(pattern.ravel().tolist())]\r\n\r\ncopyfile('metatemplate11.mc', 'output.mc')\r\nwith open('output.mc', 'a') as outputfile:\r\n createLine(metapattern, outputfile)\r\n\r\n#Display output.mc\r\ng.addlayer()\r\ng.open('output.mc')\r\n#TODO: Use metatemplate10?\r\n" ]
[ [ "numpy.isnan", "numpy.log2", "numpy.reshape", "numpy.zeros" ] ]
Can-Zhao/MONAI
[ "e0db5a564225a7cb62e7a23df97267019006302f", "e0db5a564225a7cb62e7a23df97267019006302f", "e0db5a564225a7cb62e7a23df97267019006302f" ]
[ "tests/test_integration_workflows_gan.py", "tests/test_spacingd.py", "monai/networks/blocks/warp.py" ]
[ "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\nfrom glob import glob\n\nimport nibabel as nib\nimport numpy as np\nimport torch\n\nimport monai\nfrom monai.data import create_test_image_2d\nfrom monai.engines import GanTrainer\nfrom monai.engines.utils import GanKeys as Keys\nfrom monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler\nfrom monai.networks import normal_init\nfrom monai.networks.nets import Discriminator, Generator\nfrom monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord\nfrom monai.utils import set_determinism\nfrom tests.utils import DistTestCase, TimedCall, skip_if_quick\n\n\ndef run_training_test(root_dir, device=\"cuda:0\"):\n real_images = sorted(glob(os.path.join(root_dir, \"img*.nii.gz\")))\n train_files = [{\"reals\": img} for img in zip(real_images)]\n\n # prepare real data\n train_transforms = Compose(\n [\n LoadImaged(keys=[\"reals\"]),\n AsChannelFirstd(keys=[\"reals\"]),\n ScaleIntensityd(keys=[\"reals\"]),\n RandFlipd(keys=[\"reals\"], prob=0.5),\n ToTensord(keys=[\"reals\"]),\n ]\n )\n train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)\n train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)\n\n learning_rate = 2e-4\n betas = (0.5, 0.999)\n real_label = 1\n fake_label = 0\n\n # create discriminator\n disc_net = Discriminator(\n in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5\n ).to(device)\n disc_net.apply(normal_init)\n disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)\n disc_loss_criterion = torch.nn.BCELoss()\n\n def discriminator_loss(gen_images, real_images):\n real = real_images.new_full((real_images.shape[0], 1), real_label)\n gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)\n realloss = disc_loss_criterion(disc_net(real_images), real)\n genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)\n return torch.div(torch.add(realloss, genloss), 2)\n\n # create generator\n latent_size = 64\n gen_net = Generator(\n latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]\n )\n gen_net.apply(normal_init)\n gen_net.conv.add_module(\"activation\", torch.nn.Sigmoid())\n gen_net = gen_net.to(device)\n gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)\n gen_loss_criterion = torch.nn.BCELoss()\n\n def generator_loss(gen_images):\n output = disc_net(gen_images)\n cats = output.new_full(output.shape, real_label)\n return gen_loss_criterion(output, cats)\n\n key_train_metric = None\n\n train_handlers = [\n StatsHandler(\n name=\"training_loss\", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}\n ),\n TensorBoardStatsHandler(\n log_dir=root_dir,\n tag_name=\"training_loss\",\n 
output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},\n ),\n CheckpointSaver(\n save_dir=root_dir, save_dict={\"g_net\": gen_net, \"d_net\": disc_net}, save_interval=2, epoch_level=True\n ),\n ]\n\n disc_train_steps = 2\n num_epochs = 5\n\n trainer = GanTrainer(\n device,\n num_epochs,\n train_loader,\n gen_net,\n gen_opt,\n generator_loss,\n disc_net,\n disc_opt,\n discriminator_loss,\n d_train_steps=disc_train_steps,\n latent_shape=latent_size,\n key_train_metric=key_train_metric,\n train_handlers=train_handlers,\n )\n trainer.run()\n\n return trainer.state\n\n\n@skip_if_quick\nclass IntegrationWorkflowsGAN(DistTestCase):\n def setUp(self):\n set_determinism(seed=0)\n\n self.data_dir = tempfile.mkdtemp()\n for i in range(40):\n im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)\n n = nib.Nifti1Image(im, np.eye(4))\n nib.save(n, os.path.join(self.data_dir, f\"img{i:d}.nii.gz\"))\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu:0\")\n monai.config.print_config()\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n def tearDown(self):\n set_determinism(seed=None)\n shutil.rmtree(self.data_dir)\n\n @TimedCall(seconds=200, daemon=False)\n def test_training(self):\n torch.manual_seed(0)\n\n finish_state = run_training_test(self.data_dir, device=self.device)\n\n # assert GAN training finished\n self.assertEqual(finish_state.iteration, 100)\n self.assertEqual(finish_state.epoch, 5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import Spacingd\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTESTS: List[Tuple] = []\nfor p in TEST_NDARRAYS:\n TESTS.append(\n (\n \"spacing 3d\",\n {\"image\": p(np.ones((2, 10, 15, 20))), \"image_meta_dict\": {\"affine\": p(np.eye(4))}},\n dict(keys=\"image\", pixdim=(1, 2, 1.4)),\n (\"image\", \"image_meta_dict\", \"image_transforms\"),\n (2, 10, 8, 15),\n p(np.diag([1, 2, 1.4, 1.0])),\n )\n )\n TESTS.append(\n (\n \"spacing 2d\",\n {\"image\": np.ones((2, 10, 20)), \"image_meta_dict\": {\"affine\": np.eye(3)}},\n dict(keys=\"image\", pixdim=(1, 2)),\n (\"image\", \"image_meta_dict\", \"image_transforms\"),\n (2, 10, 10),\n np.diag((1, 2, 1)),\n )\n )\n TESTS.append(\n (\n \"spacing 2d no metadata\",\n {\"image\": np.ones((2, 10, 20))},\n dict(keys=\"image\", pixdim=(1, 2)),\n (\"image\", \"image_meta_dict\", \"image_transforms\"),\n (2, 10, 10),\n np.diag((1, 2, 1)),\n )\n )\n TESTS.append(\n (\n \"interp all\",\n {\n \"image\": np.arange(20).reshape((2, 1, 10)),\n \"seg\": np.ones((2, 1, 10)),\n \"image_meta_dict\": {\"affine\": np.eye(4)},\n \"seg_meta_dict\": {\"affine\": np.eye(4)},\n },\n dict(keys=(\"image\", \"seg\"), mode=\"nearest\", pixdim=(1, 0.2)),\n (\"image\", \"image_meta_dict\", 
\"image_transforms\", \"seg\", \"seg_meta_dict\", \"seg_transforms\"),\n (2, 1, 46),\n np.diag((1, 0.2, 1, 1)),\n )\n )\n TESTS.append(\n (\n \"interp sep\",\n {\n \"image\": np.ones((2, 1, 10)),\n \"seg\": np.ones((2, 1, 10)),\n \"image_meta_dict\": {\"affine\": np.eye(4)},\n \"seg_meta_dict\": {\"affine\": np.eye(4)},\n },\n dict(keys=(\"image\", \"seg\"), mode=(\"bilinear\", \"nearest\"), pixdim=(1, 0.2)),\n (\"image\", \"image_meta_dict\", \"image_transforms\", \"seg\", \"seg_meta_dict\", \"seg_transforms\"),\n (2, 1, 46),\n np.diag((1, 0.2, 1, 1)),\n )\n )\n\n\nclass TestSpacingDCase(unittest.TestCase):\n @parameterized.expand(TESTS)\n def test_spacingd(self, _, data, kw_args, expected_keys, expected_shape, expected_affine):\n res = Spacingd(**kw_args)(data)\n if isinstance(data[\"image\"], torch.Tensor):\n self.assertEqual(data[\"image\"].device, res[\"image\"].device)\n self.assertEqual(expected_keys, tuple(sorted(res)))\n np.testing.assert_allclose(res[\"image\"].shape, expected_shape)\n assert_allclose(res[\"image_meta_dict\"][\"affine\"], expected_affine)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import List\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom monai.config.deviceconfig import USE_COMPILED\nfrom monai.networks.layers.spatial_transforms import grid_pull\nfrom monai.networks.utils import meshgrid_ij\nfrom monai.utils import GridSampleMode, GridSamplePadMode, optional_import\n\n_C, _ = optional_import(\"monai._C\")\n\n__all__ = [\"Warp\", \"DVF2DDF\"]\n\n\nclass Warp(nn.Module):\n \"\"\"\n Warp an image with given dense displacement field (DDF).\n \"\"\"\n\n def __init__(self, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.BORDER.value):\n \"\"\"\n For pytorch native APIs, the possible values are:\n\n - mode: ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"``.\n - padding_mode: ``\"zeros\"``, ``\"border\"``, ``\"reflection\"``\n\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n\n For MONAI C++/CUDA extensions, the possible values are:\n\n - mode: ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"``, 0, 1, ...\n - padding_mode: ``\"zeros\"``, ``\"border\"``, ``\"reflection\"``, 0, 1, ...\n\n See also: :py:class:`monai.networks.layers.grid_pull`\n \"\"\"\n super().__init__()\n # resolves _interp_mode for different methods\n\n if USE_COMPILED:\n if mode in (inter.value for inter in GridSampleMode):\n mode = GridSampleMode(mode)\n if mode == GridSampleMode.BILINEAR:\n mode = 1\n elif mode == GridSampleMode.NEAREST:\n mode = 0\n elif mode == GridSampleMode.BICUBIC:\n mode = 3\n else:\n mode = 1 # default to linear\n self._interp_mode = mode\n else:\n warnings.warn(\"monai.networks.blocks.Warp: Using PyTorch native grid_sample.\")\n self._interp_mode = GridSampleMode(mode).value\n\n # resolves _padding_mode for different methods\n if USE_COMPILED:\n if padding_mode in (pad.value 
for pad in GridSamplePadMode):\n padding_mode = GridSamplePadMode(padding_mode)\n if padding_mode == GridSamplePadMode.ZEROS:\n padding_mode = 7\n elif padding_mode == GridSamplePadMode.BORDER:\n padding_mode = 0\n elif padding_mode == GridSamplePadMode.REFLECTION:\n padding_mode = 1\n else:\n padding_mode = 0 # default to nearest\n self._padding_mode = padding_mode\n else:\n self._padding_mode = GridSamplePadMode(padding_mode).value\n\n @staticmethod\n def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor:\n mesh_points = [torch.arange(0, dim) for dim in ddf.shape[2:]]\n grid = torch.stack(meshgrid_ij(*mesh_points), dim=0) # (spatial_dims, ...)\n grid = torch.stack([grid] * ddf.shape[0], dim=0) # (batch, spatial_dims, ...)\n grid = grid.to(ddf)\n return grid\n\n def forward(self, image: torch.Tensor, ddf: torch.Tensor):\n \"\"\"\n Args:\n image: Tensor in shape (batch, num_channels, H, W[, D])\n ddf: Tensor in the same spatial size as image, in shape (batch, ``spatial_dims``, H, W[, D])\n\n Returns:\n warped_image in the same shape as image (batch, num_channels, H, W[, D])\n \"\"\"\n spatial_dims = len(image.shape) - 2\n if spatial_dims not in (2, 3):\n raise NotImplementedError(f\"got unsupported spatial_dims={spatial_dims}, currently support 2 or 3.\")\n ddf_shape = (image.shape[0], spatial_dims) + tuple(image.shape[2:])\n if ddf.shape != ddf_shape:\n raise ValueError(\n f\"Given input {spatial_dims}-d image shape {image.shape}, \" f\"the input DDF shape must be {ddf_shape}.\"\n )\n grid = self.get_reference_grid(ddf) + ddf\n grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims)\n\n if not USE_COMPILED: # pytorch native grid_sample\n for i, dim in enumerate(grid.shape[1:-1]):\n grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1\n index_ordering: List[int] = list(range(spatial_dims - 1, -1, -1))\n grid = grid[..., index_ordering] # z, y, x -> x, y, z\n return F.grid_sample(\n image, grid, mode=self._interp_mode, padding_mode=f\"{self._padding_mode}\", align_corners=True\n )\n\n # using csrc resampling\n return grid_pull(image, grid, bound=self._padding_mode, extrapolate=True, interpolation=self._interp_mode)\n\n\nclass DVF2DDF(nn.Module):\n \"\"\"\n Layer calculates a dense displacement field (DDF) from a dense velocity field (DVF)\n with scaling and squaring.\n\n Adapted from:\n DeepReg (https://github.com/DeepRegNet/DeepReg)\n\n \"\"\"\n\n def __init__(\n self, num_steps: int = 7, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.ZEROS.value\n ):\n super().__init__()\n if num_steps <= 0:\n raise ValueError(f\"expecting positive num_steps, got {num_steps}\")\n self.num_steps = num_steps\n self.warp_layer = Warp(mode=mode, padding_mode=padding_mode)\n\n def forward(self, dvf):\n \"\"\"\n Args:\n dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D])\n\n Returns:\n a dense displacement field\n \"\"\"\n ddf: torch.Tensor = dvf / (2 ** self.num_steps)\n for _ in range(self.num_steps):\n ddf = ddf + self.warp_layer(image=ddf, ddf=ddf)\n return ddf\n" ]
[ [ "torch.nn.Sigmoid", "torch.add", "numpy.eye", "torch.manual_seed", "torch.cuda.is_available", "torch.nn.BCELoss" ], [ "numpy.testing.assert_allclose", "numpy.ones", "numpy.eye", "numpy.arange", "numpy.diag" ], [ "torch.nn.functional.grid_sample", "torch.stack", "torch.arange" ] ]
bccp/bananaplots
[ "dbfe107207e07351c7d7125430fde16fb2731cc2" ]
[ "bananas/model.py" ]
[ "import numpy\n# FIXME: copy the functions here\n\nfrom sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp\n\ndef sample_gaussian2(means, cv, size, random_state, mins, maxes):\n def once(size1):\n g = random_state.multivariate_normal(means, cv, size1).T\n g = g.reshape(len(means), -1)\n mask = (g >= mins[:, None]).all(axis=0)\n mask &= (g <= maxes[:, None]).all(axis=0)\n return g[:, mask]\n\n g = once(size)\n generated = size\n\n while g.shape[1] < size:\n fac = 1.0 * g.shape[1] / size\n togen = (size - g.shape[1]) * generated // g.shape[1]\n g1 = once(togen)\n generated = generated + togen\n g = numpy.append(g, g1, axis=1)\n return g[:, :size]\n\nclass GMM(object):\n def __init__(self, weights, means, covs, lims):\n self.weights = numpy.array(weights)\n self.means = numpy.array(means)\n self.covs = numpy.array(covs)\n self.lims = numpy.array(lims)\n\n [nc] = self.weights.shape\n\n assert self.means.shape[0] == nc\n [nc, nf] = self.means.shape\n\n assert self.covs.shape[0] == nc\n assert self.covs.shape[1] == nf\n assert self.covs.shape[2] == nf\n [nc, nf, nf] = self.covs.shape\n\n assert self.lims.shape[0] == nf\n assert self.lims.shape[1] == 2\n\n def score(self, X, return_responsibilities=False):\n nc = len(self.weights)\n X = numpy.array(X)\n if X.ndim == 1:\n X = X[:, None]\n\n if X.shape[1] != self.means.shape[1]:\n raise ValueError('The shape of X is not compatible with self')\n\n mins = self.lims[:, 0]\n maxes = self.lims[:, 1]\n\n lpr = numpy.log(self.weights) + \\\n log_multivariate_normal_density(X,\n self.means,\n self.covs, 'full')\n mask = (X >= mins[None, :]).all(axis=-1)\n mask &= (X <= maxes[None, :]).all(axis=-1)\n logprob = logsumexp(lpr, axis=1)\n logprob[~mask] = -numpy.inf\n if return_responsibilities:\n responsibilities = numpy.exp(lpr - logprob[:, None])\n responsibilities[~mask] = 0\n return logprob, responsibilities\n return logprob\n\n def marginalize(self, axes):\n return GMM(self.weights, self.means[..., axes], self.covs[..., axes][..., axes, :], self.lims[axes])\n\n def sample(self, size, random_state=None):\n \"\"\"Generate random samples from the model.\n Returns\n -------\n X : array_like, shape (n_samples, n_features)\n List of samples\n \"\"\"\n\n if random_state is None:\n random_state = numpy.random\n\n mins = self.lims[:, 0]\n maxes = self.lims[:, 1]\n\n X = numpy.empty(size, ('f8', (self.means.shape[1],)))\n # decide which component to use for each sample\n comps = random_state.choice(len(self.weights), p=self.weights, size=size)\n # for each component, generate all needed samples\n for comp in range(len(self.weights)):\n # occurrences of current component in X\n comp_in_X = (comp == comps)\n # number of those occurrences\n num_comp_in_X = comp_in_X.sum()\n if num_comp_in_X > 0:\n cv = self.covs[comp]\n g = sample_gaussian2(\n self.means[comp], cv,\n num_comp_in_X, random_state, mins, maxes).T\n X[comp_in_X] = g\n return X\n\n @classmethod\n def fit(kls, nc, X, lims):\n # FIXME: get rid of this and add weights support\n from sklearn import mixture\n\n # XXX: Do not use DPGMM because the normalization is buggy\n # https://github.com/scikit-learn/scikit-learn/issues/7371\n\n model = mixture.GMM(nc, covariance_type='full', n_iter=1000)\n model.fit(X)\n\n if not model.converged_:\n raise ValueError(\"Your data is strange. 
Gaussian mixture failed to converge\")\n\n return kls(model.weights_, model.means_, model.covars_, lims)\n\nclass Confidence(object):\n def __init__(self, model, confidence_table):\n self.model = model\n self.confidence_table = confidence_table\n \n def score(self, sc):\n x, y = self.confidence_table\n return numpy.interp(sc, x, y, left=1., right=0.)\n\n @classmethod\n def fit(kls, model, nsample=4*1024, vmin=-5, vmax=0, nb=100):\n X = model.sample(nsample)\n sc = model.score(X)\n confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)\n # FIXME: add weight support here\n sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.)\n confidence_table = numpy.array([sc_cl, confidence_levels])\n return kls(model, confidence_table)\n\nclass CombinedModel(object):\n def __init__(self, models):\n self.models = models\n\n def score(self, X):\n return sum([model.score(X) for model in self.models])\n\n def marginalize(self, axes):\n return CombinedModel([\n model.marginalize(axes) for model in self.models])\n\n def sample(self, nsample, random_state=None):\n if random_state is None:\n random_state = numpy.random\n\n def once(size):\n X = self.models[0].sample(size, random_state)\n nf = X.shape[-1]\n lnprob = sum([model.score(X) for model in self.models[1:]])\n prob = numpy.exp(lnprob)\n prob /= prob.max()\n keep = random_state.rand(len(X)) < prob\n return X[keep].reshape(-1, nf)\n g = once(nsample)\n ng = nsample\n while len(g) < nsample:\n togen = (nsample - len(g)) * ng // len(g)\n g1 = once(togen)\n ng = ng + togen\n g = numpy.append(g, g1, axis=0)\n return g[:nsample]\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.log", "numpy.percentile", "sklearn.mixture.gmm.log_multivariate_normal_density", "numpy.interp", "numpy.exp", "sklearn.mixture.GMM", "numpy.append", "sklearn.mixture.gmm.logsumexp", "numpy.logspace" ] ]
THinnerichs/MiS-Information-Clustering
[ "597c70e1283222e0e841e24f6805b967aaf3c9e0" ]
[ "src/scripts/segmentation/baselines/kmeans_and_sift.py" ]
[ "from __future__ import print_function\n\nimport argparse\nimport os\nimport pickle\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\nimport vlfeat # calls constructor\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \\\n _acc\nfrom src.utils.segmentation.data import make_Coco_dataloaders, \\\n make_Potsdam_dataloaders\n\nSIFT_DLEN = 128\nSIFT_STEP = 10\n\n\ndef _get_vectorised_sift_samples(archetype_config, dataloader):\n # returns num unmasked pixels x SIFT_DLEN, in uint8 format\n # operates on greyscale 128 bit images\n\n num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz\n num_imgs_max = num_batches * batch_sz # estimate\n img_sz = archetype_config.input_sz\n\n # cluster individual (box central) pixels\n desc_side = int(img_sz / SIFT_STEP)\n print(\"img sz %d, desc_side %d\" % (img_sz, desc_side))\n sys.stdout.flush()\n\n descs_all = np.zeros((num_imgs_max, desc_side * desc_side,\n SIFT_DLEN), dtype=np.uint8)\n masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=np.bool)\n labels_all = None\n actual_num_imgs = 0\n\n # when descriptor matrix flattened, goes along rows first (rows change slow)\n central_inds_h = (np.arange(desc_side) * SIFT_STEP +\n (SIFT_STEP / 2)).reshape((desc_side, 1)).repeat(desc_side,\n axis=1)\n central_inds_w = (np.arange(desc_side) * SIFT_STEP +\n (SIFT_STEP / 2)).reshape((1, desc_side)).repeat(desc_side,\n axis=0)\n central_inds_h, central_inds_w = central_inds_h.reshape(-1), \\\n central_inds_w.reshape(-1)\n\n for b_i, batch in enumerate(dataloader):\n if len(batch) == 3: # test dataloader\n store_labels = True\n\n if (labels_all is None):\n labels_all = np.zeros((num_imgs_max, desc_side * desc_side),\n dtype=np.int32)\n imgs, labels, masks = batch\n labels = labels.cpu().numpy().astype(np.int32)\n else: # training dataloader\n store_labels = False\n imgs, _, _, masks = batch\n\n # imgs currently channel first, [0-1] range, floats\n imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)\n masks = masks.cpu().numpy().astype(np.bool)\n\n curr_batch_sz, h, w, c = imgs.shape\n assert (h == archetype_config.input_sz and w == archetype_config.input_sz\n and c == archetype_config.in_channels)\n if b_i < num_batches - 1:\n assert (batch_sz == curr_batch_sz)\n\n start = b_i * batch_sz\n for i in range(curr_batch_sz):\n grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)\n locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)\n descs = descs.transpose((1, 0)) # 40*40, 128\n descs = descs.reshape(-1, SIFT_DLEN) # rows change slowest\n\n # get the corresponding box central mask/label\n mask = masks[i][central_inds_h, central_inds_w]\n\n offset = start + i\n descs_all[offset, :, :] = descs\n masks_all[offset, :] = mask\n if store_labels:\n label = labels[i][central_inds_h, central_inds_w]\n labels_all[offset, :] = label\n\n actual_num_imgs += curr_batch_sz\n\n descs_all = descs_all[:actual_num_imgs, :, :]\n masks_all = masks_all[:actual_num_imgs, :]\n num_unmasked = masks_all.sum()\n if store_labels:\n labels_all = labels_all[:actual_num_imgs, :]\n samples_labels = labels_all[masks_all].reshape(-1)\n assert (samples_labels.shape[0] == num_unmasked)\n\n samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)\n assert (samples.shape[0] == num_unmasked)\n\n if not store_labels:\n return samples\n else:\n return samples, samples_labels\n\n\ndef _get_vectorised_colour_samples(archetype_config, dataloader):\n 
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz\n num_imgs_max = num_batches * batch_sz # estimate\n img_sz = archetype_config.input_sz\n\n # cluster individual pixels\n imgs_all = np.zeros(\n (num_imgs_max, img_sz, img_sz, archetype_config.in_channels),\n dtype=np.uint8)\n masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)\n labels_all = None\n actual_num_imgs = 0\n for b_i, batch in enumerate(dataloader):\n if len(batch) == 3:\n store_labels = True\n\n if (labels_all is None):\n labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)\n imgs, labels, masks = batch\n labels = labels.cpu().numpy().astype(np.int32)\n else:\n store_labels = False\n imgs, _, _, masks = batch\n\n # channels last\n imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)\n masks = masks.cpu().numpy().astype(np.bool)\n\n curr_batch_sz, h, w, c = imgs.shape\n assert (h == archetype_config.input_sz and w == archetype_config.input_sz\n and c == archetype_config.in_channels)\n if b_i < num_batches - 1:\n assert (batch_sz == curr_batch_sz)\n\n start = b_i * batch_sz\n imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs\n masks_all[start:(start + curr_batch_sz), :, :] = masks\n if store_labels:\n labels_all[start:(start + curr_batch_sz), :, :] = labels\n\n actual_num_imgs += curr_batch_sz\n\n imgs_all = imgs_all[:actual_num_imgs, :, :, :]\n masks_all = masks_all[:actual_num_imgs, :, :]\n num_unmasked = masks_all.sum()\n if store_labels:\n labels_all = labels_all[:actual_num_imgs, :, :]\n samples_labels = labels_all[masks_all].reshape(-1)\n assert (samples_labels.shape[0] == num_unmasked)\n\n samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)\n assert (samples.shape[0] == num_unmasked)\n\n if not store_labels:\n return samples\n else:\n return samples, samples_labels\n\n\ndef main():\n # based on segmentation_multioutput_twohead - we pass in the config of the\n # IID run we are comparing against, so the settings can be copied\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_ind\", type=int, required=True)\n parser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\n parser.add_argument(\"--IID_model_ind\", type=int, required=True)\n parser.add_argument(\"--max_num_train\", type=int, required=True)\n parser.add_argument(\"--test_code\", default=False, action=\"store_true\")\n parser.add_argument(\"--do_sift\", default=False, action=\"store_true\")\n\n config = parser.parse_args()\n config.out_dir = os.path.join(config.out_root, str(config.model_ind))\n if not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\n archetype_config_path = os.path.join(config.out_root,\n str(config.IID_model_ind),\n \"config.pickle\")\n print(\"Loading archetype config from: %s\" % archetype_config_path)\n with open(archetype_config_path, \"rb\") as config_f:\n archetype_config = pickle.load(config_f)\n assert (config.IID_model_ind == archetype_config.model_ind)\n assert (archetype_config.mode == \"IID\") # compare against fully unsup\n\n sample_fn = _get_vectorised_colour_samples\n if config.do_sift:\n sample_fn = _get_vectorised_sift_samples\n\n # set it to be only rgb (and ir if nec) but no sobel - we're clustering\n # single pixel colours\n archetype_config.include_rgb = True\n archetype_config.no_sobel = True\n if \"Coco\" in archetype_config.dataset:\n assert (not archetype_config.using_IR)\n archetype_config.in_channels = 3\n elif archetype_config.dataset == 
\"Potsdam\": # IR\n assert (archetype_config.using_IR)\n archetype_config.in_channels = 4\n\n # Data\n # -------------------------------------------------------------------------\n if \"Coco\" in archetype_config.dataset:\n dataloaders_head_A, mapping_assignment_dataloader, \\\n mapping_test_dataloader = \\\n make_Coco_dataloaders(archetype_config)\n\n elif archetype_config.dataset == \"Potsdam\":\n dataloaders_head_A, mapping_assignment_dataloader, \\\n mapping_test_dataloader = \\\n make_Potsdam_dataloaders(archetype_config)\n else:\n raise NotImplementedError\n\n # unlike in clustering script for STL - isn't any data from unknown classes\n dataloaders_head_B = dataloaders_head_A\n\n # networks and optimisers\n # ------------------------------------------------------\n assert (archetype_config.num_dataloaders == 1)\n dataloader = dataloaders_head_B[0]\n\n samples = sample_fn(archetype_config, dataloader)\n print(\"got training samples\")\n sys.stdout.flush()\n\n if config.test_code:\n print(\"testing code, taking 10000 samples only\")\n samples = samples[:10000, :]\n else:\n num_samples_train = min(samples.shape[0], config.max_num_train)\n print(\"taking %d samples\" % num_samples_train)\n chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,\n replace=False)\n samples = samples[chosen_inds, :]\n print(samples.shape)\n sys.stdout.flush()\n\n kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(\n samples)\n print(\"trained kmeans\")\n sys.stdout.flush()\n\n # use mapping assign to assign output_k=gt_k to gt_k\n # and also assess on its predictions, since it's identical to\n # mapping_test_dataloader\n assign_samples, assign_labels = sample_fn(archetype_config,\n mapping_assignment_dataloader)\n num_samples = assign_samples.shape[0]\n assign_preds = kmeans.predict(assign_samples)\n print(\"finished prediction for mapping assign/test data\")\n sys.stdout.flush()\n\n assign_preds = torch.from_numpy(assign_preds).cuda()\n assign_labels = torch.from_numpy(assign_labels).cuda()\n\n if archetype_config.eval_mode == \"hung\":\n match = _hungarian_match(assign_preds, assign_labels,\n preds_k=archetype_config.gt_k,\n targets_k=archetype_config.gt_k)\n elif archetype_config.eval_mode == \"orig\": # flat!\n match = _original_match(assign_preds, assign_labels,\n preds_k=archetype_config.gt_k,\n targets_k=archetype_config.gt_k)\n elif archetype_config.eval_mode == \"orig_soft\":\n assert (False) # not used\n\n # reorder predictions to be same cluster assignments as gt_k\n found = torch.zeros(archetype_config.gt_k)\n reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()\n for pred_i, target_i in match:\n reordered_preds[assign_preds == pred_i] = target_i\n found[pred_i] = 1\n assert (found.sum() == archetype_config.gt_k) # each output_k must get mapped\n\n acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)\n\n print(\"got acc %f\" % acc)\n config.epoch_acc = [acc]\n config.centroids = kmeans.cluster_centers_\n config.match = match\n\n # write results and centroids to model_ind output file\n with open(os.path.join(config.out_dir, \"config.pickle\"), \"w\") as outfile:\n pickle.dump(config, outfile)\n\n with open(os.path.join(config.out_dir, \"config.txt\"), \"w\") as text_file:\n text_file.write(\"%s\" % config)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.zeros", "numpy.random.choice", "sklearn.cluster.MiniBatchKMeans", "numpy.zeros", "torch.from_numpy", "numpy.arange" ] ]
garston2/tensorflow
[ "bbe056e5a0ab81b67fcb6053400812b3d5805fc7" ]
[ "tensorflow/tensorboard/plugins/projector/projector_plugin_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for the Embedding Projector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport io\nimport json\nimport os\nimport numpy as np\n\nfrom werkzeug import test as werkzeug_test\nfrom werkzeug import wrappers\n\nfrom google.protobuf import text_format\nfrom tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.tensorboard.backend import application\nfrom tensorflow.tensorboard.backend.event_processing import event_multiplexer\nfrom tensorflow.tensorboard.plugins.projector import projector_plugin\n\n\nclass ProjectorAppTest(test.TestCase):\n\n def setUp(self):\n self.log_dir = self.get_temp_dir()\n\n def testRunsWithValidCheckpoint(self):\n self._GenerateProjectorTestData()\n self._SetupWSGIApp()\n run_json = self._GetJson('/data/plugin/projector/runs')\n self.assertEqual(run_json, ['.'])\n\n def testRunsWithNoCheckpoint(self):\n self._SetupWSGIApp()\n run_json = self._GetJson('/data/plugin/projector/runs')\n self.assertEqual(run_json, [])\n\n def testRunsWithInvalidModelCheckpointPath(self):\n checkpoint_file = os.path.join(self.log_dir, 'checkpoint')\n f = open(checkpoint_file, 'w')\n f.write('model_checkpoint_path: \"does_not_exist\"\\n')\n f.write('all_model_checkpoint_paths: \"does_not_exist\"\\n')\n f.close()\n self._SetupWSGIApp()\n\n run_json = self._GetJson('/data/plugin/projector/runs')\n self.assertEqual(run_json, [])\n\n def testInfoWithValidCheckpoint(self):\n self._GenerateProjectorTestData()\n self._SetupWSGIApp()\n\n info_json = self._GetJson('/data/plugin/projector/info?run=.')\n self.assertItemsEqual(info_json['embeddings'], [{\n 'tensorShape': [1, 2],\n 'tensorName': 'var1'\n }, {\n 'tensorShape': [10, 10],\n 'tensorName': 'var2'\n }, {\n 'tensorShape': [100, 100],\n 'tensorName': 'var3'\n }])\n\n def testTensorWithValidCheckpoint(self):\n self._GenerateProjectorTestData()\n self._SetupWSGIApp()\n\n url = '/data/plugin/projector/tensor?run=.&name=var1'\n tensor_bytes = self._Get(url).data\n tensor = np.reshape(np.fromstring(tensor_bytes, dtype='float32'), [1, 2])\n expected_tensor = np.array([[6, 6]], dtype='float32')\n self.assertTrue(np.array_equal(tensor, expected_tensor))\n\n def _SetupWSGIApp(self):\n multiplexer = event_multiplexer.EventMultiplexer(\n 
size_guidance=application.DEFAULT_SIZE_GUIDANCE,\n purge_orphaned_data=True)\n projector = projector_plugin.ProjectorPlugin()\n projector.get_plugin_apps(multiplexer, self.log_dir)\n plugins = {'projector': projector}\n wsgi_app = application.TensorBoardWSGIApp(\n self.log_dir, plugins, multiplexer, reload_interval=0)\n self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)\n\n def _Get(self, path):\n return self.server.get(path)\n\n def _GetJson(self, path):\n response = self.server.get(path)\n data = response.data\n if response.headers.get('Content-Encoding') == 'gzip':\n data = gzip.GzipFile('', 'rb', 9, io.BytesIO(data)).read()\n return json.loads(data.decode('utf-8'))\n\n def _GenerateProjectorTestData(self):\n config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')\n config = ProjectorConfig()\n embedding = config.embeddings.add()\n # Add an embedding by its canonical tensor name.\n embedding.tensor_name = 'var1:0'\n config_pbtxt = text_format.MessageToString(config)\n with gfile.GFile(config_path, 'w') as f:\n f.write(config_pbtxt)\n\n # Write a checkpoint with some dummy variables.\n with ops.Graph().as_default():\n sess = session.Session()\n checkpoint_path = os.path.join(self.log_dir, 'model')\n variable_scope.get_variable(\n 'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))\n variable_scope.get_variable('var2', [10, 10])\n variable_scope.get_variable('var3', [100, 100])\n sess.run(variables.global_variables_initializer())\n saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)\n saver.save(sess, checkpoint_path)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "numpy.fromstring", "numpy.array", "tensorflow.python.platform.gfile.GFile", "numpy.array_equal", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.tensorboard.plugins.projector.projector_plugin.ProjectorPlugin", "tensorflow.python.client.session.Session", "tensorflow.python.training.saver.Saver", "tensorflow.tensorboard.backend.event_processing.event_multiplexer.EventMultiplexer", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.tensorboard.backend.application.TensorBoardWSGIApp", "tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2.ProjectorConfig", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variables.global_variables_initializer" ] ]
Bobgy/model-analysis
[ "a964d2e8430b447c898d271fb6e6d8f5b99adf4b", "a964d2e8430b447c898d271fb6e6d8f5b99adf4b" ]
[ "tensorflow_model_analysis/api/model_eval_lib.py", "tensorflow_model_analysis/extractors/auto_slice_key_extractor_test.py" ]
[ "# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"API for Tensorflow Model Analysis.\"\"\"\n\n# TODO(b/149126671): Put ValidationResultsWriter in a separate file.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Standard __future__ imports\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport tempfile\n\nfrom typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union\n\nimport apache_beam as beam\nimport six\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tensorflow_model_analysis import config\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis import model_util\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis import version as tfma_version\nfrom tensorflow_model_analysis.eval_saved_model import constants as eval_constants\nfrom tensorflow_model_analysis.evaluators import evaluator\nfrom tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator\nfrom tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2\nfrom tensorflow_model_analysis.extractors import extractor\nfrom tensorflow_model_analysis.extractors import input_extractor\nfrom tensorflow_model_analysis.extractors import predict_extractor\nfrom tensorflow_model_analysis.extractors import predict_extractor_v2\nfrom tensorflow_model_analysis.extractors import slice_key_extractor\nfrom tensorflow_model_analysis.extractors import tflite_predict_extractor\nfrom tensorflow_model_analysis.post_export_metrics import post_export_metrics\nfrom tensorflow_model_analysis.proto import config_pb2\nfrom tensorflow_model_analysis.proto import validation_result_pb2\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\nfrom tensorflow_model_analysis.validators import validator\nfrom tensorflow_model_analysis.writers import metrics_and_plots_serialization\nfrom tensorflow_model_analysis.writers import metrics_plots_and_validations_writer\nfrom tensorflow_model_analysis.writers import writer\nfrom google.protobuf import json_format\n\n_EVAL_CONFIG_FILE = 'eval_config.json'\n\n\ndef _assert_tensorflow_version():\n \"\"\"Check that we're using a compatible TF version.\"\"\"\n # Fail with a clear error in case we are not using a compatible TF version.\n major, minor, _ = tf.version.VERSION.split('.')\n if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):\n raise RuntimeError(\n 'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '\n 'install the latest 1.x or 2.x version from '\n 'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)\n if int(major) == 2:\n tf.compat.v1.logging.warning(\n 'Tensorflow version (%s) found. 
Note that TFMA support for TF 2.0 '\n 'is currently in beta' % tf.version.VERSION)\n\n\ndef _check_version(version: Text, path: Text):\n if not version:\n raise ValueError(\n 'could not find TFMA version in raw deserialized dictionary for '\n 'file at %s' % path)\n # We don't actually do any checking for now, since we don't have any\n # compatibility issues.\n\n\ndef _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],\n eval_config: Optional[config.EvalConfig]):\n \"\"\"Returns True if legacy evaluation is being used.\"\"\"\n # A legacy evaluation is an evalution that uses only a single EvalSharedModel,\n # has no tags (or uses \"eval\" as its tag), and does not specify an eval_config\n # (or specifies an eval_config with no metrics). The legacy evaluation is\n # based on using add_metrics_callbacks to create a modified version of the\n # graph saved with an EvalSavedModel. The newer version of evaluation supports\n # both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside\n # of EvalConfig. The newer version works with both \"eval\" and serving models\n # and also supports multi-model evaluation. This function is used by code to\n # support backwards compatibility for callers that have not updated to use the\n # new EvalConfig.\n return (eval_shared_model and not isinstance(eval_shared_model, dict) and\n ((not eval_shared_model.model_loader.tags or\n eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and\n (not eval_config or not eval_config.metrics_specs)))\n\n\ndef _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,\n file_format: Text, model_locations: Dict[Text,\n Text]) -> Text:\n return json_format.MessageToJson(\n config_pb2.EvalRun(\n eval_config=eval_config,\n version=tfma_version.VERSION_STRING,\n data_location=data_location,\n file_format=file_format,\n model_locations=model_locations))\n\n\ndef _load_eval_run(\n output_path: Text\n) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:\n \"\"\"Returns eval config, data location, file format, and model locations.\"\"\"\n path = os.path.join(output_path, _EVAL_CONFIG_FILE)\n if tf.io.gfile.exists(path):\n with tf.io.gfile.GFile(path, 'r') as f:\n pb = json_format.Parse(f.read(), config_pb2.EvalRun())\n _check_version(pb.version, output_path)\n return (pb.eval_config, pb.data_location, pb.file_format,\n pb.model_locations)\n else:\n # Legacy suppport (to be removed in future).\n # The previous version did not include file extension.\n path = os.path.splitext(path)[0]\n serialized_record = six.next(\n tf.compat.v1.python_io.tf_record_iterator(path))\n final_dict = pickle.loads(serialized_record)\n _check_version(final_dict, output_path)\n old_config = final_dict['eval_config']\n slicing_specs = None\n if old_config.slice_spec:\n slicing_specs = [s.to_proto() for s in old_config.slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = (\n old_config.compute_confidence_intervals)\n options.k_anonymization_count.value = old_config.k_anonymization_count\n return (config.EvalConfig(slicing_specs=slicing_specs,\n options=options), old_config.data_location, '', {\n '': old_config.model_location\n })\n\n\n# The field slicing_metrics is a nested dictionaries representing metrics for\n# different configuration as defined by MetricKey in metrics_for_slice.proto.\n# The levels corresponds to output name, class id, metric name and metric value\n# in this order. 
Note MetricValue uses oneof so metric values will always\n# contain only a single key representing the type in the oneof and the actual\n# metric value is in the value.\nEvalResult = NamedTuple( # pylint: disable=invalid-name\n 'EvalResult',\n [('slicing_metrics',\n List[Tuple[slicer.SliceKeyType,\n Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,\n Any]]]]]]]),\n ('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),\n ('config', config.EvalConfig), ('data_location', Text),\n ('file_format', Text), ('model_location', Text)])\n\n\n# Define types here to avoid type errors between OSS and internal code.\nValidationResult = validation_result_pb2.ValidationResult\n\n\ndef load_validation_result(\n validations_file: Text) -> Optional[ValidationResult]:\n \"\"\"Read and deserialize the ValidationResult.\"\"\"\n validation_records = []\n for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):\n validation_records.append(ValidationResult.FromString(record))\n if validation_records:\n assert len(validation_records) == 1\n return validation_records[0]\n\n\nclass EvalResults(object):\n \"\"\"Class for results from multiple model analysis run.\"\"\"\n\n def __init__(self,\n results: List[EvalResult],\n mode: Text = constants.UNKNOWN_EVAL_MODE):\n supported_modes = [\n constants.DATA_CENTRIC_MODE,\n constants.MODEL_CENTRIC_MODE,\n ]\n if mode not in supported_modes:\n raise ValueError('Mode ' + mode + ' must be one of ' +\n Text(supported_modes))\n\n self._results = results\n self._mode = mode\n\n def get_results(self) -> List[EvalResult]:\n return self._results\n\n def get_mode(self) -> Text:\n return self._mode\n\n\ndef make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n results: A list of TFMA evaluation results.\n mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n\n Returns:\n An EvalResults containing all evaluation results. This can be used to\n construct a time series view.\n \"\"\"\n return EvalResults(results, mode)\n\n\ndef load_eval_results(output_paths: List[Text],\n mode: Text,\n model_name: Optional[Text] = None) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n output_paths: A list of output paths of completed tfma runs.\n mode: The mode of the evaluation. 
Currently, tfma.DATA_CENTRIC_MODE and\n tfma.MODEL_CENTRIC_MODE are supported.\n model_name: The name of the model if multiple models are evaluated together.\n\n Returns:\n An EvalResults containing the evaluation results serialized at output_paths.\n This can be used to construct a time series view.\n \"\"\"\n results = [\n load_eval_result(output_path, model_name=model_name)\n for output_path in output_paths\n ]\n return make_eval_results(results, mode)\n\n\ndef load_eval_result(output_path: Text,\n model_name: Optional[Text] = None) -> EvalResult:\n \"\"\"Creates an EvalResult object for use with the visualization functions.\"\"\"\n eval_config, data_location, file_format, model_locations = (\n _load_eval_run(output_path))\n metrics_proto_list = (\n metrics_and_plots_serialization.load_and_deserialize_metrics(\n path=os.path.join(output_path, constants.METRICS_KEY),\n model_name=model_name))\n plots_proto_list = (\n metrics_and_plots_serialization.load_and_deserialize_plots(\n path=os.path.join(output_path, constants.PLOTS_KEY)))\n\n if model_name is None:\n model_location = list(model_locations.values())[0]\n else:\n model_location = model_locations[model_name]\n return EvalResult(\n slicing_metrics=metrics_proto_list,\n plots=plots_proto_list,\n config=eval_config,\n data_location=data_location,\n file_format=file_format,\n model_location=model_location)\n\n\ndef default_eval_shared_model(\n eval_saved_model_path: Text,\n add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,\n include_default_metrics: Optional[bool] = True,\n example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,\n additional_fetches: Optional[List[Text]] = None,\n blacklist_feature_fetches: Optional[List[Text]] = None,\n tags: Optional[List[Text]] = None,\n eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:\n \"\"\"Returns default EvalSharedModel.\n\n Args:\n eval_saved_model_path: Path to EvalSavedModel.\n add_metrics_callbacks: Optional list of callbacks for adding additional\n metrics to the graph (see EvalSharedModel for more information on how to\n configure additional metrics). Metrics for example count and example\n weights will be added automatically.\n include_default_metrics: True to include the default metrics that are part\n of the saved model graph during evaluation. Note that\n eval_config.options.include_default_metrics must also be true.\n example_weight_key: Example weight key (single-output model) or dict of\n example weight keys (multi-output model) keyed by output name.\n additional_fetches: Prefixes of additional tensors stored in\n signature_def.inputs that should be fetched at prediction time. The\n \"features\" and \"labels\" tensors are handled automatically and should not\n be included.\n blacklist_feature_fetches: List of tensor names in the features dictionary\n which should be excluded from the fetches request. This is useful in\n scenarios where features are large (e.g. images) and can lead to excessive\n memory use if stored.\n tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).\n eval_config: Eval config. Only used for setting default tags.\n \"\"\"\n if tags is None:\n if eval_config:\n # Default to serving unless all the signature_names are eval. 
We do not\n # support running with a mixture of eval and non-eval tags.\n signatures = [s.signature_name for s in eval_config.model_specs]\n if eval_constants.EVAL_TAG in signatures:\n if not all(s == eval_constants.EVAL_TAG for s in signatures):\n tf.compat.v1.logging.warning(\n 'mixture of eval and non-eval signatures used: '\n 'eval_config={}'.format(eval_config))\n tags = [eval_constants.EVAL_TAG]\n else:\n tags = [tf.saved_model.SERVING]\n else:\n tags = [eval_constants.EVAL_TAG]\n\n # Backwards compatibility for legacy add_metrics_callbacks implementation.\n if tags == [eval_constants.EVAL_TAG]:\n # PyType doesn't know about the magic exports we do in post_export_metrics.\n # Additionally, the lines seem to get reordered in compilation, so we can't\n # just put the disable-attr on the add_metrics_callbacks lines.\n # pytype: disable=module-attr\n if not add_metrics_callbacks:\n add_metrics_callbacks = []\n # Always compute example weight and example count.\n example_count_callback = post_export_metrics.example_count()\n add_metrics_callbacks.append(example_count_callback)\n if example_weight_key:\n if isinstance(example_weight_key, dict):\n for output_name, key in example_weight_key.items():\n example_weight_callback = post_export_metrics.example_weight(\n key, metric_tag=output_name)\n add_metrics_callbacks.append(example_weight_callback)\n else:\n example_weight_callback = post_export_metrics.example_weight(\n example_weight_key)\n add_metrics_callbacks.append(example_weight_callback)\n # pytype: enable=module-attr\n\n return types.EvalSharedModel(\n model_path=eval_saved_model_path,\n add_metrics_callbacks=add_metrics_callbacks,\n include_default_metrics=include_default_metrics,\n example_weight_key=example_weight_key,\n additional_fetches=additional_fetches,\n model_loader=types.ModelLoader(\n tags=tags,\n construct_fn=model_util.model_construct_fn(\n eval_saved_model_path=eval_saved_model_path,\n add_metrics_callbacks=add_metrics_callbacks,\n include_default_metrics=include_default_metrics,\n additional_fetches=additional_fetches,\n blacklist_feature_fetches=blacklist_feature_fetches,\n tags=tags)))\n\n\ndef default_extractors( # pylint: disable=invalid-name\n eval_shared_model: Union[types.EvalSharedModel,\n Dict[Text, types.EvalSharedModel]] = None,\n eval_config: config.EvalConfig = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n desired_batch_size: Optional[int] = None,\n materialize: Optional[bool] = True) -> List[extractor.Extractor]:\n \"\"\"Returns the default extractors for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Shared model (single-model evaluation) or dict of shared\n models keyed by model name (multi-model evaluation). Required unless the\n predictions are provided alongside of the features (i.e. 
model-agnostic\n evaluations).\n eval_config: Eval config.\n slice_spec: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n materialize: True to have extractors create materialized output.\n\n Raises:\n NotImplementedError: If eval_config contains mixed serving and eval models.\n \"\"\"\n if eval_config is not None:\n eval_config = config.update_eval_config_with_defaults(eval_config)\n slice_spec = [\n slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs\n ]\n if _is_legacy_eval(eval_shared_model, eval_config):\n # Backwards compatibility for previous add_metrics_callbacks implementation.\n return [\n predict_extractor.PredictExtractor(\n eval_shared_model, desired_batch_size, materialize=materialize),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif eval_shared_model:\n model_types = model_util.get_model_types(eval_config)\n if not model_types.issubset(constants.VALID_MODEL_TYPES):\n raise NotImplementedError(\n 'model type must be one of: {}. evalconfig={}'.format(\n str(constants.VALID_MODEL_TYPES), eval_config))\n if model_types == set([constants.TF_LITE]):\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n tflite_predict_extractor.TFLitePredictExtractor(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n desired_batch_size=desired_batch_size),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif constants.TF_LITE in model_types:\n raise NotImplementedError(\n 'support for mixing tf_lite and non-tf_lite models is not '\n 'implemented: eval_config={}'.format(eval_config))\n\n elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG\n for s in eval_config.model_specs)):\n return [\n predict_extractor.PredictExtractor(\n eval_shared_model,\n desired_batch_size,\n materialize=materialize,\n eval_config=eval_config),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG\n for s in eval_config.model_specs)):\n raise NotImplementedError(\n 'support for mixing eval and non-eval models is not implemented: '\n 'eval_config={}'.format(eval_config))\n else:\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n predict_extractor_v2.PredictExtractor(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n desired_batch_size=desired_batch_size),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n else:\n return [\n input_extractor.InputExtractor(eval_config=eval_config),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec, materialize=materialize)\n ]\n\n\ndef default_evaluators( # pylint: disable=invalid-name\n eval_shared_model: Optional[Union[types.EvalSharedModel,\n Dict[Text,\n types.EvalSharedModel]]] = None,\n eval_config: config.EvalConfig = None,\n compute_confidence_intervals: Optional[bool] = False,\n k_anonymization_count: int = 1,\n desired_batch_size: Optional[int] = None,\n serialize: bool = False,\n random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:\n \"\"\"Returns the default evaluators for use in ExtractAndEvaluate.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). 
Only\n required if there are metrics to be computed in-graph using the model.\n eval_config: Eval config.\n compute_confidence_intervals: Deprecated (use eval_config).\n k_anonymization_count: Deprecated (use eval_config).\n desired_batch_size: Optional batch size for batching in combiner.\n serialize: Deprecated.\n random_seed_for_testing: Provide for deterministic tests only.\n \"\"\"\n disabled_outputs = []\n if eval_config:\n eval_config = config.update_eval_config_with_defaults(eval_config)\n disabled_outputs = eval_config.options.disabled_outputs.values\n if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):\n # no in-graph metrics present when tflite is used.\n if eval_shared_model:\n if isinstance(eval_shared_model, dict):\n eval_shared_model = {\n k: v._replace(include_default_metrics=False)\n for k, v in eval_shared_model.items()\n }\n else:\n eval_shared_model = eval_shared_model._replace(\n include_default_metrics=False)\n if (constants.METRICS_KEY in disabled_outputs and\n constants.PLOTS_KEY in disabled_outputs):\n return []\n if _is_legacy_eval(eval_shared_model, eval_config):\n # Backwards compatibility for previous add_metrics_callbacks implementation.\n if eval_config is not None:\n if eval_config.options.HasField('compute_confidence_intervals'):\n compute_confidence_intervals = (\n eval_config.options.compute_confidence_intervals.value)\n if eval_config.options.HasField('k_anonymization_count'):\n k_anonymization_count = eval_config.options.k_anonymization_count.value\n return [\n metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(\n eval_shared_model,\n compute_confidence_intervals=compute_confidence_intervals,\n k_anonymization_count=k_anonymization_count,\n desired_batch_size=desired_batch_size,\n serialize=serialize,\n random_seed_for_testing=random_seed_for_testing)\n ]\n else:\n return [\n metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(\n eval_config=eval_config, eval_shared_model=eval_shared_model)\n ]\n\n\ndef default_writers(\n output_path: Optional[Text],\n eval_shared_model: Optional[Union[types.EvalSharedModel,\n Dict[Text, types.EvalSharedModel]]] = None\n) -> List[writer.Writer]: # pylint: disable=invalid-name\n \"\"\"Returns the default writers for use in WriteResults.\n\n Args:\n output_path: Output path.\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). 
Only\n required if legacy add_metrics_callbacks are used.\n \"\"\"\n add_metric_callbacks = []\n # The add_metric_callbacks are used in the metrics and plots serialization\n # code to post process the metric data by calling populate_stats_and_pop.\n # While both the legacy (V1) and new (V2) evaluation implementations support\n # EvalSavedModels using add_metric_callbacks, this particular code is only\n # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.\n # The V2 MetricsAndPlotsEvaluator output requires no additional processing.\n # Since the V1 code only supports a single EvalSharedModel, we only set the\n # add_metrics_callbacks if a dict is not passed.\n if eval_shared_model and not isinstance(eval_shared_model, dict):\n add_metric_callbacks = eval_shared_model.add_metrics_callbacks\n\n output_paths = {\n constants.METRICS_KEY:\n os.path.join(output_path, constants.METRICS_KEY),\n constants.PLOTS_KEY:\n os.path.join(output_path, constants.PLOTS_KEY),\n constants.VALIDATIONS_KEY:\n os.path.join(output_path, constants.VALIDATIONS_KEY)\n }\n return [\n metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(\n output_paths=output_paths,\n add_metrics_callbacks=add_metric_callbacks),\n ]\n\n\n@beam.ptransform_fn\n@beam.typehints.with_input_types(bytes)\n@beam.typehints.with_output_types(types.Extracts)\ndef InputsToExtracts( # pylint: disable=invalid-name\n inputs: beam.pvalue.PCollection):\n \"\"\"Converts serialized inputs (e.g. examples) to Extracts.\"\"\"\n return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})\n\n\n@beam.ptransform_fn\n@beam.typehints.with_input_types(types.Extracts)\n@beam.typehints.with_output_types(evaluator.Evaluation)\ndef ExtractAndEvaluate( # pylint: disable=invalid-name\n extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],\n evaluators: List[evaluator.Evaluator]):\n \"\"\"Performs Extractions and Evaluations in provided order.\"\"\"\n # evaluation[k] = list of values for k\n evaluation = {}\n\n def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):\n for k, v in new_evaluation.items():\n if k not in evaluation:\n evaluation[k] = []\n evaluation[k].append(v)\n return evaluation\n\n # Run evaluators that run before extraction (i.e. that only require\n # the incoming input extract added by ReadInputs)\n for v in evaluators:\n if not v.run_after:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n for x in extractors:\n extracts = (extracts | x.stage_name >> x.ptransform)\n for v in evaluators:\n if v.run_after == x.stage_name:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n for v in evaluators:\n if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:\n update(evaluation, extracts | v.stage_name >> v.ptransform)\n\n # Merge multi-valued keys if necessary.\n result = {}\n for k, v in evaluation.items():\n if len(v) == 1:\n result[k] = v[0]\n continue\n\n # Note that we assume that if a key is multivalued, its values are\n # dictionaries with disjoint keys. 
The combined value will simply be the\n # disjoint union of all the dictionaries.\n result[k] = (\n v\n | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()\n | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(\n _CombineEvaluationDictionariesFn()))\n\n return result\n\n\nclass _CombineEvaluationDictionariesFn(beam.CombineFn):\n \"\"\"CombineFn to combine dictionaries generated by different evaluators.\"\"\"\n\n def create_accumulator(self) -> Dict[Text, Any]:\n return {}\n\n def _merge(self, accumulator: Dict[Text, Any],\n output_dict: Dict[Text, Any]) -> None:\n intersection = set(accumulator) & set(output_dict)\n if intersection:\n raise ValueError(\n 'Dictionaries generated by different evaluators should have '\n 'different keys, but keys %s appeared in the output of multiple '\n 'evaluators' % intersection)\n accumulator.update(output_dict)\n\n def add_input(self, accumulator: Dict[Text, Any],\n output_dict: Dict[Text, Any]) -> Dict[Text, Any]:\n if not isinstance(output_dict, dict):\n raise TypeError(\n 'for outputs written to by multiple evaluators, the outputs must all '\n 'be dictionaries, but got output of type %s, value %s' %\n (type(output_dict), str(output_dict)))\n self._merge(accumulator, output_dict)\n return accumulator\n\n def merge_accumulators(\n self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:\n result = self.create_accumulator()\n for acc in accumulators:\n self._merge(result, acc)\n return result\n\n def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:\n return accumulator\n\n\n@beam.ptransform_fn\n@beam.typehints.with_input_types(Union[evaluator.Evaluation,\n validator.Validation])\n@beam.typehints.with_output_types(beam.pvalue.PDone)\ndef WriteResults( # pylint: disable=invalid-name\n evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],\n writers: List[writer.Writer]):\n \"\"\"Writes Evaluation or Validation results using given writers.\n\n Args:\n evaluation_or_validation: Evaluation or Validation output.\n writers: Writes to use for writing out output.\n\n Raises:\n ValueError: If Evaluation or Validation is empty.\n\n Returns:\n beam.pvalue.PDone.\n \"\"\"\n if not evaluation_or_validation:\n raise ValueError('Evaluations and Validations cannot be empty')\n for w in writers:\n _ = evaluation_or_validation | w.stage_name >> w.ptransform\n return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)\n\n\n@beam.ptransform_fn\n@beam.typehints.with_input_types(beam.Pipeline)\n@beam.typehints.with_output_types(beam.pvalue.PDone)\ndef WriteEvalConfig( # pylint: disable=invalid-name\n pipeline: beam.Pipeline,\n eval_config: config.EvalConfig,\n output_path: Text,\n data_location: Optional[Text] = '',\n file_format: Optional[Text] = '',\n model_locations: Optional[Dict[Text, Text]] = None):\n \"\"\"Writes EvalConfig to file.\n\n Args:\n pipeline: Beam pipeline.\n eval_config: EvalConfig.\n output_path: Output path.\n data_location: Optional location for data used with config.\n file_format: Optional format for data used with config.\n model_locations: Optional location(s) for model(s) used with config.\n\n Returns:\n beam.pvalue.PDone.\n \"\"\"\n return (\n pipeline\n | 'CreateEvalConfig' >> beam.Create([\n _serialize_eval_run(eval_config, data_location, file_format,\n model_locations)\n ])\n | 'WriteEvalConfig' >> beam.io.WriteToText(\n os.path.join(output_path, _EVAL_CONFIG_FILE), 
shard_name_template=''))\n\n\n@beam.ptransform_fn\n@beam.typehints.with_output_types(beam.pvalue.PDone)\ndef ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name\n examples: beam.pvalue.PCollection,\n eval_shared_model: Optional[Union[types.EvalSharedModel,\n Dict[Text,\n types.EvalSharedModel]]] = None,\n eval_config: config.EvalConfig = None,\n extractors: Optional[List[extractor.Extractor]] = None,\n evaluators: Optional[List[evaluator.Evaluator]] = None,\n writers: Optional[List[writer.Writer]] = None,\n output_path: Optional[Text] = None,\n display_only_data_location: Optional[Text] = None,\n display_only_file_format: Optional[Text] = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n write_config: Optional[bool] = True,\n compute_confidence_intervals: Optional[bool] = False,\n k_anonymization_count: int = 1,\n desired_batch_size: Optional[int] = None,\n random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:\n \"\"\"PTransform for performing extraction, evaluation, and writing results.\n\n Users who want to construct their own Beam pipelines instead of using the\n lightweight run_model_analysis functions should use this PTransform.\n\n Example usage:\n eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=model_location, eval_config=eval_config)\n with beam.Pipeline(runner=...) as p:\n _ = (p\n | 'ReadData' >> beam.io.ReadFromTFRecord(data_location)\n | 'ExtractEvaluateAndWriteResults' >>\n tfma.ExtractEvaluateAndWriteResults(\n eval_shared_model=eval_shared_model,\n eval_config=eval_config,\n ...))\n result = tfma.load_eval_result(output_path=output_path)\n tfma.view.render_slicing_metrics(result)\n\n Note that the exact serialization format is an internal implementation detail\n and subject to change. Users should only use the TFMA functions to write and\n read the results.\n\n Args:\n examples: PCollection of input examples. Can be any format the model accepts\n (e.g. string containing CSV row, TensorFlow.Example, etc).\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers and for\n display purposes of the model path.\n eval_config: Eval config.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. If no writers\n are provided, default_writers will be used.\n output_path: Path to output metrics and plots results.\n display_only_data_location: Optional path indicating where the examples were\n read from. This is used only for display purposes - data will not actually\n be read from this path.\n display_only_file_format: Optional format of the examples. 
This is used only\n for display purposes.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Raises:\n ValueError: If EvalConfig invalid or matching Extractor not found for an\n Evaluator.\n\n Returns:\n PDone.\n \"\"\"\n eval_shared_models = eval_shared_model\n if not isinstance(eval_shared_model, dict):\n eval_shared_models = {'': eval_shared_model}\n\n if eval_config is None:\n model_specs = []\n for model_name, shared_model in eval_shared_models.items():\n example_weight_key = shared_model.example_weight_key\n example_weight_keys = {}\n if example_weight_key and isinstance(example_weight_key, dict):\n example_weight_keys = example_weight_key\n example_weight_key = ''\n model_specs.append(\n config.ModelSpec(\n name=model_name,\n example_weight_key=example_weight_key,\n example_weight_keys=example_weight_keys))\n slicing_specs = None\n if slice_spec:\n slicing_specs = [s.to_proto() for s in slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = compute_confidence_intervals\n options.k_anonymization_count.value = k_anonymization_count\n if not write_config:\n options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)\n eval_config = config.EvalConfig(\n model_specs=model_specs, slicing_specs=slicing_specs, options=options)\n else:\n eval_config = config.update_eval_config_with_defaults(eval_config)\n\n config.verify_eval_config(eval_config)\n\n if not extractors:\n extractors = default_extractors(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n materialize=False,\n desired_batch_size=desired_batch_size)\n\n if not evaluators:\n evaluators = default_evaluators(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n random_seed_for_testing=random_seed_for_testing)\n\n for v in evaluators:\n evaluator.verify_evaluator(v, extractors)\n\n if not writers:\n writers = default_writers(\n output_path=output_path, eval_shared_model=eval_shared_model)\n\n # pylint: disable=no-value-for-parameter\n _ = (\n examples\n | 'InputsToExtracts' >> InputsToExtracts()\n | 'ExtractAndEvaluate' >> ExtractAndEvaluate(\n extractors=extractors, evaluators=evaluators)\n | 'WriteResults' >> WriteResults(writers=writers))\n\n if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:\n data_location = '<user provided PCollection>'\n if display_only_data_location is not None:\n data_location = display_only_data_location\n file_format = '<unknown>'\n if display_only_file_format is not None:\n file_format = display_only_file_format\n model_locations = {}\n for k, v in eval_shared_models.items():\n model_locations[k] = ('<unknown>' if v is None or v.model_path is None\n else v.model_path)\n _ = (\n examples.pipeline\n | WriteEvalConfig(eval_config, output_path, data_location, file_format,\n model_locations))\n # pylint: enable=no-value-for-parameter\n\n return beam.pvalue.PDone(examples.pipeline)\n\n\ndef run_model_analysis(\n eval_shared_model: Optional[Union[types.EvalSharedModel,\n Dict[Text,\n types.EvalSharedModel]]] = None,\n eval_config: config.EvalConfig = None,\n data_location: Text = '',\n file_format: Text = 'tfrecords',\n output_path: Optional[Text] = None,\n extractors: Optional[List[extractor.Extractor]] = None,\n evaluators: 
Optional[List[evaluator.Evaluator]] = None,\n writers: Optional[List[writer.Writer]] = None,\n pipeline_options: Optional[Any] = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,\n write_config: Optional[bool] = True,\n compute_confidence_intervals: Optional[bool] = False,\n k_anonymization_count: int = 1,\n desired_batch_size: Optional[int] = None,\n random_seed_for_testing: Optional[int] = None\n) -> Union[EvalResult, EvalResults]:\n \"\"\"Runs TensorFlow model analysis.\n\n It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow\n Eval SavedModel and returns the results.\n\n This is a simplified API for users who want to quickly get something running\n locally. Users who wish to create their own Beam pipelines can use the\n Evaluate PTransform instead.\n\n Args:\n eval_shared_model: Optional shared model (single-model evaluation) or dict\n of shared models keyed by model name (multi-model evaluation). Only\n required if needed by default extractors, evaluators, or writers.\n eval_config: Eval config.\n data_location: The location of the data files.\n file_format: The file format of the data, can be either 'text' or\n 'tfrecords' for now. By default, 'tfrecords' will be used.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n extractors: Optional list of Extractors to apply to Extracts. Typically\n these will be added by calling the default_extractors function. If no\n extractors are provided, default_extractors (non-materialized) will be\n used.\n evaluators: Optional list of Evaluators for evaluating Extracts. Typically\n these will be added by calling the default_evaluators function. If no\n evaluators are provided, default_evaluators will be used.\n writers: Optional list of Writers for writing Evaluation output. Typically\n these will be added by calling the default_writers function. 
If no writers\n are provided, default_writers will be used.\n pipeline_options: Optional arguments to run the Pipeline, for instance\n whether to run directly.\n slice_spec: Deprecated (use EvalConfig).\n write_config: Deprecated (use EvalConfig).\n compute_confidence_intervals: Deprecated (use EvalConfig).\n k_anonymization_count: Deprecated (use EvalConfig).\n desired_batch_size: Optional batch size for batching in Predict.\n random_seed_for_testing: Provide for deterministic tests only.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n\n Raises:\n ValueError: If the file_format is unknown to us.\n \"\"\"\n _assert_tensorflow_version()\n\n if output_path is None:\n output_path = tempfile.mkdtemp()\n if not tf.io.gfile.exists(output_path):\n tf.io.gfile.makedirs(output_path)\n\n if eval_config is None:\n model_specs = []\n eval_shared_models = eval_shared_model\n if not isinstance(eval_shared_model, dict):\n eval_shared_models = {'': eval_shared_model}\n for model_name, shared_model in eval_shared_models.items():\n example_weight_key = shared_model.example_weight_key\n example_weight_keys = {}\n if example_weight_key and isinstance(example_weight_key, dict):\n example_weight_keys = example_weight_key\n example_weight_key = ''\n model_specs.append(\n config.ModelSpec(\n name=model_name,\n example_weight_key=example_weight_key,\n example_weight_keys=example_weight_keys))\n slicing_specs = None\n if slice_spec:\n slicing_specs = [s.to_proto() for s in slice_spec]\n options = config.Options()\n options.compute_confidence_intervals.value = compute_confidence_intervals\n options.k_anonymization_count.value = k_anonymization_count\n if not write_config:\n options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)\n eval_config = config.EvalConfig(\n model_specs=model_specs, slicing_specs=slicing_specs, options=options)\n\n with beam.Pipeline(options=pipeline_options) as p:\n if file_format == 'tfrecords':\n data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(\n file_pattern=data_location,\n compression_type=beam.io.filesystem.CompressionTypes.AUTO)\n elif file_format == 'text':\n data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)\n else:\n raise ValueError('unknown file_format: {}'.format(file_format))\n\n # pylint: disable=no-value-for-parameter\n _ = (\n data\n | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n display_only_data_location=data_location,\n display_only_file_format=file_format,\n output_path=output_path,\n extractors=extractors,\n evaluators=evaluators,\n writers=writers,\n desired_batch_size=desired_batch_size,\n random_seed_for_testing=random_seed_for_testing))\n # pylint: enable=no-value-for-parameter\n\n if len(eval_config.model_specs) <= 1:\n return load_eval_result(output_path)\n else:\n results = []\n for spec in eval_config.model_specs:\n results.append(load_eval_result(output_path, model_name=spec.name))\n return EvalResults(results, constants.MODEL_CENTRIC_MODE)\n\n\ndef single_model_analysis(\n model_location: Text,\n data_location: Text,\n output_path: Text = None,\n slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:\n \"\"\"Run model analysis for a single model on a single data set.\n\n This is a convenience wrapper around run_model_analysis for a single model\n with a single data set. 
For more complex use cases, use\n tfma.run_model_analysis.\n\n Args:\n model_location: Path to the export eval saved model.\n data_location: The location of the data files.\n output_path: The directory to output metrics and results to. If None, we use\n a temporary directory.\n slice_spec: A list of tfma.slicer.SingleSliceSpec.\n\n Returns:\n An EvalResult that can be used with the TFMA visualization functions.\n \"\"\"\n # Get working_dir ready.\n if output_path is None:\n output_path = tempfile.mkdtemp()\n if not tf.io.gfile.exists(output_path):\n tf.io.gfile.makedirs(output_path)\n\n eval_config = config.EvalConfig(\n slicing_specs=[s.to_proto() for s in slice_spec])\n\n return run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=default_eval_shared_model(\n eval_saved_model_path=model_location),\n data_location=data_location,\n output_path=output_path) # pytype: disable=bad-return-type\n\n\ndef multiple_model_analysis(model_locations: List[Text], data_location: Text,\n **kwargs) -> EvalResults:\n \"\"\"Run model analysis for multiple models on the same data set.\n\n Args:\n model_locations: A list of paths to the export eval saved model.\n data_location: The location of the data files.\n **kwargs: The args used for evaluation. See tfma.single_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as model_locations.\n \"\"\"\n results = []\n for m in model_locations:\n results.append(single_model_analysis(m, data_location, **kwargs))\n return EvalResults(results, constants.MODEL_CENTRIC_MODE)\n\n\ndef multiple_data_analysis(model_location: Text, data_locations: List[Text],\n **kwargs) -> EvalResults:\n \"\"\"Run model analysis for a single model on multiple data sets.\n\n Args:\n model_location: The location of the exported eval saved model.\n data_locations: A list of data set locations.\n **kwargs: The args used for evaluation. 
See tfma.run_model_analysis() for\n details.\n\n Returns:\n A tfma.EvalResults containing all the evaluation results with the same order\n as data_locations.\n \"\"\"\n results = []\n for d in data_locations:\n results.append(single_model_analysis(model_location, d, **kwargs))\n return EvalResults(results, constants.DATA_CENTRIC_MODE)\n", "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for auto_slice_key_extractor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Standard Imports\n\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.extractors import auto_slice_key_extractor\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\n\nfrom google.protobuf import text_format\nfrom tensorflow_metadata.proto.v0 import statistics_pb2\n\n\nclass AutoSliceKeyExtractorTest(testutil.TensorflowModelAnalysisTest):\n\n def test_slice_spec_from_stats_and_schema(self):\n stats = text_format.Parse(\n \"\"\"\n datasets {\n features: {\n path { step: 'feature1' }\n type: STRING\n string_stats: {\n unique: 10\n }\n }\n features: {\n path { step: 'feature2' }\n type: STRING\n string_stats: {\n unique: 200\n }\n }\n features: {\n path { step: 'feature3' }\n type: INT\n string_stats: {\n unique: 10\n }\n }\n features: {\n path { step: 'feature4' }\n type: INT\n string_stats: {\n unique: 200\n }\n }\n features: {\n path { step: 'feature5' }\n type: INT\n num_stats: {\n }\n }\n features: {\n path { step: 'feature6' }\n type: FLOAT\n num_stats: {\n }\n }\n }\n \"\"\", statistics_pb2.DatasetFeatureStatisticsList())\n transformed_feature5 = (\n auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX + 'feature5')\n transformed_feature6 = (\n auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX + 'feature6')\n expected_slice_spec = [\n slicer.SingleSliceSpec(columns=['feature1']),\n slicer.SingleSliceSpec(columns=['feature3']),\n slicer.SingleSliceSpec(columns=[transformed_feature5]),\n slicer.SingleSliceSpec(columns=[transformed_feature6]),\n slicer.SingleSliceSpec(columns=['feature1', 'feature3']),\n slicer.SingleSliceSpec(columns=['feature1', transformed_feature5]),\n slicer.SingleSliceSpec(columns=['feature1', transformed_feature6]),\n slicer.SingleSliceSpec(columns=['feature3', transformed_feature5]),\n slicer.SingleSliceSpec(columns=['feature3', transformed_feature6]),\n slicer.SingleSliceSpec(\n columns=[transformed_feature5, transformed_feature6]),\n slicer.SingleSliceSpec()\n ]\n actual_slice_spec = auto_slice_key_extractor.slice_spec_from_stats(stats)\n self.assertEqual(actual_slice_spec, expected_slice_spec)\n\n def test_auto_extract_slice_keys(self):\n features = [\n {\n 'gender': np.array(['f']),\n 'age': np.array([20])\n },\n {\n 'gender': np.array(['m']),\n 'age': 
np.array([45])\n },\n {\n 'gender': np.array(['f']),\n 'age': np.array([15])\n },\n {\n 'gender': np.array(['m']),\n 'age': np.array([90])\n },\n ]\n stats = text_format.Parse(\n \"\"\"\n datasets {\n features: {\n path { step: 'gender' }\n type: STRING\n string_stats: {\n unique: 10\n }\n }\n features: {\n path { step: 'age' }\n type: INT\n num_stats: {\n histograms {\n buckets {\n low_value: 18\n high_value: 35\n }\n buckets {\n low_value: 35\n high_value: 80\n }\n type: QUANTILES\n }\n histograms {\n buckets {\n low_value: 18\n high_value: 80\n }\n type: STANDARD\n }\n }\n }\n }\n \"\"\", statistics_pb2.DatasetFeatureStatisticsList())\n transformed_age_feat_name = (\n auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX + 'age')\n with beam.Pipeline() as pipeline:\n slice_keys_extracts = (\n pipeline\n | 'CreateTestInput' >> beam.Create(features)\n | 'FeaturesToExtracts' >>\n beam.Map(lambda x: {constants.FEATURES_KEY: x})\n |\n 'AutoExtractSlices' >> auto_slice_key_extractor._AutoExtractSliceKeys(\n slice_spec=[\n slicer.SingleSliceSpec(),\n slicer.SingleSliceSpec(columns=[transformed_age_feat_name]),\n slicer.SingleSliceSpec(columns=['gender']),\n slicer.SingleSliceSpec(\n columns=['gender', transformed_age_feat_name])\n ],\n statistics=stats))\n\n def check_result(got):\n try:\n self.assertEqual(4, len(got), 'got: %s' % got)\n expected_results = sorted([\n [(), (('gender', 'f'),),\n (\n ('gender', 'f'),\n (transformed_age_feat_name, 0),\n ), ((transformed_age_feat_name, 0),)],\n [(), (('gender', 'm'),),\n (\n ('gender', 'm'),\n (transformed_age_feat_name, 1),\n ), ((transformed_age_feat_name, 1),)],\n [(), (('gender', 'f'),),\n (\n ('gender', 'f'),\n (transformed_age_feat_name, 0),\n ), ((transformed_age_feat_name, 0),)],\n [(), (('gender', 'm'),),\n (\n ('gender', 'm'),\n (transformed_age_feat_name, 2),\n ), ((transformed_age_feat_name, 2),)],\n ])\n got_results = []\n for item in got:\n self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)\n got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))\n self.assertEqual(sorted(got_results), sorted(expected_results))\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(slice_keys_extracts, check_result)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.version.VERSION.split", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.exists", "tensorflow.compat.v1.logging.warning", "tensorflow.compat.v1.python_io.tf_record_iterator" ], [ "numpy.array", "tensorflow.test.main" ] ]
saridormi/commit_message_generation
[ "c25db61a5f41accfb566caaea5feb0d275751293" ]
[ "src/model/encoder_decoder_module.py" ]
[ "from copy import copy\nfrom typing import Optional\n\nimport torch\nimport pytorch_lightning as pl\nfrom transformers import (\n EncoderDecoderModel,\n RobertaModel,\n RobertaConfig,\n GPT2LMHeadModel,\n GPT2Config,\n RobertaTokenizer,\n GPT2Tokenizer,\n AdamW,\n get_linear_schedule_with_warmup,\n)\n\nimport nltk\n\nnltk.download(\"wordnet\")\n\n\nclass EncoderDecoderModule(pl.LightningModule):\n def __init__(\n self,\n learning_rate: float,\n src_tokenizer: RobertaTokenizer,\n trg_tokenizer: GPT2Tokenizer,\n num_epochs: int,\n num_batches: int,\n num_gpus: int,\n num_layers_encoder: Optional[int] = None,\n num_layers_decoder: Optional[int] = None,\n encoder_name_or_path: Optional[str] = None,\n decoder_name_or_path: Optional[str] = None,\n **kwargs,\n ):\n super().__init__()\n\n self._src_tokenizer = src_tokenizer\n self._trg_tokenizer = trg_tokenizer\n self._num_epochs = num_epochs\n self._num_batches = num_batches\n self._num_gpus = num_gpus\n self.learning_rate = learning_rate\n\n self.save_hyperparameters()\n\n if encoder_name_or_path is not None and decoder_name_or_path is not None:\n # use pretrained RoBERTa as encoder\n encoder = RobertaModel.from_pretrained(encoder_name_or_path)\n # resize embeddings to match vocabulary size\n encoder.resize_token_embeddings(len(self._src_tokenizer))\n # remove layers if necessary\n if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:\n encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)\n\n # use pretrained GPT-2 as decoder\n config = GPT2Config.from_pretrained(decoder_name_or_path)\n config.is_decoder = True\n config.add_cross_attention = True\n decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)\n # remove layers if necessary\n if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:\n decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)\n\n elif num_layers_decoder is not None and num_layers_encoder is not None:\n # use randomly initialized RoBERTa as encoder\n encoder_config = RobertaConfig()\n encoder_config.num_hidden_layers = num_layers_encoder\n encoder = RobertaModel(config=encoder_config)\n # resize embeddings to match vocabulary size\n encoder.resize_token_embeddings(len(self._src_tokenizer))\n\n # use randomly initialized GPT-2 as decoder\n decoder_config = GPT2Config()\n decoder_config.n_layer = num_layers_decoder\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n decoder = GPT2LMHeadModel(config=decoder_config)\n else:\n raise ValueError(\n \"You have to specify either num_layers for training from scratch \\\n or paths for loading pretrained models\"\n )\n\n self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)\n\n # cache is currently not supported by EncoderDecoder framework\n self.model.decoder.config.use_cache = False\n\n # do not tie output embeddings to input embeddings\n self.model.config.tie_word_embeddings = False\n\n # to make logs for different batch sizes prettier\n self.examples_count = 0\n\n def forward(self, batch):\n return self.model(\n input_ids=batch[\"diff_input_ids\"],\n attention_mask=batch[\"diff_attention_mask\"],\n decoder_input_ids=batch[\"msg_input_ids\"],\n decoder_attention_mask=batch[\"msg_attention_mask\"],\n labels=batch[\"msg_labels\"],\n )\n\n def training_step(self, batch, batch_idx):\n self.examples_count += len(batch[\"diff_input_ids\"])\n loss, logits = 
self(batch)[:2]\n self.logger.experiment.log({\"train_loss_step\": loss}, step=self.examples_count)\n return {\"loss\": loss}\n\n def training_epoch_end(self, outputs):\n train_loss_mean = torch.stack([x[\"loss\"] for x in outputs]).mean()\n self.logger.experiment.log({\"train_loss_epoch\": train_loss_mean}, step=self.examples_count)\n\n def next_token_metrics_step(self, batch):\n loss, scores = self(batch)[:2]\n return {\"loss\": loss}\n\n def next_token_metrics_epoch_end(self, outputs, stage):\n \"\"\"\n Logic for validation & testing epoch end:\n 1) Calculate accuracy@1, accuracy@5, MRR@5\n 2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint\n 3) Log everything to wandb\n \"\"\"\n loss = torch.stack([x[\"loss\"] for x in outputs]).mean()\n metrics = {f\"{stage}_loss_epoch\": loss}\n if stage == \"val\":\n self.log(\"val_loss_epoch\", metrics[\"val_loss_epoch\"], on_step=False, on_epoch=True, prog_bar=True, logger=False)\n self.logger.experiment.log(metrics, step=self.examples_count)\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n return self.next_token_metrics_step(batch)\n\n def validation_epoch_end(self, outputs):\n self.next_token_metrics_epoch_end(outputs, stage=\"val\")\n\n def test_step(self, batch, batch_idx):\n return self.next_token_metrics_step(batch)\n\n def test_epoch_end(self, outputs):\n self.next_token_metrics_epoch_end(outputs, stage=\"test\")\n\n def configure_optimizers(self):\n optimizer = AdamW(self.parameters(), lr=self.learning_rate)\n scheduler = {\n \"scheduler\": get_linear_schedule_with_warmup(\n optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches\n ),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n return [optimizer], [scheduler]\n\n @staticmethod\n def remove_layers_from_model(teacher, num_layers, is_gpt):\n if not is_gpt:\n teacher_config = teacher.config\n student_config = copy(teacher.config)\n student_config.num_hidden_layers = num_layers\n student = RobertaModel(config=student_config)\n\n # copy all embeddings\n student.embeddings.word_embeddings = teacher.embeddings.word_embeddings\n student.embeddings.position_embeddings = teacher.embeddings.position_embeddings\n student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings\n student.embeddings.LayerNorm = teacher.embeddings.LayerNorm\n student.embeddings.dropout = teacher.embeddings.dropout\n\n # uniformly pick from middle layers from teacher\n # it is basically np.linspace(0, teacher_config.num_hidden_layers,\n # num=student_config.num_hidden_layers, endpoint=True)\n step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)\n for student_layer, teacher_layer in enumerate(\n int(i * step) for i in range(student_config.num_hidden_layers)\n ):\n student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]\n\n else:\n teacher_config = teacher.config\n student_config = copy(teacher.config)\n student_config.n_layer = num_layers\n\n student = GPT2LMHeadModel(config=student_config)\n\n # Copying all embeddings\n student.transformer.wte = teacher.transformer.wte\n student.transformer.wpe = teacher.transformer.wpe\n student.transformer.drop = teacher.transformer.drop\n # Maybe there is something else in BERT that need to be copied!\n # Specific thing for GPT2LMHead. 
Not necessary for BERT\n student.tie_weights()\n # Uniformly pick from middle layers from teacher\n # It is basically np.linspace(0, teacher_config.n_layer, num=student_config.n_layer, endpoint=True)\n step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)\n for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):\n student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]\n return student\n" ]
[ [ "torch.stack" ] ]
Xaenalt/model_server
[ "f977dbf1246ebf85e960ca058e814deac7c6a16c" ]
[ "example_client/multi_inputs.py" ]
[ "#\n# Copyright (c) 2019-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Multi-threaded sample to run a RMNet & SSDMobilenet v2 that will\n# detect only person, bike and vehicle (change the output parsing\n# for more classes)\n#\n# Example usage:\n# RMNet: python3.6 multi_inputs.py -n \"RMNet\" -l \"data\" -o \"detection_out\"\n# -d 1024 -i 127.0.0.1 -p 9001 -c 1\n# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4\n# SSDMobileNet: python3.6 multi_inputs.py -n \"SSDMobileNet\" -l \"image_tensor\"\n# -o \"DetectionOutput\" -d 300 -i 127.0.0.1 -p 9001 -c 1\n# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4\n\nfrom __future__ import print_function\nfrom argparse import ArgumentParser, SUPPRESS\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nfrom time import time, sleep\n\nimport sys\nimport os\nimport cv2\nimport grpc\nimport threading\nimport logging as log\nfrom tensorflow import make_tensor_proto, make_ndarray\n\n# global data (shared between threads & main)\nCLASSES = [\"None\", \"Pedestrian\", \"Vehicle\", \"Bike\", \"Other\"]\nCOLORS = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 128)]\nSRC_TYPE = [\"Camera\", \"Video\"]\n\nexit_ok = False # manage thread loop\nCAM_WIDTH = 640 # camera width\nCAM_HEIGHT = 480 # camera height\nCAM_FPS = 30 # camera speed\nCONFIDENCE_THRESHOLD = 0.75 # detection confidence\n\n#####################################################################################\n\ndef build_argparser():\n parser = ArgumentParser(add_help=False)\n args = parser.add_argument_group('Options')\n args.add_argument('-h', '--help', action='help', default=SUPPRESS,\n help='Show this help message and exit.')\n args.add_argument('-n', '--network_name', required=True,\n type=str, help='Network name')\n args.add_argument('-l', '--input_layer', required=True,\n type=str, help='Input layer name')\n args.add_argument('-o', '--output_layer', required=True,\n type=str, help='Output layer name')\n args.add_argument('-d', '--frame_size', required=True,\n type=int, help='Input frame width and height that matches used model')\n args.add_argument('-c', '--num_cameras', help='Number of cameras to be used',\n required=False, type=int, default=1)\n args.add_argument('-f', '--file', help='Path to the video file',\n required=False, type=str)\n args.add_argument('-i', '--ip', help='ip address of the ovms', required=True)\n args.add_argument('-p', '--port', help='port of the ovms', required=True)\n\n return parser\n\n# Decoding idea based on the link below. Not very accurate. 
So pls implement yours\n# https://github.com/opencv/open_model_zoo/blob/master/intel_models/\\\n# person-vehicle-bike-detection-crossroad-0078/\\\n# description/person-vehicle-bike-detection-crossroad-0078.md\ndef parse_output(thr_id, res, frame):\n for batch, data in enumerate(res):\n pred = data[0]\n for values in enumerate(pred):\n # tuple\n index = values[0]\n l_pred = values[1]\n\n # actual predictions\n img_id = l_pred[0]\n label = l_pred[1]\n conf = l_pred[2]\n x_min = l_pred[3]\n y_min = l_pred[4]\n x_max = l_pred[5]\n y_max = l_pred[6]\n\n # preventing any wrong array indexing (for RMNet)\n if label > 4:\n # Unsupported class label detected. Change to `other`.\n label = 4\n\n # Do you want confidence level to be passed from command line?\n if img_id != -1 and conf >= CONFIDENCE_THRESHOLD:\n # draw the bounding boxes on the frame\n height, width = frame.shape[:2]\n cv2.rectangle(frame, (int(width * x_min), int(height * y_min)),\n (int(width * x_max), int(height * y_max)), COLORS[int(label)], 2)\n cv2.putText(frame, str(CLASSES[int(label)]), (int(width * x_min)-10,\n int(height * y_min)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n COLORS[int(label)], 2)\n return frame\n\n# This is common for both the camera & video files\ndef thread_function(thr_id, network_name, input_layer, output_layer, input_dimension,\n ip, port, disp_buf, src_type, src_name):\n\n if src_type == \"Camera\":\n # UVC camera init - camera threads always come first and we use it\n # to generate the camera indexes\n cam = cv2.VideoCapture(thr_id)\n if not (cam.isOpened()):\n log.error(\"Failed to open the UVC camera {}\".format(thr_id))\n return\n\n cam.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)\n cam.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)\n # not all UVC cameras honor below request\n cam.set(cv2.CAP_PROP_FPS, CAM_FPS)\n # If your camera sends other than MJPEG, change below\n cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*\"MJPG\"))\n elif src_type == \"Video\":\n # Assumption: src_name will be valid\n cam = cv2.VideoCapture(src_name)\n\n # inference stats\n fps = 0 # camera fps\n inf_fps = 0 # inference fps\n dropped_fps = 0 # dropped frame fps\n cam_start_time = time()\n\n # ovms connection\n channel = grpc.insecure_channel(\"{}:{}\".format(ip, port))\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n\n request = predict_pb2.PredictRequest()\n # Note: Pls maintain the same name while launching ovms docker container\n request.model_spec.name = network_name\n\n global exit_ok\n while exit_ok == False:\n ret, frame = cam.read()\n\n if src_type == \"Video\":\n # restart the video file when it reaches the end\n if not ret:\n cam.set(cv2.CAP_PROP_POS_FRAMES, 0)\n continue\n # normalize the video frame dimension to that of the camera\n else:\n # to maintain the frame inferencing parity with the cameras, lets sleep\n # here to maintain cam_fps speed\n sleep((1000 / CAM_FPS) / 1000)\n # enable below line to keep video file & camera output window dimensions the same\n # frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT))\n\n fps = fps + 1\n if (time() - cam_start_time) * 1000 >= 1000:\n log.warning('{}{} fps: {}, Inf fps: {}, dropped fps: {}'\n .format(src_type, thr_id, fps, inf_fps, dropped_fps))\n fps = 0\n inf_fps = 0\n dropped_fps = 0\n cam_start_time = time()\n\n # resize the frame to what network input layer expects it to be\n image = cv2.resize(frame, (input_dimension, input_dimension))\n image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)\n image = 
image.astype('float32')\n\n inf_time = time()\n # send the input as protobuf\n request.inputs[input_layer].CopyFrom(\n make_tensor_proto(image, shape=None))\n\n try:\n result = stub.Predict(request, 10.0)\n except Exception as e:\n log.error('Caught exception {}'.format(e))\n cam.release()\n return\n duration = time() - inf_time\n\n # decode the received output as protobuf\n res = make_ndarray(result.outputs[output_layer])\n\n if not res.any():\n log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))\n dropped_fps = dropped_fps + 1\n disp_buf[thr_id] = frame\n else:\n log.debug('Predictions came back fine')\n inf_fps = inf_fps + 1\n disp_buf[thr_id] = parse_output(thr_id, res, frame)\n\n # while exit_ok == False\n\n cam.release()\n log.warning('Exiting thread {}'.format(thr_id))\n\n#####################################################################################\n\ndef main():\n log.basicConfig(format=\"[$(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n args = build_argparser().parse_args()\n num_cam = args.num_cameras if (args.num_cameras) else 0\n vid_src = args.file\n network_name = args.network_name\n input_layer = args.input_layer\n output_layer = args.output_layer\n input_dimension = args.frame_size\n ip_addr = args.ip\n port_no = args.port\n\n if not args.file and not args.num_cameras:\n log.error('Please supply either the camera or the video file. Try -f for options')\n return\n\n if not ip_addr or not port_no:\n log.error('Please supply valid IP and/or port number of OVMS server')\n return\n\n video_files = []\n if vid_src:\n if os.path.isdir(vid_src):\n for r, d, f in os.walk(vid_src):\n for f_ in f:\n # only mp4 files supported as of now\n if '.mp4' in f_:\n video_files.append(r + f_)\n elif os.path.isfile(vid_src):\n if '.mp4' in vid_src:\n video_files.append(vid_src)\n\n # thread management\n thr = [None] * (num_cam + len(video_files))\n # display buffers shared between camera threads\n disp_buf = {}\n\n # Known issue: Depending on the USB enumeration, camera nodes need not be\n # in sequence. Pls pass the device node info through a file or command line\n # if it happens in your system\n for i in range(num_cam):\n disp_buf[i] = None\n thr[i] = threading.Thread(target=thread_function,\n args=(i, network_name, input_layer, output_layer, input_dimension,\n ip_addr, port_no, disp_buf, SRC_TYPE[0], None))\n thr[i].start()\n\n for i in range(num_cam, num_cam + len(video_files)):\n disp_buf[i] = None\n thr[i] = threading.Thread(target=thread_function,\n args=(i, network_name, input_layer, output_layer, input_dimension,\n ip_addr, port_no, disp_buf, SRC_TYPE[1], video_files[i - num_cam]))\n thr[i].start()\n\n # For whatever reasons, cv2.imshow() doesnt work from threads. Hence we shove the\n # infered data to the main thread to display.\n global exit_ok\n while exit_ok == False:\n for i in range(num_cam + len(video_files)):\n if disp_buf[i] is not None:\n cv2.imshow('Predictions {}'.format(i), disp_buf[i])\n disp_buf[i] = None\n\n # exit the program if 'q' is pressed on any window\n if cv2.waitKey(1) == ord('q'):\n exit_ok = True\n break\n\n # wait for all the threads to join\n for i in range(num_cam):\n thr[i].join()\n\n # close all open windows\n cv2.destroyAllWindows()\n log.warning('Good Bye!')\n\nif __name__ == '__main__':\n sys.exit(main() or 0)\n" ]
[ [ "tensorflow.make_tensor_proto", "tensorflow.make_ndarray" ] ]
LaurenceBeard/improver
[ "b7cfe44f3a802d2a3d65f76a325215033c9de074", "b7cfe44f3a802d2a3d65f76a325215033c9de074" ]
[ "improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py", "improver/utilities/solar.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2019 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nUnit tests for ConvertLocationAndScaleParameters\n\"\"\"\nimport unittest\n\nimport numpy as np\nfrom scipy import stats\nfrom iris.tests import IrisTest\n\nfrom improver.ensemble_copula_coupling.ensemble_copula_coupling import (\n ConvertLocationAndScaleParameters as Plugin)\n\n\nclass Test__init__(IrisTest):\n\n \"\"\"Test the __init__ method.\"\"\"\n\n def test_valid_distribution(self):\n \"\"\"Test for a valid distribution.\"\"\"\n plugin = Plugin(distribution=\"norm\")\n self.assertEqual(plugin.distribution, stats.norm)\n self.assertEqual(plugin.shape_parameters, [])\n\n def test_valid_distribution_with_shape_parameters(self):\n \"\"\"Test for a valid distribution with shape parameters.\"\"\"\n plugin = Plugin(distribution=\"truncnorm\", shape_parameters=[0, np.inf])\n self.assertEqual(plugin.distribution, stats.truncnorm)\n self.assertEqual(plugin.shape_parameters, [0, np.inf])\n\n def test_invalid_distribution(self):\n \"\"\"Test for an invalid distribution.\"\"\"\n msg = \"The distribution requested\"\n with self.assertRaisesRegex(AttributeError, msg):\n Plugin(distribution=\"elephant\")\n\n\nclass Test__repr__(IrisTest):\n\n \"\"\"Test string representation of plugin.\"\"\"\n\n def test_basic(self):\n \"\"\"Test string representation\"\"\"\n expected_string = (\"<ConvertLocationAndScaleParameters: \"\n \"distribution: norm; shape_parameters: []>\")\n result = str(Plugin())\n self.assertEqual(result, expected_string)\n\n\nclass Test__rescale_shape_parameters(IrisTest):\n\n \"\"\"Test the _rescale_shape_parameters\"\"\"\n\n def setUp(self):\n \"\"\"Set up values for testing.\"\"\"\n self.location_parameter = np.array([-1, 0, 1])\n self.scale_parameter = np.array([1, 1.5, 2])\n\n def test_truncated_at_zero(self):\n \"\"\"Test scaling shape parameters implying a truncation at zero.\"\"\"\n expected = [np.array([1., 0, 
-0.5]),\n np.array([np.inf, np.inf, np.inf])]\n shape_parameters = [0, np.inf]\n plugin = Plugin(distribution=\"truncnorm\",\n shape_parameters=shape_parameters)\n plugin._rescale_shape_parameters(\n self.location_parameter, self.scale_parameter)\n self.assertArrayAlmostEqual(plugin.shape_parameters, expected)\n\n def test_discrete_shape_parameters(self):\n \"\"\"Test scaling discrete shape parameters.\"\"\"\n expected = [np.array([-3, -2.666667, -2.5]), np.array([7, 4, 2.5])]\n shape_parameters = [-4, 6]\n plugin = Plugin(distribution=\"truncnorm\",\n shape_parameters=shape_parameters)\n plugin._rescale_shape_parameters(\n self.location_parameter, self.scale_parameter)\n self.assertArrayAlmostEqual(plugin.shape_parameters, expected)\n\n def test_alternative_distribution(self):\n \"\"\"Test specifying a distribution other than truncated normal. In\n this instance, no rescaling is applied.\"\"\"\n shape_parameters = [0, np.inf]\n plugin = Plugin(distribution=\"norm\",\n shape_parameters=shape_parameters)\n plugin._rescale_shape_parameters(\n self.location_parameter, self.scale_parameter)\n self.assertArrayEqual(plugin.shape_parameters, shape_parameters)\n\n def test_no_shape_parameters_exception(self):\n \"\"\"Test raising an exception when shape parameters are not specified\n for the truncated normal distribution.\"\"\"\n plugin = Plugin(distribution=\"truncnorm\")\n msg = \"For the truncated normal distribution\"\n with self.assertRaisesRegex(ValueError, msg):\n plugin._rescale_shape_parameters(\n self.location_parameter, self.scale_parameter)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2019 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\" Utilities to find the relative position of the sun.\"\"\"\n\nimport datetime as dt\n\nimport cf_units as unit\nimport numpy as np\n\nfrom improver import BasePlugin\nfrom improver.utilities.spatial import (\n lat_lon_determine, transform_grid_to_lat_lon)\nfrom improver.utilities.temporal import iris_time_to_datetime\n\n\ndef calc_solar_declination(day_of_year):\n \"\"\"\n Calculate the Declination for the day of the year.\n\n Calculation equivalent to the calculation defined in\n NOAA Earth System Research Lab Low Accuracy Equations\n https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html\n\n Args:\n day_of_year (int):\n Day of the year 0 to 365, 0 = 1st January\n\n Returns:\n float:\n Declination in degrees.North-South\n \"\"\"\n # Declination (degrees):\n # = -(axial_tilt)*cos(360./orbital_year * day_of_year - solstice_offset)\n if day_of_year < 0 or day_of_year > 365:\n msg = ('Day of the year must be between 0 and 365')\n raise ValueError(msg)\n solar_declination = -23.5 * np.cos(np.radians(0.9856 * day_of_year + 9.3))\n return solar_declination\n\n\ndef calc_solar_hour_angle(longitudes, day_of_year, utc_hour):\n \"\"\"\n Calculate the Solar Hour angle for each element of an array of longitudes.\n\n Calculation equivalent to the calculation defined in\n NOAA Earth System Research Lab Low Accuracy Equations\n https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html\n\n Args:\n longitudes (float or numpy.ndarray):\n A single Longitude or array of Longitudes\n longitudes needs to be between 180.0 and -180.0 degrees\n day_of_year (int):\n Day of the year 0 to 365, 0 = 1st January\n utc_hour (float):\n Hour of the day in UTC\n\n Returns:\n solar_hour_angle (float or numpy.ndarray)\n Hour angles in degrees East-West\n \"\"\"\n if day_of_year < 0 or day_of_year > 365:\n msg = ('Day of the year must be between 0 and 365')\n raise ValueError(msg)\n if utc_hour < 0.0 or utc_hour > 24.0:\n msg = ('Hour must be between 0 and 24.0')\n raise ValueError(msg)\n thetao = 2*np.pi*day_of_year/365.0\n eqt = (0.000075 + 0.001868 * np.cos(thetao) -\n 0.032077 * np.sin(thetao) - 0.014615 * np.cos(2*thetao) -\n 0.040849 * np.sin(2*thetao))\n\n # Longitudinal Correction from the Grenwich Meridian\n lon_correction = 24.0*longitudes/360.0\n # Solar time (hours):\n solar_time = utc_hour + lon_correction + eqt*12/np.pi\n # Hour angle (degrees):\n solar_hour_angle = (solar_time - 12.0) * 15.0\n\n return solar_hour_angle\n\n\ndef calc_solar_elevation(latitudes, longitudes, day_of_year, utc_hour,\n return_sine=False):\n \"\"\"\n Calculate the Solar elevation.\n\n Args:\n latitudes (float or numpy.ndarray):\n A single Latitude or array of Latitudes\n latitudes needs to be between -90.0 and 90.0\n longitudes (float or numpy.ndarray):\n A single Longitude or array of Longitudes\n longitudes needs to be between 180.0 and -180.0\n day_of_year (int):\n Day of the year 0 to 365, 0 = 1st January\n utc_hour (float):\n Hour of the day in UTC in hours\n return_sine (bool):\n If True return 
sine of solar elevation.\n Default False.\n\n Returns:\n float or numpy.ndarray:\n Solar elevation in degrees for each location.\n \"\"\"\n if np.min(latitudes) < -90.0 or np.max(latitudes) > 90.0:\n msg = ('Latitudes must be between -90.0 and 90.0')\n raise ValueError(msg)\n if day_of_year < 0 or day_of_year > 365:\n msg = ('Day of the year must be between 0 and 365')\n raise ValueError(msg)\n if utc_hour < 0.0 or utc_hour > 24.0:\n msg = ('Hour must be between 0 and 24.0')\n raise ValueError(msg)\n declination = calc_solar_declination(day_of_year)\n decl = np.radians(declination)\n hour_angle = calc_solar_hour_angle(longitudes, day_of_year, utc_hour)\n rad_hours = np.radians(hour_angle)\n lats = np.radians(latitudes)\n # Calculate solar position:\n\n solar_elevation = ((np.sin(decl) * np.sin(lats) +\n np.cos(decl) * np.cos(lats) *\n np.cos(rad_hours)))\n if not return_sine:\n solar_elevation = np.degrees(np.arcsin(solar_elevation))\n\n return solar_elevation\n\n\ndef daynight_terminator(longitudes, day_of_year, utc_hour):\n \"\"\"\n Calculate the Latitude values of the daynight terminator\n for the given longitudes.\n\n Args:\n longitudes (numpy.ndarray):\n Array of longitudes.\n longitudes needs to be between 180.0 and -180.0 degrees\n day_of_year (int):\n Day of the year 0 to 365, 0 = 1st January\n utc_hour (float):\n Hour of the day in UTC\n\n Returns:\n numpy.ndarray:\n latitudes of the daynight terminator\n \"\"\"\n if day_of_year < 0 or day_of_year > 365:\n msg = ('Day of the year must be between 0 and 365')\n raise ValueError(msg)\n if utc_hour < 0.0 or utc_hour > 24.0:\n msg = ('Hour must be between 0 and 24.0')\n raise ValueError(msg)\n declination = calc_solar_declination(day_of_year)\n decl = np.radians(declination)\n hour_angle = calc_solar_hour_angle(longitudes, day_of_year, utc_hour)\n rad_hour = np.radians(hour_angle)\n lats = np.arctan(-np.cos(rad_hour)/np.tan(decl))\n lats = np.degrees(lats)\n return lats\n\n\nclass DayNightMask(BasePlugin):\n \"\"\"\n Plugin Class to generate a daynight mask for the provided cube\n \"\"\"\n def __init__(self):\n \"\"\" Initial the DayNightMask Object \"\"\"\n self.night = 0\n self.day = 1\n\n def __repr__(self):\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = ('<DayNightMask : '\n 'Day = {}, Night = {}>'.format(self.day, self.night))\n return result\n\n def _create_daynight_mask(self, cube):\n \"\"\"\n Create blank daynight mask cube\n\n Args:\n cube (iris.cube.Cube):\n cube with the times and coordinates required for mask\n\n Returns:\n iris.cube.Cube:\n Blank daynight mask cube. 
The resulting cube will be the\n same shape as the time, y, and x coordinate, other coordinates\n will be ignored although they might appear as attributes\n on the cube as it is extracted from the first slice.\n \"\"\"\n daynight_mask = next(cube.slices([cube.coord('time'),\n cube.coord(axis='y'),\n cube.coord(axis='x')])).copy()\n daynight_mask.long_name = 'day_night_mask'\n daynight_mask.standard_name = None\n daynight_mask.var_name = None\n daynight_mask.units = unit.Unit('1')\n daynight_mask.data = np.ones(daynight_mask.data.shape,\n dtype='int')*self.night\n return daynight_mask\n\n def _daynight_lat_lon_cube(self, mask_cube, day_of_year, utc_hour):\n \"\"\"\n Calculate the daynight mask for the provided Lat Lon cube\n\n Args:\n mask_cube (iris.cube.Cube):\n daynight mask cube - data initially set to self.night\n day_of_year (int):\n day of the year 0 to 365, 0 = 1st January\n utc_hour (float):\n Hour in UTC\n\n Returns:\n iris.cube.Cube:\n daynight mask cube - daytime set to self.day\n \"\"\"\n lons = mask_cube.coord('longitude').points\n lats = mask_cube.coord('latitude').points\n terminator_lats = daynight_terminator(lons, day_of_year, utc_hour)\n lons_zeros = np.zeros_like(lons)\n lats_zeros = np.zeros_like(lats).reshape(len(lats), 1)\n lats_on_lon = lats.reshape(len(lats), 1) + lons_zeros\n terminator_on_lon = lats_zeros + terminator_lats\n dec = calc_solar_declination(day_of_year)\n if dec > 0.0:\n index = np.where(lats_on_lon >= terminator_on_lon)\n else:\n index = np.where(lats_on_lon < terminator_on_lon)\n mask_cube.data[index] = self.day\n return mask_cube\n\n def process(self, cube):\n \"\"\"\n Calculate the daynight mask for the provided cube. Note that only the\n hours and minutes of the dtval variable are used. To ensure consistent\n behaviour with changes of second or subsecond precision, the second\n component is added to the time object. This means that when the hours\n and minutes are used, we have correctly rounded to the nearest minute,\n e.g.::\n\n dt(2017, 1, 1, 11, 59, 59) -- +59 --> dt(2017, 1, 1, 12, 0, 58)\n dt(2017, 1, 1, 12, 0, 1) -- +1 --> dt(2017, 1, 1, 12, 0, 2)\n dt(2017, 1, 1, 12, 0, 30) -- +30 --> dt(2017, 1, 1, 12, 1, 0)\n\n Args:\n cube (iris.cube.Cube):\n input cube\n\n Returns:\n iris.cube.Cube:\n daynight mask cube, daytime set to self.day\n nighttime set to self.night.\n The resulting cube will be the same shape as\n the time, y, and x coordinate, other coordinates\n will be ignored although they might appear as attributes\n on the cube as it is extracted from the first slice.\n \"\"\"\n daynight_mask = self._create_daynight_mask(cube)\n dtvalues = iris_time_to_datetime(daynight_mask.coord('time'))\n for i, dtval in enumerate(dtvalues):\n mask_cube = daynight_mask[i]\n day_of_year = (dtval - dt.datetime(dtval.year, 1, 1)).days\n dtval = dtval + dt.timedelta(seconds=dtval.second)\n utc_hour = (dtval.hour * 60.0 + dtval.minute) / 60.0\n trg_crs = lat_lon_determine(mask_cube)\n # Grids that are not Lat Lon\n if trg_crs is not None:\n lats, lons = transform_grid_to_lat_lon(mask_cube)\n solar_el = calc_solar_elevation(lats, lons,\n day_of_year, utc_hour)\n mask_cube.data[np.where(solar_el > 0.0)] = self.day\n else:\n mask_cube = self._daynight_lat_lon_cube(mask_cube,\n day_of_year, utc_hour)\n daynight_mask.data[i, ::] = mask_cube.data\n return daynight_mask\n" ]
[ [ "numpy.array" ], [ "numpy.max", "numpy.zeros_like", "numpy.sin", "numpy.arcsin", "numpy.tan", "numpy.ones", "numpy.min", "numpy.degrees", "numpy.radians", "numpy.where", "numpy.cos" ] ]
rogall-e/advent_of_code
[ "a8c41fb63478dd7b99c88c4fa99b7ba0bab3842d" ]
[ "2021/day4_part1.py" ]
[ "import numpy as np # import numpy\n\nwith open(\"data/day4.txt\") as f:\n drawing_numbers = f.readline()\n board_lst = []\n board_line = []\n counter = 0\n\n for line in f:\n if line != '\\n':\n board_line.append(line.strip()) \n if len(board_line) == 5:\n board_lst.append(board_line)\n board_line = []\n \ndrawing_numbers = drawing_numbers.strip().split(',')\n\n\ndef create_board(board_lst):\n board_array = []\n for item in board_lst:\n board = [x for x in item.split(' ') if x.strip() != '']\n board_array.append(board)\n board_array = np.array(board_array)\n board_array = board_array.astype(float)\n return board_array\n\ndef check_winning(board_lst, number_lst):\n winning_condition = {\n 'Answer': 0,\n 'counter': 625\n }\n for item in board_lst:\n board = create_board(item)\n counter=0\n for number in number_lst:\n number = float(number) \n counter += 1\n if number in board:\n result = np.where(board == number)\n board[int(result[0])][int(result[1])] = np.nan\n if np.all(np.isnan(board), axis=1).any() or np.all(np.isnan(board), axis=0).any():\n if counter < winning_condition['counter']:\n winning_condition['counter'] = counter\n winning_condition['Answer'] = number * np.nansum(board)\n print('The Answer is:', winning_condition)\n \n \n \ncheck_winning(board_lst, drawing_numbers)" ]
[ [ "numpy.where", "numpy.array", "numpy.isnan", "numpy.nansum" ] ]
wangleon/gamse
[ "ed2a3730469a1eeef3def1beca990e9d2641a53b" ]
[ "gamse/utils/onedarray.py" ]
[ "from itertools import tee\nimport numpy as np\nimport scipy.interpolate as intp\nfrom scipy.signal import savgol_filter\n\ndef get_edge_bin(array):\n \"\"\"Detect the edge indcies of a binary 1-D array.\n\n Args:\n array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary\n (0/1) or boolean (True/False) values.\n\n Returns:\n list: A list containing starting and ending indices of the non-zero\n blocks.\n\n Examples:\n\n .. code-block:: python\n\n >>> a = [0,1,1,0,0,0,1,0,1]\n >>> get_edge_bin(a)\n [(1, 3), (6, 7), (8, 9)]\n >>> b = [True, False, True, True, False, False]\n >>> get_edge_bin(b)\n [(0, 1), (2, 4)]\n \"\"\"\n array1 = np.int64(array)\n array1 = np.insert(array1, 0, 0)\n array1 = np.append(array1, 0)\n tmp = array1 - np.roll(array1, 1)\n i1_lst = np.nonzero(tmp == 1)[0] - 1\n i2_lst = np.nonzero(tmp ==-1)[0] - 1\n return list(zip(i1_lst, i2_lst))\n\ndef get_local_minima(x, window=None):\n \"\"\"Get the local minima of a 1d array in a window.\n\n Args:\n x (:class:`numpy.ndarray`): A list or Numpy 1d array.\n window (*int* or :class:`numpy.ndarray`): An odd integer or a list of\n odd integers as the lengthes of searching window.\n Returns:\n tuple: A tuple containing:\n\n * **index** (:class:`numpy.ndarray`): A numpy 1d array containing \n indices of all local minima.\n * **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing\n values of all local minima.\n\n \"\"\"\n x = np.array(x)\n dif = np.diff(x)\n ind = dif > 0\n tmp = np.logical_xor(ind, np.roll(ind,1))\n idx = np.logical_and(tmp,ind)\n index = np.where(idx)[0]\n if window is None:\n # window is not given\n return index, x[index]\n else:\n # window is given\n if isinstance(window, int):\n # window is an integer\n window = np.repeat(window, len(x))\n elif isinstance(window, np.ndarray):\n # window is a numpy array\n #if np.issubdtype(window.dtype, int):\n if window.dtype.type in [np.int16, np.int32, np.int64]:\n pass\n else:\n # window are not integers\n print('window array are not integers')\n raise ValueError\n else:\n raise ValueError\n\n if 0 in window%2:\n # not all of the windows are odd\n raise ValueError\n\n halfwin_lst = (window-1)//2\n index_lst = []\n for i in index:\n halfwin = halfwin_lst[i]\n i1 = max(0, i-halfwin)\n i2 = min(i+halfwin+1, len(x))\n if i == x[i1:i2].argmin() + i1:\n index_lst.append(i)\n if len(index_lst)>0:\n index_lst = np.array(index_lst)\n return index_lst, x[index_lst]\n else:\n return np.array([]), np.array([])\n\ndef implete_none(lst):\n \"\"\"Replace the None elemnets at the beginning and the end of list by auto\n increment integers.\n \n Convert the first and last few `None` elements to auto increment integers.\n These integers are determined by the first and last integers in the input\n array.\n While the `None` elements between two integers in the input list will\n remain.\n\n Args:\n lst (list): A list contaning None values.\n Returns:\n newlst (list): A list containing auto increment integers.\n\t\n Examples:\n .. 
code-block:: python\n\n >>> a = [None,None,3,4,None,5,6,None,None]\n >>> implete_none(a)\n [1, 2, 3, 4, None, 5, 6, 7, 8]\n\n \"\"\"\n # filter the None values\n notnone_lst = [v for v in lst if v is not None]\n for i, v in enumerate(lst):\n if v == notnone_lst[0]:\n # first not-None element and its index\n notnone1 = i\n value1 = v\n if v == notnone_lst[-1]:\n # last not-None element and its index\n notnone2 = i\n value2 = v\n newlst = []\n for i,v in enumerate(lst):\n if i < notnone1:\n newlst.append(value1-(notnone1-i))\n elif i > notnone2:\n newlst.append(value2+(i-notnone2))\n else:\n newlst.append(v)\n return newlst\n\n\ndef derivative(*args, **kwargs):\n \"\"\"Get the first derivative of data arrays (*x*, *y*).\n\n If **y** is not given, the first argument will be taken as **y**, and the\n differential of the input array will be returned.\n\n Args:\n x (list or :class:`numpy.ndarray`): X-values of the input array (optional).\n y (list or :class:`numpy.ndarray`): Y-values of the input array.\n points (int): Number of points used to calculate derivative\n (optional, default is 3).\n\n Returns:\n :class:`numpy.ndarray`: Derivative of the input array.\n \"\"\"\n if len(args) == 1:\n y = np.array(args[0], dtype=np.float64)\n x = np.arange(y.size)\n elif len(args) == 2:\n x = np.array(args[0], dtype=np.float64)\n y = np.array(args[1], dtype=np.float64)\n else:\n raise ValueError\n npts = x.size\n points = kwargs.pop('points', 3)\n if points == 3:\n der = (np.roll(y,-1) - np.roll(y,1))/(np.roll(x,-1) - np.roll(x,1))\n a = np.array([-3., 4., -1.])\n der[0] = (a*y[0:3]).sum() / (a*x[0:3]).sum()\n der[-1] = (-a[::-1]*y[-3:]).sum() / (-a[::-1]*x[-3:]).sum()\n return der\n else:\n raise ValueError\n\ndef pairwise(array):\n \"\"\"Return pairwises of an iterable arrary.\n\n Args:\n array (list or :class:`numpy.ndarray`): The input iterable array.\n Returns:\n :class:`zip`: zip objects.\n \"\"\"\n a, b = tee(array)\n next(b, None)\n return zip(a, b)\n\ndef smooth(array, points, deg):\n \"\"\"Smooth an array.\n\n Args:\n array (:class:`numpy.ndarray`): Input array.\n points (int): Points of smoothing.\n deg (int): Degree of smoothing.\n\n Returns:\n :class:`numpy.ndarray`: smoothed array\n\n \"\"\"\n n = array.size\n if points == 5:\n if deg == 2:\n w_2 = np.array([31., 9., -3., -5., 3.])/35.\n w_1 = np.array([ 9., 13., 12., 6., -5.])/35.\n w_0 = np.array([-3., 12., 17., 12., -3.])/35.\n elif deg == 3:\n w_2 = np.array([69., 4., -6., 4., -1.])/70.\n w_1 = np.array([ 2., 27., 12., -8., 2.])/35.\n w_0 = np.array([-3., 12., 17., 12., -3.])/35.\n\n a = np.zeros((n, n))\n a[0, 0:5] = w_2\n a[1, 0:5] = w_1\n for i in np.arange(2, n-2):\n a[i, i-2:i+3] = w_0\n a[-2, -5:] = w_1[::-1]\n a[-1, -5:] = w_2[::-1]\n\n result = np.matrix(a)*np.matrix(array.reshape(-1,1))\n return np.array(result)[:,0]\n\n\ndef iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,\n upper_clip=None, lower_clip=None):\n \"\"\"Smooth the input array with Savitzky-Golay filter with lower and/or\n upper clippings.\n\n Args:\n y (:class:`numpy.ndarray`): Input array.\n winlen (int): Window length of Savitzky-Golay filter.\n order (int): Order of Savitzky-Gaoly filter.\n maxiter (int): Maximum number of iterations.\n lower_clip (float): Lower sigma-clipping value.\n upper_clip (float): Upper sigma-clipping value.\n\n Returns:\n tuple: A tuple containing:\n\n * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.\n * **yres** (:class:`numpy.ndarray`) – Residuals of y values.\n * **mask** (:class:`numpy.ndarray`) – Mask of y 
values.\n * **std** (float) – Standard deviation.\n \"\"\"\n x = np.arange(y.size)\n mask = np.ones_like(y, dtype=np.bool)\n\n for ite in range(maxiter):\n\n # fill masked values in y using interpolation\n f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)\n ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)\n yres = y - ysmooth\n std = yres[mask].std()\n\n # generate new mask\n # make a copy of existing mask\n new_mask = mask * np.ones_like(mask, dtype=np.bool)\n # give new mask with lower and upper clipping value\n if lower_clip is not None:\n new_mask *= (yres > -lower_clip * std)\n if upper_clip is not None:\n new_mask *= (yres < upper_clip * std)\n\n if new_mask.sum() == mask.sum():\n break\n mask = new_mask\n\n return ysmooth, yres, mask, std\n" ]
[ [ "numpy.array", "numpy.ones_like", "numpy.matrix", "scipy.interpolate.InterpolatedUnivariateSpline", "numpy.zeros", "numpy.roll", "numpy.logical_and", "numpy.diff", "numpy.nonzero", "numpy.where", "numpy.arange", "numpy.append", "numpy.int64", "numpy.insert" ] ]
RosaYen/DP_FL_recreation
[ "30607645d9633483a4afa50c0e00bea65c0fb355" ]
[ "DP_FL_recreate/opacus/tests/layers_grad_test.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport unittest\n\nimport torch\nimport torch.nn as nn\nfrom opacus import PerSampleGradientClipper\nfrom opacus.dp_model_inspector import DPModelInspector\nfrom opacus.layers import DPLSTM, DPMultiheadAttention, SequenceBias\nfrom opacus.utils.clipping import ConstantFlatClipper\n\n\nclass LayersGradTest(unittest.TestCase):\n def setUp(self):\n self.validator = DPModelInspector()\n\n def _reset_seeds(self):\n torch.manual_seed(1337)\n torch.cuda.manual_seed(1337)\n\n def _run_once(self, layer, criterion, *args):\n self._reset_seeds()\n layer.zero_grad()\n output = layer(*args)\n if isinstance(output, tuple):\n output = output[0]\n output = output.squeeze()\n\n y = torch.zeros_like(output)\n loss = criterion(output, y)\n loss.backward()\n\n def _check_one_layer(self, layer, *args, **kwargs):\n self._check_one_layer_with_criterion(\n layer, nn.L1Loss(reduction=\"mean\"), *args, **kwargs\n )\n self._check_one_layer_with_criterion(\n layer, nn.L1Loss(reduction=\"sum\"), *args, **kwargs\n )\n\n def _check_one_layer_with_criterion(self, layer, criterion, *args, **kwargs):\n self.validator.validate(layer)\n for name, param in layer.named_parameters():\n if (\"weight\" in name) or (\"bias\" in name):\n nn.init.uniform_(param, -1.0, 1.0)\n\n # run without DP\n self._run_once(layer, criterion, *args)\n vanilla_run_grads = [\n (name, p.grad.detach())\n for (name, p) in layer.named_parameters()\n if p.requires_grad\n ]\n\n # run with DP\n clipper = PerSampleGradientClipper(\n layer,\n ConstantFlatClipper(1e9),\n batch_first=kwargs.get(\"batch_first\", True),\n loss_reduction=criterion.reduction,\n )\n self._run_once(layer, criterion, *args)\n\n for param_name, param in layer.named_parameters():\n if param.requires_grad:\n self.assertTrue(\n hasattr(param, \"grad_sample\"),\n f\"Per-sample gradients haven't been computed for {param_name}\",\n )\n\n clipper.clip_and_accumulate()\n clipper.pre_step()\n\n private_run_grads = [\n (name, p.grad.detach())\n for (name, p) in layer.named_parameters()\n if p.requires_grad\n ]\n\n # compare\n for (vanilla_name, vanilla_grad), (private_name, private_grad) in zip(\n vanilla_run_grads, private_run_grads\n ):\n assert vanilla_name == private_name\n\n self.assertTrue(\n torch.allclose(vanilla_grad, private_grad, atol=10e-5, rtol=10e-3),\n f\"Gradient mismatch. 
Parameter: {layer}.{vanilla_name}, loss: {criterion.reduction}\",\n )\n\n clipper.close()\n\n def test_conv1d(self):\n x = torch.randn(64, 16, 24)\n layer = nn.Conv1d(16, 32, 3, 1)\n\n self._check_one_layer(layer, x)\n\n def test_conv2d(self):\n x = torch.randn(64, 16, 24, 24)\n layer = nn.Conv2d(16, 32, 3, 1)\n\n self._check_one_layer(layer, x)\n\n def test_linear(self):\n self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8))\n self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8, 8))\n\n def test_layernorm(self):\n x = torch.randn(64, 16, 24, 24)\n\n self._check_one_layer(nn.LayerNorm(24), x)\n self._check_one_layer(nn.LayerNorm((24, 24)), x)\n self._check_one_layer(nn.LayerNorm((16, 24, 24)), x)\n\n def test_groupnorm(self):\n self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10))\n self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9))\n self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9, 8))\n\n def test_instancenorm(self):\n self._check_one_layer(\n nn.InstanceNorm1d(16, affine=True), torch.randn(64, 16, 10)\n )\n self._check_one_layer(\n nn.InstanceNorm2d(16, affine=True), torch.randn(64, 16, 10, 9)\n )\n self._check_one_layer(\n nn.InstanceNorm3d(16, affine=True), torch.randn(64, 16, 10, 9, 8)\n )\n\n def test_sequence_bias(self):\n x = torch.randn(4, 3, 2)\n layer = SequenceBias(2)\n\n self._check_one_layer(layer, x, batch_first=False)\n\n def test_multihead_attention(self):\n x = torch.randn(16, 24, 32)\n\n layer = DPMultiheadAttention(32, 1)\n self._check_one_layer(layer, x, x, x, batch_first=False)\n\n layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True, dropout=0.05)\n self._check_one_layer(layer, x, x, x, batch_first=False)\n\n layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True)\n self._check_one_layer(layer, x, x, x, batch_first=False)\n\n layer = DPMultiheadAttention(\n 32, 1, bias=True, add_bias_kv=True, add_zero_attn=True\n )\n self._check_one_layer(layer, x, x, x, batch_first=False)\n\n q = torch.randn(16, 24, 32)\n k = torch.randn(20, 24, 28)\n v = torch.randn(20, 24, 28)\n layer = DPMultiheadAttention(\n 32, 1, bias=True, add_bias_kv=True, add_zero_attn=True, kdim=28, vdim=28\n )\n self._check_one_layer(layer, q, k, v, batch_first=False)\n\n def test_embedding(self):\n layer = nn.Embedding(256, 100)\n x1 = torch.randint(0, 255, (128, 42)).long()\n x2 = torch.randint(0, 255, (64,)).long()\n self._check_one_layer(layer, x1)\n self._check_one_layer(layer, x2)\n\n def test_lstm_batch_first(self):\n # input size : 25 output size : 12 minibatch : 30 sequence length : 20\n # Test batch_first=True case\n layer = DPLSTM(25, 12, 1, batch_first=True)\n x = torch.randn(30, 20, 25)\n self._check_one_layer(layer, x, batch_first=True)\n\n def test_lstm_batch_second(self):\n # input size : 25 output size : 12 minibatch : 30 sequence length : 20\n\n # Test batch_first=False case\n layer = DPLSTM(25, 12, 1, batch_first=False)\n x = torch.randn(20, 30, 25)\n self._check_one_layer(layer, x, batch_first=False)\n" ]
[ [ "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.InstanceNorm1d", "torch.cuda.manual_seed", "torch.nn.LayerNorm", "torch.nn.init.uniform_", "torch.nn.Conv1d", "torch.nn.GroupNorm", "torch.nn.L1Loss", "torch.manual_seed", "torch.nn.Conv2d", "torch.randint", "torch.nn.InstanceNorm2d", "torch.zeros_like", "torch.nn.InstanceNorm3d", "torch.allclose", "torch.randn" ] ]