Columns:
repo_name: string, lengths 6-130
hexsha: list
file_path: list
code: list
apis: list
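Each record below pairs a repository name and commit hexsha with parallel lists of file paths, file contents (code), and the fully qualified API calls extracted from each file. A minimal sketch of how such records could be consumed, assuming they are exported as JSON lines using the field names above (the file name records.jsonl and the helper iter_api_usage are illustrative, not part of the dataset):

import json
from collections import Counter

def iter_api_usage(path="records.jsonl"):
    """Yield (repo_name, file_path, api) triples from a JSON-lines dump
    whose rows follow the repo_name / hexsha / file_path / code / apis schema."""
    with open(path) as fh:
        for line in fh:
            row = json.loads(line)
            # file_path, code, and apis are parallel lists within one record.
            for fpath, apis in zip(row["file_path"], row["apis"]):
                for api in apis:
                    yield row["repo_name"], fpath, api

# Example: count which top-level libraries appear most often.
counts = Counter(api.split(".")[0] for _, _, api in iter_api_usage())
print(counts.most_common(5))

Because file_path, code, and apis are aligned per file, the same loop can be extended to recover the source text alongside its extracted API list.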
Dmytro-Skorniakov/pyvenn
[ "b3d3057d155f3f11e37a93b55305f2b6929cf608" ]
[ "demo.py" ]
[ "# coding: utf-8\n\n# ipython notebook requires this\n# %matplotlib inline\n\n# python console requires this\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom pyvenn import venn\n\nlabels = venn.get_labels([range(10), range(5, 15)], fill=['number', 'logic'])\nfig, ax = venn.venn2(labels, names=['list 1', 'list 2'])\nfig.savefig('venn2.png', bbox_inches='tight')\nplt.close()\n\nlabels = venn.get_labels([range(10), range(5, 15), range(3, 8)], fill=['number', 'logic'])\nfig, ax = venn.venn3(labels, names=['list 1', 'list 2', 'list 3'])\nfig.savefig('venn3.png', bbox_inches='tight')\nplt.close()\n\nlabels = venn.get_labels([range(10), range(5, 15), range(3, 8), range(8, 17)], fill=['number', 'logic'])\nfig, ax = venn.venn4(labels, names=['list 1', 'list 2', 'list 3', 'list 4'])\nfig.savefig('venn4.png', bbox_inches='tight')\nplt.close()\n\nlabels = venn.get_labels([range(10), range(5, 15), range(3, 8), range(8, 17), range(10, 20)], fill=['number', 'logic'])\nfig, ax = venn.venn5(labels, names=['list 1', 'list 2', 'list 3', 'list 4', 'list 5'])\nfig.savefig('venn5.png', bbox_inches='tight')\nplt.close()\n\nlabels = venn.get_labels([range(10), range(5, 15), range(3, 8), range(8, 17), range(10, 20), range(13, 25)], fill=['number', 'logic'])\nfig, ax = venn.venn6(labels, names=['list 1', 'list 2', 'list 3', 'list 4', 'list 5', 'list 6'])\nfig.savefig('venn6.png', bbox_inches='tight')\nplt.close()\n\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.close" ] ]
tzaumiaan/mono_depth_assisted_tracking_by_detection
[ "33a4ca8c5550d30111269848eab4148da2deaeea" ]
[ "inference_pipeline/inference_core.py" ]
[ "\"\"\"Inference core module.\n\nThis module is the top level wrapper for the whole detection-by-tracking\nframework. The wrapper takes care of a set of building blocks, including\ndetector, estimator, DA solver, tracker, and a bunch of unitilies to \nprovide a clean interface to be executed frame by frame.\n\n\"\"\"\n# package dependency\nimport numpy as np\nfrom absl import logging\nfrom datetime import datetime\n\n# internal dependency\nfrom detector import detector\nfrom estimator import estimator\nfrom tracker import tracker\nfrom da_solver import associate\nfrom geo_utils import egomo_vec2mat, cam_proj, coord_transform\nfrom common_utils import get_tdiff\n\nclass inference_wrapper:\n \"\"\"Inference wrapper class.\n \n This class works as the wrapper of the inference core. It is initialized to \n set up detector and estimator instances, and then it provides `run_frame`\n to be used directly frame by frame. Within it there are 2 major functions:\n `main_pipeline` and `traj_pipeline`, taking care of 2D plane detection and\n tracking and 3D trjectory projection, respectively.\n \n attributes:\n det (detector): Object detector instance.\n est (estimator): Depth and egomotion estimator instance.\n t_list (list of tracker): List of trackers.\n tid_new (int): Global latest index for new tracker. A unique tracker ID is\n issued to a newly created tracker based on this global index\n to prevent ID conflict.\n image_seq (list of numpy.array): List of a triplet of images, stored for \n egomotion detection.\n egomo_trmat (numpy.array): Egomotion transformation matrix, stored as the \n accumulated ego pose from frame 0, dim = (4, 4).\n frame_idx (int): Frame index, starting from 0.\n k_mat (numpy.array): Camera intrinsic matrix, dim = (3, 3).\n \n \"\"\"\n\n def __init__(self, config, k_mat, init_egomo_vec=None):\n \"\"\"Model initialization.\n \n Args:\n config (dict): Configuration of the whole inference pipeline, including\n necessary frozen PB and parameters for Tensorflow models.\n k_mat (numpy.array): Camera intrinsic matrix, dim = (3, 3).\n init_egomo_vec (numpy.array): Initial pose of camera, dim = (6,).\n \n \"\"\"\n self.det = detector(config['detector'])\n self.est = estimator(config['estimator'])\n self.t_cfg = config['tracker']\n self.t_list = []\n self.tid_new = 0\n self.image_seq = []\n self.egomo_trmat = None\n self.frame_idx = -1\n self.k_mat = k_mat\n if init_egomo_vec is None:\n self.init_egomo_vec = np.zeros(6)\n else:\n self.init_egomo_vec = np.array(init_egomo_vec)\n \n def run_frame(self, image):\n \"\"\"Frame routine, including main pipeline, triplet buil-up, and trajectory\n pipeline.\n \n Args:\n image (numpy.array): Image array, dim = (h, w, c).\n \n Return:\n frame_idx (int): Frame index.\n disp (numpy.array): Disparity map, for visualization, dim = (h, w, c).\n egomo_trmat (numpy.array): Accumulated egomotion transformation matrix, \n for visualization, dim = (4, 4).\n t_list (list of tracker): List of trackers for visualization.\n \n \"\"\"\n self.frame_idx += 1\n # run main pipeline\n t0 = datetime.now()\n disp = self.main_pipeline(image)\n t1 = datetime.now()\n logging.info('main pipeline: {}'.format(get_tdiff(t0, t1)))\n \n # prepare image sequence of 3 for trajectory pipeline\n t0 = datetime.now()\n self.image_seq.append(image)\n if len(self.image_seq) > 3:\n del self.image_seq[0]\n t1 = datetime.now()\n logging.info('image stack: {}'.format(get_tdiff(t0, t1)))\n\n # run trajectory pipeline\n t0 = datetime.now()\n if len(self.image_seq) >= 3:\n self.egomo_trmat 
= self.traj_pipeline(prev_trmat=self.egomo_trmat)\n t1 = datetime.now()\n logging.info('traj pipeline: {}'.format(get_tdiff(t0, t1)))\n return self.frame_idx, disp, self.egomo_trmat, self.t_list\n \n def main_pipeline(self, image):\n \"\"\"Main pipeline of tracking-by-detection.\n \n From one image, we can obtain a list of detected objects along with their\n bounding boxes, labels, and depth. Objects are tracked with the data\n association solver and a list of trackers.\n\n Args:\n image (numpy.array): Image array, dim = (h, w, c).\n \n Return:\n disp (numpy.array): Disparity map, for visualization, dim = (h, w, c).\n \n \"\"\"\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp\n\n def traj_pipeline(self, prev_trmat=None):\n \"\"\"Trajectory pipeline of tracking-by-detection.\n \n Given a previous egomotion transformation matrix and a triplet of images,\n we can obtain the egomotion for the new frame and accumulate it on previous\n pose to generate 3D coordinate transformation matrix. Then all objects are\n projected to 3D coordinate to generate absolute trajectories. 
Those \n trajectories are stored in the dictionary of each tracker.\n\n Args:\n prev_trmat (numpy.array): Previously accumulated egomotion transformation\n matrix, dim = (4, 4).\n \n Return:\n egomo_trmat (numpy.array): Updated egomotion transformation matrix, dim \n = (4, 4).\n \n \"\"\"\n # image_seq = [image(frame_idx-2), image(frame_idx-1), image(frame_idx)]\n # egomotion update\n egomo = self.est.get_egomotion(self.image_seq)\n\n # egomotion transformation\n assert self.frame_idx >= 2, 'invalid self.frame_idx'\n if prev_trmat is None:\n assert self.frame_idx == 2, 'invalid self.frame_idx'\n # initialization of ego transformation matrix\n init_trmat = egomo_vec2mat(self.init_egomo_vec)\n prev_trmat = np.matmul(init_trmat, egomo_vec2mat(egomo[0])) # frame 0 to 1\n egomo_trmat = np.matmul(prev_trmat, egomo_vec2mat(egomo[1]))\n\n # tracker list update\n for t in self.t_list:\n # skip lost trackers\n if t.get_status()==False:\n continue\n # bounding box & depth\n bbox, depth = t.get_bbox(), t.get_depth()\n # project to 3d camera coordinate\n p3d_cam = cam_proj(self.k_mat, bbox, depth)\n # transform to world coordinate\n p3d = coord_transform(egomo_trmat, p3d_cam)\n t.add_attr_to_est_dict('traj', p3d)\n \n return egomo_trmat\n \n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
Meistereder29/rl-course
[ "7816088830da9dbd143abe50b51f5acd52f25c35" ]
[ "ex07-fa/playground.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 17:10:20 2020\n\n@author: Gregor\n\"\"\"\nimport numpy as np\n\nx1 = -12\n\n\nfor i in range(20):\n y1 = int(np.round((x1+12)*19/18))\n x2 = -7\n for j in range(20):\n y2 = int(np.round((x2+7)*19/14))\n x2 +=(14/19)\n state = 20 * y1 + y2\n print(\"State = \"+str(state))\n x1 +=(18/19)\n \n " ]
[ [ "numpy.round" ] ]
shangoma/Hate_Speech
[ "597795d40e3516ec83feb33e40c1d544c58deb74" ]
[ "evaluator.py" ]
[ "'''Scripts for evaluation,\n metrics: (macro) F1, AUC, FNED, FPED\n\n Because the data is skewed distributed, therefore,\n we use the macro f1 score to measure the performance.\n'''\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom sklearn.utils import shuffle\nimport numpy as np\n\nimport json\nfrom collections import Counter\n\n\ndef cal_fpr(fp, tn):\n '''False positive rate'''\n return fp/(fp+tn)\n\n\ndef cal_fnr(fn, tp):\n '''False negative rate'''\n return fn/(fn+tp)\n\n\ndef cal_tpr(tp, fn):\n '''True positive rate'''\n return tp/(tp+fn)\n\n\ndef cal_tnr(tn, fp):\n '''True negative rate'''\n return tn/(tn+fp)\n\n\ndef eval(dpath, opt):\n '''Fairness Evaluation\n dpath: input eval file path\n opt: output results path\n '''\n df = pd.read_csv(dpath, sep='\\t', na_values='x')\n # get the task name from the file, gender or ethnicity\n tasks = ['gender', 'age', 'country', 'ethnicity']\n\n scores = {\n 'accuracy': 0.0,\n 'f1-macro': 0.0, # macro f1 score\n 'f1-weight': 0.0, # weighted f1 score\n 'auc': 0.0,\n }\n\n # accuracy, f1, auc\n scores['accuracy'] = metrics.accuracy_score(\n y_true=df.label, y_pred=df.pred\n )\n scores['f1-macro'] = metrics.f1_score(\n y_true=df.label, y_pred=df.pred,\n average='macro'\n )\n scores['f1-weight'] = metrics.f1_score(\n y_true=df.label, y_pred=df.pred,\n average='weighted'\n )\n fpr, tpr, _ = metrics.roc_curve(\n y_true=df.label, y_score=df.pred_prob,\n )\n scores['auc'] = metrics.auc(fpr, tpr)\n\n '''fairness gaps'''\n for task in tasks:\n\n '''Filter out some tasks'''\n if ('Polish' in dpath or 'Italian' in dpath) and\\\n task in ['country', 'ethnicity']:\n continue\n\n scores[task] = {\n 'fned': 0.0, # gap between fnr\n 'fped': 0.0, # gap between fpr\n 'tped': 0.0, # gap between tpr\n 'tned': 0.0, # gap between tnr\n }\n # filter out the one does not have attributes\n task_df = df[df[task].notnull()]\n \n # get overall confusion matrix\n tn, fp, fn, tp = metrics.confusion_matrix(\n y_true=task_df.label, y_pred=task_df.pred\n ).ravel()\n\n # get the unique types of demographic groups\n uniq_types = task_df[task].unique()\n for group in uniq_types:\n # calculate group specific confusion matrix\n group_df = task_df[task_df[task] == group]\n \n g_tn, g_fp, g_fn, g_tp = metrics.confusion_matrix(\n y_true=group_df.label, y_pred=group_df.pred\n ).ravel()\n\n # calculate and accumulate the gaps\n scores[task]['fned'] = scores[task]['fned'] + abs(\n cal_fnr(fn, tp)-cal_fnr(g_fn, g_tp)\n )\n scores[task]['fped'] = scores[task]['fped'] + abs(\n cal_fpr(fp, tn)-cal_fpr(g_fp, g_tn)\n )\n scores[task]['tped'] = scores[task]['tped'] + abs(\n cal_tpr(tp, fn)-cal_tpr(g_tp, g_fn)\n )\n scores[task]['tned'] = scores[task]['tned'] + abs(\n cal_tnr(tn, fp)-cal_tnr(g_tn, g_fp)\n )\n with open(opt, 'w') as wfile:\n wfile.write(json.dumps(scores))\n print(scores)\n\n\ndef data_iter(datap, batch_size=64, if_shuffle=True, if_sample=False):\n doc_idx = 2\n data = {'x': [], 'y': []}\n class_wt = dict()\n \n with open(datap) as dfile:\n dfile.readline()\n for line in dfile:\n line = line.strip().split('\\t')\n # split indices\n data['x'].append(list(map(int, line[doc_idx].split())))\n data['y'].append(int(line[-1]))\n\n # if over sample the minority \n if if_sample:\n label_count = Counter(data['y'])\n for label_tmp in label_count:\n sample_num = label_count.most_common(1)[0][1] - label_count[label_tmp]\n if sample_num == 0:\n continue\n sample_indices = np.random.choice(\n list(range(len(data['y']))),\n 
size=sample_num\n )\n for idx in sample_indices:\n data['x'].append(data['x'][idx])\n data['y'].append(data['y'][idx])\n \n # calculate the class weight\n class_wt = dict(zip(\n np.unique(data['y']), compute_class_weight(\n 'balanced', np.unique(data['y']), \n data['y']\n )\n ))\n\n # if shuffle the dataset\n if if_shuffle:\n data['x'], data['y'] = shuffle(data['x'], data['y'])\n\n steps = len(data['x']) // batch_size\n if len(data['x']) % batch_size != 0:\n steps += 1\n\n for step in range(steps):\n yield class_wt, \\\n np.asarray(data['x'][step*batch_size: (step+1)*batch_size]),\\\n np.asarray(data['y'][step*batch_size: (step+1)*batch_size])\n\n" ]
[ [ "pandas.read_csv", "numpy.unique", "numpy.asarray", "sklearn.utils.shuffle", "sklearn.metrics.confusion_matrix", "sklearn.metrics.roc_curve", "sklearn.metrics.auc", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
jjpalacio/tflearn
[ "5c23566de6e614a36252a5828d107d001a0d0482" ]
[ "tflearn/data_utils.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport random\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport csv\nimport warnings\nimport tensorflow as tf\ntry: #py3\n from urllib.parse import urlparse\n from urllib import request\nexcept: #py2\n from urlparse import urlparse\n from six.moves.urllib import request\nfrom io import BytesIO\n\n\"\"\"\nPreprocessing provides some useful functions to preprocess data before\ntraining, such as pictures dataset building, sequence padding, etc...\n\nNote: Those preprocessing functions are only meant to be directly applied to\ndata, they are not meant to be use with Tensors or Layers.\n\"\"\"\n\n_EPSILON = 1e-8\n\n\n# =======================\n# TARGETS (LABELS) UTILS\n# =======================\n\n\ndef to_categorical(y, nb_classes=None):\n \"\"\" to_categorical.\n\n Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n\n Arguments:\n y: `array`. Class vector to convert.\n nb_classes: `int`. The total number of classes.\n \"\"\"\n if nb_classes:\n y = np.asarray(y, dtype='int32')\n if len(y.shape) > 2:\n print(\"Warning: data array ndim > 2\")\n if len(y.shape) > 1:\n y = y.reshape(-1)\n Y = np.zeros((len(y), nb_classes))\n Y[np.arange(len(y)), y] = 1.\n return Y\n else:\n y = np.array(y)\n return (y[:, None] == np.unique(y)).astype(np.float32)\n\n\n# =====================\n# SEQUENCES UTILS\n# =====================\n\n\ndef pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',\n truncating='post', value=0.):\n \"\"\" pad_sequences.\n\n Pad each sequence to the same length: the length of the longest sequence.\n If maxlen is provided, any sequence longer than maxlen is truncated to\n maxlen. Truncation happens off either the beginning or the end (default)\n of the sequence. Supports pre-padding and post-padding (default).\n\n Arguments:\n sequences: list of lists where each element is a sequence.\n maxlen: int, maximum length.\n dtype: type to cast the resulting sequence.\n padding: 'pre' or 'post', pad either before or after each sequence.\n truncating: 'pre' or 'post', remove values from sequences larger than\n maxlen either in the beginning or in the end of the sequence\n value: float, value to pad the sequences to the desired value.\n\n Returns:\n x: `numpy array` with dimensions (number_of_sequences, maxlen)\n\n Credits: From Keras `pad_sequences` function.\n \"\"\"\n lengths = [len(s) for s in sequences]\n\n nb_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if len(s) == 0:\n continue # empty list was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError(\"Truncating type '%s' not understood\" % truncating)\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError(\"Padding type '%s' not understood\" % padding)\n return x\n\n\ndef string_to_semi_redundant_sequences(string, seq_maxlen=25, redun_step=3, char_idx=None):\n \"\"\" string_to_semi_redundant_sequences.\n\n Vectorize a string and returns parsed sequences and targets, along with\n the associated dictionary.\n\n Arguments:\n string: `str`. Lower-case text from input text file.\n seq_maxlen: `int`. Maximum length of a sequence. 
Default: 25.\n redun_step: `int`. Redundancy step. Default: 3.\n char_idx: 'dict'. A dictionary to convert chars to positions. Will be automatically generated if None\n\n Returns:\n A tuple: (inputs, targets, dictionary)\n \"\"\"\n\n print(\"Vectorizing text...\")\n\n if char_idx is None:\n char_idx = chars_to_dictionary(string)\n\n len_chars = len(char_idx)\n\n sequences = []\n next_chars = []\n for i in range(0, len(string) - seq_maxlen, redun_step):\n sequences.append(string[i: i + seq_maxlen])\n next_chars.append(string[i + seq_maxlen])\n\n X = np.zeros((len(sequences), seq_maxlen, len_chars), dtype=np.bool)\n Y = np.zeros((len(sequences), len_chars), dtype=np.bool)\n for i, seq in enumerate(sequences):\n for t, char in enumerate(seq):\n X[i, t, char_idx[char]] = 1\n Y[i, char_idx[next_chars[i]]] = 1\n\n print(\"Text total length: {:,}\".format(len(string)))\n print(\"Distinct chars : {:,}\".format(len_chars))\n print(\"Total sequences : {:,}\".format(len(sequences)))\n\n return X, Y, char_idx\n\n\ndef textfile_to_semi_redundant_sequences(path, seq_maxlen=25, redun_step=3,\n to_lower_case=False, pre_defined_char_idx=None):\n \"\"\" Vectorize Text file \"\"\"\n text = open(path).read()\n if to_lower_case:\n text = text.lower()\n return string_to_semi_redundant_sequences(text, seq_maxlen, redun_step, pre_defined_char_idx)\n\n\ndef chars_to_dictionary(string):\n \"\"\" Creates a dictionary char:integer for each unique character \"\"\"\n chars = set(string)\n # sorted tries to keep a consistent dictionary, if you run a second time for the same char set\n char_idx = {c: i for i, c in enumerate(sorted(chars))}\n return char_idx\n\n\ndef random_sequence_from_string(string, seq_maxlen):\n rand_index = random.randint(0, len(string) - seq_maxlen - 1)\n return string[rand_index: rand_index + seq_maxlen]\n\n\ndef random_sequence_from_textfile(path, seq_maxlen):\n text = open(path).read()\n return random_sequence_from_string(text, seq_maxlen)\n\n\nclass VocabularyProcessor(object):\n \"\"\" Vocabulary Processor.\n\n Maps documents to sequences of word ids.\n\n Arguments:\n max_document_length: Maximum length of documents.\n if documents are longer, they will be trimmed, if shorter - padded.\n min_frequency: Minimum frequency of words in the vocabulary.\n vocabulary: CategoricalVocabulary object.\n\n Attributes:\n vocabulary_: CategoricalVocabulary object.\n\n \"\"\"\n\n def __init__(self,\n max_document_length,\n min_frequency=0,\n vocabulary=None,\n tokenizer_fn=None):\n from tensorflow.contrib.learn.python.learn.preprocessing.text import \\\n VocabularyProcessor as _VocabularyProcessor\n self.__dict__['_vocabulary_processor'] = _VocabularyProcessor(\n max_document_length,\n min_frequency,\n vocabulary,\n tokenizer_fn)\n\n def __getattr__(self, key):\n return getattr(self._vocabulary_processor, key)\n\n def __setattr__(self, key, value):\n setattr(self._vocabulary_processor, key, value)\n\n def fit(self, raw_documents, unused_y=None):\n \"\"\" fit.\n\n Learn a vocabulary dictionary of all tokens in the raw documents.\n\n Arguments:\n raw_documents: An iterable which yield either str or unicode.\n unused_y: to match fit format signature of estimators.\n\n Returns:\n self\n \"\"\"\n return self._vocabulary_processor.fit(raw_documents, unused_y)\n\n def fit_transform(self, raw_documents, unused_y=None):\n \"\"\" fit_transform.\n\n Learn the vocabulary dictionary and return indices of words.\n\n Arguments:\n raw_documents: An iterable which yield either str or unicode.\n unused_y: to match 
fit_transform signature of estimators.\n\n Returns:\n X: iterable, [n_samples, max_document_length] Word-id matrix.\n \"\"\"\n return self._vocabulary_processor.fit_transform(raw_documents,\n unused_y)\n\n def transform(self, raw_documents):\n \"\"\" transform.\n\n Transform documents to word-id matrix.\n\n Convert words to ids with vocabulary fitted with fit or the one\n provided in the constructor.\n\n Arguments:\n raw_documents: An iterable which yield either str or unicode.\n\n Yields:\n X: iterable, [n_samples, max_document_length] Word-id matrix.\n \"\"\"\n return self._vocabulary_processor.transform(raw_documents)\n\n def reverse(self, documents):\n \"\"\" reverse.\n\n Reverses output of vocabulary mapping to words.\n\n Arguments:\n documents: iterable, list of class ids.\n\n Returns:\n Iterator over mapped in words documents.\n \"\"\"\n return self._vocabulary_processor.reverse(documents)\n\n def save(self, filename):\n \"\"\" save.\n\n Saves vocabulary processor into given file.\n\n Arguments:\n filename: Path to output file.\n \"\"\"\n return self._vocabulary_processor.save(filename)\n\n @classmethod\n def restore(cls, filename):\n \"\"\" restore.\n\n Restores vocabulary processor from given file.\n\n Arguments:\n filename: Path to file to load from.\n\n Returns:\n VocabularyProcessor object.\n \"\"\"\n return self._vocabulary_processor.restore(filename)\n\n\n# ===================\n# IMAGES UTILS\n# ===================\n\ndef build_hdf5_image_dataset(target_path, image_shape, output_path='dataset.h5',\n mode='file', categorical_labels=True,\n normalize=True, grayscale=False,\n files_extension=None, chunks=False, image_base_path='', float_labels=False):\n \"\"\" Build HDF5 Image Dataset.\n\n Build an HDF5 dataset by providing either a root folder or a plain text\n file with images path and class id.\n\n 'folder' mode: Root folder should be arranged as follow:\n ```\n ROOT_FOLDER -> SUBFOLDER_0 (CLASS 0) -> CLASS0_IMG1.jpg\n -> CLASS0_IMG2.jpg\n -> ...\n -> SUBFOLDER_1 (CLASS 1) -> CLASS1_IMG1.jpg\n -> ...\n -> ...\n ```\n Note that if sub-folders are not integers from 0 to n_classes, an id will\n be assigned to each sub-folder following alphabetical order.\n\n 'file' mode: Plain text file should be formatted as follow:\n ```\n /path/to/img1 class_id\n /path/to/img2 class_id\n /path/to/img3 class_id\n ```\n\n Examples:\n ```\n # Load path/class_id image file:\n dataset_file = 'my_dataset.txt'\n\n # Build a HDF5 dataset (only required once)\n from tflearn.data_utils import build_hdf5_image_dataset\n build_hdf5_image_dataset(dataset_file, image_shape=(128, 128),\n mode='file', output_path='dataset.h5',\n categorical_labels=True, normalize=True)\n\n # Load HDF5 dataset\n import h5py\n h5f = h5py.File('dataset.h5', 'r')\n X = h5f['X']\n Y = h5f['Y']\n\n # Build neural network and train\n network = ...\n model = DNN(network, ...)\n model.fit(X, Y)\n ```\n\n Arguments:\n target_path: `str`. Path of root folder or images plain text file.\n image_shape: `tuple (height, width)`. The images shape. Images that\n doesn't match that shape will be resized.\n output_path: `str`. The output path for the hdf5 dataset. Default:\n 'dataset.h5'\n mode: `str` in ['file', 'folder']. The data source mode. 'folder'\n accepts a root folder with each of his sub-folder representing a\n class containing the images to classify.\n 'file' accepts a single plain text file that contains every\n image path with their class id.\n Default: 'folder'.\n categorical_labels: `bool`. 
If True, labels are converted to binary\n vectors.\n normalize: `bool`. If True, normalize all pictures by dividing\n every image array by 255.\n grayscale: `bool`. If true, images are converted to grayscale.\n files_extension: `list of str`. A list of allowed image file\n extension, for example ['.jpg', '.jpeg', '.png']. If None,\n all files are allowed.\n chunks: `bool` Whether to chunks the dataset or not. You should use\n chunking only when you really need it. See HDF5 documentation.\n If chunks is 'True' a sensitive default will be computed.\n image_base_path: `str`. Base path for the images listed in the file mode.\n float_labels: `bool`. Read float labels instead of integers in file mode.\n\n \"\"\"\n import h5py\n\n assert image_shape, \"Image shape must be defined.\"\n assert image_shape[0] and image_shape[1], \\\n \"Image shape error. It must be a tuple of int: ('width', 'height').\"\n assert mode in ['folder', 'file'], \"`mode` arg must be 'folder' or 'file'\"\n\n if mode == 'folder':\n images, labels = directory_to_samples(target_path,\n flags=files_extension)\n else:\n with open(target_path, 'r') as f:\n images, labels = [], []\n for l in f.readlines():\n l = l.strip('\\n').split()\n l[0] = image_base_path + l[0]\n images.append(l[0])\n if float_labels:\n labels.append(float(l[1]))\n else:\n labels.append(int(l[1]))\n\n n_classes = np.max(labels) + 1\n\n d_imgshape = (len(images), image_shape[1], image_shape[0], 3) \\\n if not grayscale else (len(images), image_shape[1], image_shape[0])\n d_labelshape = (len(images), n_classes) \\\n if categorical_labels else (len(images), )\n x_chunks = None\n y_chunks = None\n if chunks is True:\n x_chunks = (1,)+ d_imgshape[1:]\n if len(d_labelshape) > 1:\n y_chunks = (1,) + d_labelshape[1:]\n dataset = h5py.File(output_path, 'w')\n dataset.create_dataset('X', d_imgshape, chunks=x_chunks)\n dataset.create_dataset('Y', d_labelshape, chunks=y_chunks)\n\n for i in range(len(images)):\n img = load_image(images[i])\n width, height = img.size\n if width != image_shape[0] or height != image_shape[1]:\n img = resize_image(img, image_shape[0], image_shape[1])\n if grayscale:\n img = convert_color(img, 'L')\n elif img.mode == 'L' or img.mode == 'RGBA':\n img = convert_color(img, 'RGB')\n\n img = pil_to_nparray(img)\n if normalize:\n img /= 255.\n dataset['X'][i] = img\n if categorical_labels:\n dataset['Y'][i] = to_categorical([labels[i]], n_classes)[0]\n else:\n dataset['Y'][i] = labels[i]\n\n\ndef get_img_channel(image_path):\n \"\"\"\n Load a image and return the channel of the image\n :param image_path:\n :return: the channel of the image\n \"\"\"\n img = load_image(image_path)\n img = pil_to_nparray(img)\n try:\n channel = img.shape[2]\n except:\n channel = 1\n return channel\n\n\ndef image_preloader(target_path, image_shape, mode='file', normalize=True,\n grayscale=False, categorical_labels=True,\n files_extension=None, filter_channel=False, image_base_path='', float_labels=False):\n \"\"\" Image PreLoader.\n\n Create a python array (`Preloader`) that loads images on the fly (from\n disk or url). There is two ways to provide image samples 'folder' or\n 'file', see the specifications below.\n\n 'folder' mode: Load images from disk, given a root folder. 
This folder\n should be arranged as follow:\n ```\n ROOT_FOLDER -> SUBFOLDER_0 (CLASS 0) -> CLASS0_IMG1.jpg\n -> CLASS0_IMG2.jpg\n -> ...\n -> SUBFOLDER_1 (CLASS 1) -> CLASS1_IMG1.jpg\n -> ...\n -> ...\n ```\n Note that if sub-folders are not integers from 0 to n_classes, an id will\n be assigned to each sub-folder following alphabetical order.\n\n 'file' mode: A plain text file listing every image path and class id.\n This file should be formatted as follow:\n ```\n /path/to/img1 class_id\n /path/to/img2 class_id\n /path/to/img3 class_id\n ```\n\n Note that load images on the fly and convert is time inefficient,\n so you can instead use `build_hdf5_image_dataset` to build a HDF5 dataset\n that enable fast retrieval (this function takes similar arguments).\n\n Examples:\n ```\n # Load path/class_id image file:\n dataset_file = 'my_dataset.txt'\n\n # Build the preloader array, resize images to 128x128\n from tflearn.data_utils import image_preloader\n X, Y = image_preloader(dataset_file, image_shape=(128, 128),\n mode='file', categorical_labels=True,\n normalize=True)\n\n # Build neural network and train\n network = ...\n model = DNN(network, ...)\n model.fit(X, Y)\n ```\n\n Arguments:\n target_path: `str`. Path of root folder or images plain text file.\n image_shape: `tuple (height, width)`. The images shape. Images that\n doesn't match that shape will be resized.\n mode: `str` in ['file', 'folder']. The data source mode. 'folder'\n accepts a root folder with each of his sub-folder representing a\n class containing the images to classify.\n 'file' accepts a single plain text file that contains every\n image path with their class id.\n Default: 'folder'.\n categorical_labels: `bool`. If True, labels are converted to binary\n vectors.\n normalize: `bool`. If True, normalize all pictures by dividing\n every image array by 255.\n grayscale: `bool`. If true, images are converted to grayscale.\n files_extension: `list of str`. A list of allowed image file\n extension, for example ['.jpg', '.jpeg', '.png']. If None,\n all files are allowed.\n filter_channel: `bool`. If true, images which the channel is not 3 should\n be filter.\n image_base_path: `str`. Base path for the images listed in the file mode.\n float_labels: `bool`. Read float labels instead of integers in file mode.\n\n Returns:\n (X, Y): with X the images array and Y the labels array.\n\n \"\"\"\n assert mode in ['folder', 'file']\n if mode == 'folder':\n images, labels = directory_to_samples(target_path,\n flags=files_extension, filter_channel=filter_channel)\n else:\n with open(target_path, 'r') as f:\n images, labels = [], []\n for l in f.readlines():\n l = l.strip('\\n').split()\n l[0] = image_base_path + l[0]\n if not files_extension or any(flag in l[0] for flag in files_extension):\n if filter_channel:\n if get_img_channel(l[0]) != 3:\n continue\n images.append(l[0])\n if float_labels:\n labels.append(float(l[1]))\n else:\n labels.append(int(l[1]))\n\n n_classes = np.max(labels) + 1\n X = ImagePreloader(images, image_shape, normalize, grayscale)\n Y = LabelPreloader(labels, n_classes, categorical_labels)\n\n return X, Y\n\n\ndef load_image(in_image):\n \"\"\" Load an image, returns PIL.Image. 
\"\"\"\n # if the path appears to be an URL\n if urlparse(in_image).scheme in ('http', 'https',):\n # set up the byte stream\n img_stream = BytesIO(request.urlopen(in_image).read())\n # and read in as PIL image\n img = Image.open(img_stream)\n else:\n # else use it as local file path\n img = Image.open(in_image)\n return img\n\n\ndef resize_image(in_image, new_width, new_height, out_image=None,\n resize_mode=Image.ANTIALIAS):\n \"\"\" Resize an image.\n\n Arguments:\n in_image: `PIL.Image`. The image to resize.\n new_width: `int`. The image new width.\n new_height: `int`. The image new height.\n out_image: `str`. If specified, save the image to the given path.\n resize_mode: `PIL.Image.mode`. The resizing mode.\n\n Returns:\n `PIL.Image`. The resize image.\n\n \"\"\"\n img = in_image.resize((new_width, new_height), resize_mode)\n if out_image:\n img.save(out_image)\n return img\n\n\ndef convert_color(in_image, mode):\n \"\"\" Convert image color with provided `mode`. \"\"\"\n return in_image.convert(mode)\n\n\ndef pil_to_nparray(pil_image):\n \"\"\" Convert a PIL.Image to numpy array. \"\"\"\n pil_image.load()\n return np.asarray(pil_image, dtype=\"float32\")\n\n\ndef image_dirs_to_samples(directory, resize=None, convert_gray=None,\n filetypes=None):\n print(\"Starting to parse images...\")\n if filetypes:\n if filetypes not in [list, tuple]: filetypes = list(filetypes)\n samples, targets = directory_to_samples(directory, flags=filetypes)\n for i, s in enumerate(samples):\n samples[i] = load_image(s)\n if resize:\n samples[i] = resize_image(samples[i], resize[0], resize[1])\n if convert_gray:\n samples[i] = convert_color(samples[i], 'L')\n samples[i] = pil_to_nparray(samples[i])\n samples[i] /= 255.\n print(\"Parsing Done!\")\n return samples, targets\n\n\ndef build_image_dataset_from_dir(directory,\n dataset_file=\"my_tflearn_dataset.pkl\",\n resize=None, convert_gray=None,\n filetypes=None, shuffle_data=False,\n categorical_Y=False):\n try:\n X, Y = pickle.load(open(dataset_file, 'rb'))\n except Exception:\n X, Y = image_dirs_to_samples(directory, resize, convert_gray, filetypes)\n if categorical_Y:\n Y = to_categorical(Y, np.max(Y) + 1) # First class is '0'\n if shuffle_data:\n X, Y = shuffle(X, Y)\n pickle.dump((X, Y), open(dataset_file, 'wb'))\n return X, Y\n\n\ndef random_flip_leftright(x):\n if bool(random.getrandbits(1)):\n return np.fliplr(x)\n else:\n return x\n\n\ndef random_flip_updown(x):\n if bool(random.getrandbits(1)):\n return np.flipud(x)\n else:\n return x\n\n\n# ==================\n# DATA UTILS\n# ==================\n\n\ndef shuffle(*arrs):\n \"\"\" shuffle.\n\n Shuffle given arrays at unison, along first axis.\n\n Arguments:\n *arrs: Each array to shuffle at unison.\n\n Returns:\n Tuple of shuffled arrays.\n\n \"\"\"\n arrs = list(arrs)\n for i, arr in enumerate(arrs):\n assert len(arrs[0]) == len(arrs[i])\n arrs[i] = np.array(arr)\n p = np.random.permutation(len(arrs[0]))\n return tuple(arr[p] for arr in arrs)\n\n\ndef samplewise_zero_center(X):\n \"\"\" samplewise_zero_center.\n\n Zero center each sample by subtracting it by its mean.\n\n Arguments:\n X: `array`. The batch of samples to center.\n\n Returns:\n A numpy array with same shape as input.\n\n \"\"\"\n for i in range(len(X)):\n X[i] -= np.mean(X[i], axis=1, keepdims=True)\n return X\n\n\ndef samplewise_std_normalization(X):\n \"\"\" samplewise_std_normalization.\n\n Scale each sample with its standard deviation.\n\n Arguments:\n X: `array`. 
The batch of samples to scale.\n\n Returns:\n A numpy array with same shape as input.\n\n \"\"\"\n for i in range(len(X)):\n X[i] /= (np.std(X[i], axis=1, keepdims=True) + _EPSILON)\n return X\n\n\ndef featurewise_zero_center(X, mean=None):\n \"\"\" featurewise_zero_center.\n\n Zero center every sample with specified mean. If not specified, the mean\n is evaluated over all samples.\n\n Arguments:\n X: `array`. The batch of samples to center.\n mean: `float`. The mean to use for zero centering. If not specified, it\n will be evaluated on provided data.\n\n Returns:\n A numpy array with same shape as input. Or a tuple (array, mean) if no\n mean value was specified.\n\n \"\"\"\n if mean is None:\n mean = np.mean(X, axis=0)\n return X - mean, mean\n else:\n return X - mean\n\n\ndef featurewise_std_normalization(X, std=None):\n \"\"\" featurewise_std_normalization.\n\n Scale each sample by the specified standard deviation. If no std\n specified, std is evaluated over all samples data.\n\n Arguments:\n X: `array`. The batch of samples to scale.\n std: `float`. The std to use for scaling data. If not specified, it\n will be evaluated over the provided data.\n\n Returns:\n A numpy array with same shape as input. Or a tuple (array, std) if no\n std value was specified.\n\n \"\"\"\n if std is None:\n std = np.std(X, axis=0)\n return X / std, std\n else:\n return X / std\n\n\ndef directory_to_samples(directory, flags=None, filter_channel=False):\n \"\"\" Read a directory, and list all subdirectories files as class sample \"\"\"\n samples = []\n targets = []\n label = 0\n try: # Python 2\n classes = sorted(os.walk(directory).next()[1])\n except Exception: # Python 3\n classes = sorted(os.walk(directory).__next__()[1])\n for c in classes:\n c_dir = os.path.join(directory, c)\n try: # Python 2\n walk = os.walk(c_dir).next()\n except Exception: # Python 3\n walk = os.walk(c_dir).__next__()\n for sample in walk[2]:\n if not flags or any(flag in sample for flag in flags):\n if filter_channel:\n if get_img_channel(os.path.join(c_dir, sample)) != 3:\n continue\n samples.append(os.path.join(c_dir, sample))\n targets.append(label)\n label += 1\n return samples, targets\n\n\n# ==================\n# OTHERS\n# ==================\n\ndef load_csv(filepath, target_column=-1, columns_to_ignore=None,\n has_header=True, categorical_labels=False, n_classes=None):\n \"\"\" load_csv.\n\n Load data from a CSV file. By default the labels are considered to be the\n last column, but it can be changed by filling 'target_column' parameter.\n\n Arguments:\n filepath: `str`. The csv file path.\n target_column: The id of the column representing the labels.\n Default: -1 (The last column).\n columns_to_ignore: `list of int`. A list of columns index to ignore.\n has_header: `bool`. Whether the csv file has a header or not.\n categorical_labels: `bool`. If True, labels are returned as binary\n vectors (to be used with 'categorical_crossentropy').\n n_classes: `int`. 
Total number of class (needed if\n categorical_labels is True).\n\n Returns:\n A tuple (data, target).\n\n \"\"\"\n\n from tensorflow.python.platform import gfile\n with gfile.Open(filepath) as csv_file:\n data_file = csv.reader(csv_file)\n if not columns_to_ignore:\n columns_to_ignore = []\n if has_header:\n header = next(data_file)\n data, target = [], []\n # Fix column to ignore ids after removing target_column\n for i, c in enumerate(columns_to_ignore):\n if c > target_column:\n columns_to_ignore[i] -= 1\n for i, d in enumerate(data_file):\n target.append(d.pop(target_column))\n data.append([_d for j, _d in enumerate(d) if j not in columns_to_ignore])\n if categorical_labels:\n assert isinstance(n_classes, int), \"n_classes not specified!\"\n target = to_categorical(target, n_classes)\n return data, target\n\n\nclass Preloader(object):\n def __init__(self, array, function):\n self.array = array\n self.function = function\n\n def __getitem__(self, id):\n if type(id) in [list, np.ndarray]:\n return [self.function(self.array[i]) for i in id]\n elif isinstance(id, slice):\n return [self.function(arr) for arr in self.array[id]]\n else:\n return self.function(self.array[id])\n\n def __len__(self):\n return len(self.array)\n\n\nclass ImagePreloader(Preloader):\n def __init__(self, array, image_shape, normalize=True, grayscale=False):\n fn = lambda x: self.preload(x, image_shape, normalize, grayscale)\n super(ImagePreloader, self).__init__(array, fn)\n\n def preload(self, path, image_shape, normalize=True, grayscale=False):\n img = load_image(path)\n width, height = img.size\n if width != image_shape[0] or height != image_shape[1]:\n img = resize_image(img, image_shape[0], image_shape[1])\n if grayscale:\n img = convert_color(img, 'L')\n img = pil_to_nparray(img)\n if grayscale:\n img = np.reshape(img, img.shape + (1,))\n if normalize:\n img /= 255.\n return img\n\n\nclass LabelPreloader(Preloader):\n def __init__(self, array, n_class=None, categorical_label=True):\n fn = lambda x: self.preload(x, n_class, categorical_label)\n super(LabelPreloader, self).__init__(array, fn)\n\n def preload(self, label, n_class, categorical_label):\n if categorical_label:\n #TODO: inspect assert bug\n #assert isinstance(n_class, int)\n return to_categorical([label], n_class)[0]\n else:\n return label\n\n\ndef is_array(X):\n return type(X) in [np.array, np.ndarray, list]\n\n\ndef get_num_features(X):\n if isinstance(X, tf.Tensor):\n return X.get_shape().as_list()[-1]\n elif is_array(X):\n return list(np.shape(X))[-1]\n else:\n raise ValueError(\"Unknown data type.\")\n\n\ndef get_num_classes(Y):\n if is_array(Y):\n # Assume max integer is number of classes\n return np.max(Y) + 1\n elif isinstance(Y, tf.Tensor):\n return ValueError(\"Cannot automatically retrieve number of classes \"\n \"from a Tensor. Please fill 'num_classes' argument.\")\n else:\n raise ValueError(\"Unknown data type.\")\n\n\ndef get_num_sample(X):\n if is_array(X):\n return np.shape(X)[0]\n elif isinstance(X, tf.Tensor):\n return X.get_shape()[0]\n else:\n raise ValueError(\"Unknown data type.\")\n\n\n# ==================\n# STATS UTILS\n# ==================\n\ndef get_max(X):\n return np.max(X)\n\n\ndef get_mean(X):\n return np.mean(X)\n\n\ndef get_std(X):\n return np.std(X)\n" ]
[ [ "numpy.unique", "numpy.asarray", "numpy.fliplr", "numpy.reshape", "numpy.flipud", "numpy.ones", "numpy.max", "numpy.std", "numpy.mean", "tensorflow.contrib.learn.python.learn.preprocessing.text.VocabularyProcessor", "numpy.shape", "numpy.array", "tensorflow.python.platform.gfile.Open" ] ]
snakedragon/udacity-dlnd
[ "2e5550f183f4eeb7d7c4a91f022df54f0f63c6f3" ]
[ "tv-script-generation/load_word2vec.py" ]
[ "\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom collections import Counter\nfrom itertools import chain\n\nembedding_dim = 100\nfname = 'data/glove.6B.%dd.txt'%embedding_dim\n\nglove_index_dict = {}\n\n\nwith open(fname, 'r') as fp:\n glove_symbols = len(fp.readlines())\n\nglove_embedding_weights = np.empty((glove_symbols, embedding_dim))\n\nprint(\"the number of words\",glove_symbols)\n\nwith open(fname, 'r') as fp:\n i = 0\n for ls in fp:\n ls = ls.strip().split()\n w = ls[0]\n glove_index_dict[w] = i\n glove_embedding_weights[i,:] = np.asarray(ls[1:],dtype=np.float32)\n i += 1\n\nprint(glove_embedding_weights[0:2])\n\n\n" ]
[ [ "numpy.asarray", "numpy.empty" ] ]
Biles430/FPF_PIV
[ "66fa80dbd8414c1c6c522f74eb858c4a4725dde9" ]
[ "piv_outer.py" ]
[ "import pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nimport PIV\nimport h5py\nimport matplotlib.pyplot as plt\nimport hotwire as hw\n\n################################################\n# PURPOSE\n# 1. Compute Integral Parameters\n# 2. Outer Normalize\n# 3. Plot\n##################################################\n#note- vel and axis are flipped to properlly calc delta\n\n\ndef piv_outer(date, num_tests, legend1):\n\t#initalize variables\n\tumean_fov = dict()\n\tvmean_fov = dict()\n\tumean = dict()\n\tvmean = dict()\n\turms = dict()\n\tvrms = dict()\n\tuvprime = dict()\n\tx = dict()\n\ty = dict()\n\tfor j in range(0, num_tests):\n\t\t#read in variables\n\t\tname = 'data/PIV_' + date + '_' +str(j) + '.h5'\n\t\tumean_fov[j] = np.array(pd.read_hdf(name, 'umean'))\n\t\tvmean_fov[j] = np.array(pd.read_hdf(name, 'vmean'))\n\t\tumean[j] = np.array(pd.read_hdf(name, 'umean_profile_avg'))\n\t\tvmean[j] = np.array(pd.read_hdf(name, 'vmean_profile_avg'))\n\t\turms[j] = np.array(pd.read_hdf(name, 'urms_profile_avg'))\n\t\tvrms[j] = np.array(pd.read_hdf(name, 'vrms_profile_avg'))\n\t\tuvprime[j] = np.array(pd.read_hdf(name, 'uvprime_profile_avg'))\n\t\tx[j] = np.array(pd.read_hdf(name, 'xaxis'))\n\t\ty[j] = np.array(pd.read_hdf(name, 'yaxis'))\n\n\t###2. Outer Normalize #############\n\t###################################\n\n\t###3. PLOTS ######################\n\t###################################\n\tmarker_u = ['-xr', '-or','-sr']\n\tmarker_v = ['-xb', '-ob','-sb']\n\t#mean profiles\n\t#U vs y\n\tplt.figure()\n\tfor j in range(0, num_tests):\n\t\tplt.plot(y[j], umean[j], marker_u[j])\n\tplt.ylabel('U (m/sec)', fontsize=14)\n\tplt.xlabel('Wall Normal Position (m)', fontsize=14)\n\tplt.legend(legend1, loc=0)\n\tplt.show()\n\n\t#V vs y\n\tplt.figure()\n\tfor j in range(0, num_tests):\n\t\tplt.plot(y[j], vmean[j], marker_v[j])\n\tplt.ylabel('V (m/sec)', fontsize=14)\n\tplt.xlabel('Wall Normal Position (m)', fontsize=14)\n\tplt.legend(legend1, loc=0)\n\tplt.show()\n\n\n\t#urms vs y\n\tplt.figure()\n\tfor j in range(0, num_tests):\n\t\tplt.plot(y[j], urms[j], marker_u[j])\n\tplt.ylabel('$U_{rms}$ (m/sec)', fontsize=20)\n\tplt.xlabel('Wall Normal Position (m)', fontsize=14)\n\tplt.legend(legend1, loc=0)\n\tplt.show()\n\n\t#vrms vs y\n\tplt.figure()\n\tfor j in range(0, num_tests):\n\t\tplt.plot(y[j], vrms[j], marker_v[j])\n\tplt.ylabel('$V_{rms}$ (m/sec)', fontsize=20)\n\tplt.xlabel('Wall Normal Position (m)', fontsize=14)\n\tplt.legend(legend1, loc=0)\n\tplt.show()\n\n\t#uprime vs y\n\tplt.figure()\n\tfor j in range(0, num_tests):\n\t\tplt.plot(y[j], uvprime[j], marker_u[j])\n\tplt.ylabel('$u^,v^,$', fontsize=20)\n\tplt.xlabel('Wall Normal Position (m)', fontsize=14)\n\tplt.legend(legend1, loc=0)\n\tplt.show()\n\n\t### Mean Vecotr plot\n\tskip_num = 5\n\tumean_fov2 = umean_fov[0]\n\tvmean_fov2 = vmean_fov[0]\n\tx2 = x[0]\n\tumean_fov2 = umean_fov2[:, 0:-1:skip_num]\n\tvmean_fov2 = vmean_fov2[:, 0:-1:skip_num]\n\tx2 = x2[0:-1:skip_num]\n\ty2 = y[0]\n\n\tY = np.tile(y2, (len(x2), 1))\n\tY = np.transpose(Y)\n\tX = np.tile(x2-.0543, (len(y2), 1))\n\tmean_fov2 = (umean_fov2**2 + vmean_fov2**2)**(1/2)\n\n\tcontour_levels = np.arange(0, 5, .05)\n\tplt.figure()\n\tc = plt.contourf(X, Y, mean_fov2, levels = contour_levels, linewidth=40, alpha=.6)\n\tcbar = plt.colorbar(c)\n\tcbar.ax.set_ylabel('Velocity (m/sec)')\n\tplt.hold(True)\n\tq = plt.quiver(X, Y, umean_fov2, vmean_fov2, angles='xy', scale=50, width=.0025)\n\tp = plt.quiverkey(q, .11, -.025, 4,\"4 
m/s\",coordinates='data',color='r')\n\tplt.axis([0, .1, 0, .2])\n\tplt.ylabel('Wall Normal Position, $y/\\delta$', fontsize=18)\n\tplt.xlabel('Streamwise Position, x (m)', fontsize=14)\n\tplt.title('Mean PIV Vector Field', fontsize=14)\n\tplt.show()\n\tprint('Done!')\n\treturn\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_hdf", "matplotlib.pyplot.contourf", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.hold", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.quiverkey", "matplotlib.pyplot.axis", "numpy.transpose", "matplotlib.pyplot.quiver", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
gohsyi/learning-with-noisy-labels
[ "bd6086131389d6d3c9998e1908fe6e4a17ae7deb" ]
[ "run_svm.py" ]
[ "import datetime\nimport numpy as np\nimport multiprocessing as mp\n\nfrom sklearn.svm import SVC\n\nfrom utils import logger\nfrom utils.dataloader import DataLoader\nfrom utils.misc import set_global_seeds, make_arg_list\n\nCLASS_WEIGHTS = [0.1, 0.2, 0.25, 0.33, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0]\n\n\ndef find_best_c1(args):\n set_global_seeds(args['seed'])\n dataset = DataLoader(args['dataset'])\n X_train, X_test, X_val, y_train, y_test, y_val = dataset.prepare_train_test_val(args)\n results = []\n for c1 in CLASS_WEIGHTS:\n model = SVC(gamma='auto', class_weight={0: 1., 1: c1})\n model.fit(X_train, y_train)\n results.append(model.score(X_val, y_val))\n return results\n\n\ndef run_c_svm(args):\n set_global_seeds(args['seed'])\n dataset = DataLoader(args['dataset'])\n X_train, X_test, X_val, y_train, y_test, y_val = dataset.prepare_train_test_val(args)\n model = SVC(gamma='auto', class_weight={0: 1., 1: args['C1']})\n model.fit(X_train, y_train)\n return model.score(X_test, y_test)\n\n\ndef run(args):\n logger.configure(f'logs/{args[\"dataset\"]}/svm/{datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\")}')\n logger.info(args)\n\n pool = mp.Pool(mp.cpu_count())\n svm_arg = args.copy()\n\n if 'C1' not in svm_arg.keys():\n best_c1 = pool.map(find_best_c1, make_arg_list(svm_arg))\n best_c1 = np.mean(best_c1, 0)\n if 'verbose' in svm_arg.keys() and svm_arg['verbose']:\n for i in range(len(best_c1)):\n logger.record_tabular(f'[C-SVM] C1 = {CLASS_WEIGHTS[i]}', best_c1[i])\n logger.dump_tabular()\n best_c1 = CLASS_WEIGHTS[best_c1.argmax()]\n logger.record_tabular('[C-SVM] best C1', best_c1)\n svm_arg['C1'] = best_c1\n\n results_svm = pool.map(run_c_svm, make_arg_list(svm_arg))\n\n logger.record_tabular('[C-SVM] accuracy mean', np.mean(results_svm))\n logger.record_tabular('[C-SVM] accuracy max', np.max(results_svm))\n logger.record_tabular('[C-SVM] accuracy min', np.min(results_svm))\n logger.record_tabular('[C-SVM] accuracy std', np.std(results_svm))\n logger.dump_tabular()\n\n\nif __name__ == '__main__':\n from utils.parser import parse_args\n run(parse_args().__dict__)\n" ]
[ [ "numpy.min", "numpy.max", "numpy.std", "numpy.mean", "sklearn.svm.SVC" ] ]
AlwaysGemini/PaddleSeg
[ "a8c38bd6eca539b8fa470b8d59f7b22e6daf3a94" ]
[ "paddleseg/models/shufflenet_slim.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager, param_init\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n__all__ = ['ShuffleNetV2']\n\n\n@manager.MODELS.add_component\nclass ShuffleNetV2(nn.Layer):\n def __init__(self, num_classes, pretrained=None, align_corners=False):\n super().__init__()\n self.pretrained = pretrained\n self.num_classes = num_classes\n self.align_corners = align_corners\n\n self.conv_bn0 = _ConvBNReLU(3, 36, 3, 2, 1)\n self.conv_bn1 = _ConvBNReLU(36, 18, 1, 1, 0)\n\n self.block1 = nn.Sequential(\n SFNetV2Module(36, stride=2, out_channels=72),\n SFNetV2Module(72, stride=1), SFNetV2Module(72, stride=1),\n SFNetV2Module(72, stride=1))\n\n self.block2 = nn.Sequential(\n SFNetV2Module(72, stride=2), SFNetV2Module(144, stride=1),\n SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1),\n SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1),\n SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1))\n\n self.depthwise_separable0 = _SeparableConvBNReLU(144, 64, 3, stride=1)\n self.depthwise_separable1 = _SeparableConvBNReLU(82, 64, 3, stride=1)\n\n weight_attr = paddle.ParamAttr(\n learning_rate=1.,\n regularizer=paddle.regularizer.L2Decay(coeff=0.),\n initializer=nn.initializer.XavierUniform())\n self.deconv = nn.Conv2DTranspose(\n 64,\n self.num_classes,\n 2,\n stride=2,\n padding=0,\n weight_attr=weight_attr,\n bias_attr=True)\n\n self.init_weight()\n\n def forward(self, x):\n ## Encoder\n conv1 = self.conv_bn0(x) # encoder 1\n shortcut = self.conv_bn1(conv1) # shortcut 1\n\n pool = F.max_pool2d(\n conv1, kernel_size=3, stride=2, padding=1) # encoder 2\n\n # Block 1\n conv = self.block1(pool) # encoder 3\n\n # Block 2\n conv = self.block2(conv) # encoder 4\n\n ### decoder\n conv = self.depthwise_separable0(conv)\n shortcut_shape = paddle.shape(shortcut)[2:]\n conv_b = F.interpolate(\n conv,\n shortcut_shape,\n mode='bilinear',\n align_corners=self.align_corners)\n concat = paddle.concat(x=[shortcut, conv_b], axis=1)\n decode_conv = self.depthwise_separable1(concat)\n logit = self.deconv(decode_conv)\n return [logit]\n\n def init_weight(self):\n for layer in self.sublayers():\n if isinstance(layer, nn.Conv2D):\n param_init.normal_init(layer.weight, std=0.001)\n elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)):\n param_init.constant_init(layer.weight, value=1.0)\n param_init.constant_init(layer.bias, value=0.0)\n if self.pretrained is not None:\n utils.load_pretrained_model(self, self.pretrained)\n\n\nclass _ConvBNReLU(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n groups=1,\n **kwargs):\n super().__init__()\n weight_attr = paddle.ParamAttr(\n learning_rate=1, initializer=nn.initializer.KaimingUniform())\n self._conv = nn.Conv2D(\n in_channels,\n out_channels,\n kernel_size,\n 
padding=padding,\n stride=stride,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=False,\n **kwargs)\n\n self._batch_norm = layers.SyncBatchNorm(out_channels)\n\n def forward(self, x):\n x = self._conv(x)\n x = self._batch_norm(x)\n x = F.relu(x)\n return x\n\n\nclass _ConvBN(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n groups=1,\n **kwargs):\n super().__init__()\n weight_attr = paddle.ParamAttr(\n learning_rate=1, initializer=nn.initializer.KaimingUniform())\n self._conv = nn.Conv2D(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding,\n stride=stride,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=False,\n **kwargs)\n\n self._batch_norm = layers.SyncBatchNorm(out_channels)\n\n def forward(self, x):\n x = self._conv(x)\n x = self._batch_norm(x)\n return x\n\n\nclass _SeparableConvBNReLU(nn.Layer):\n def __init__(self, in_channels, out_channels, kernel_size, **kwargs):\n super().__init__()\n self.depthwise_conv = _ConvBN(\n in_channels,\n out_channels=in_channels,\n kernel_size=kernel_size,\n padding=int(kernel_size / 2),\n groups=in_channels,\n **kwargs)\n self.piontwise_conv = _ConvBNReLU(\n in_channels,\n out_channels,\n kernel_size=1,\n groups=1,\n stride=1,\n padding=0)\n\n def forward(self, x):\n x = self.depthwise_conv(x)\n x = self.piontwise_conv(x)\n return x\n\n\nclass SFNetV2Module(nn.Layer):\n def __init__(self, input_channels, stride, out_channels=None):\n super().__init__()\n if stride == 1:\n branch_channel = int(input_channels / 2)\n else:\n branch_channel = input_channels\n\n if out_channels is None:\n self.in_channels = int(branch_channel)\n else:\n self.in_channels = int(out_channels / 2)\n\n self._depthwise_separable_0 = _SeparableConvBNReLU(\n input_channels, self.in_channels, 3, stride=stride)\n self._conv = _ConvBNReLU(\n branch_channel, self.in_channels, 1, stride=1, padding=0)\n self._depthwise_separable_1 = _SeparableConvBNReLU(\n self.in_channels, self.in_channels, 3, stride=stride)\n\n self.stride = stride\n\n def forward(self, input):\n\n if self.stride == 1:\n shortcut, branch = paddle.split(x=input, num_or_sections=2, axis=1)\n else:\n branch = input\n shortcut = self._depthwise_separable_0(input)\n\n branch_1x1 = self._conv(branch)\n branch_dw1x1 = self._depthwise_separable_1(branch_1x1)\n output = paddle.concat(x=[shortcut, branch_dw1x1], axis=1)\n\n # channel shuffle\n out_shape = paddle.shape(output)\n h, w = out_shape[2], out_shape[3]\n output = paddle.reshape(x=output, shape=[0, 2, self.in_channels, h, w])\n output = paddle.transpose(x=output, perm=[0, 2, 1, 3, 4])\n output = paddle.reshape(x=output, shape=[0, 2 * self.in_channels, h, w])\n return output\n\n\nif __name__ == '__main__':\n import numpy as np\n import os\n\n np.random.seed(100)\n paddle.seed(100)\n\n net = ShuffleNetV2(10)\n img = np.random.random(size=(4, 3, 100, 100)).astype('float32')\n img = paddle.to_tensor(img)\n out = net(img)\n print(out)\n\n net.forward = paddle.jit.to_static(net.forward)\n save_path = os.path.join('.', 'model')\n in_var = paddle.ones([4, 3, 100, 100])\n paddle.jit.save(net, save_path, input_spec=[in_var])\n" ]
[ [ "numpy.random.random", "numpy.random.seed" ] ]
netx-repo/byteps
[ "95bbc0dabd76453504a2a73c6fcfeb47242d5ca7" ]
[ "examples-byteps/tensorflow/transformer/official/vision/image_classification/imagenet_preprocessing.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Provides utilities to preprocess images.\n\nTraining images are sampled using the provided bounding boxes, and subsequently\ncropped to the sampled bounding box. Images are additionally flipped randomly,\nthen resized to the target output size (without aspect-ratio preservation).\n\nImages used during evaluation are resized (with aspect-ratio preservation) and\ncentrally cropped.\n\nAll images undergo mean color subtraction.\n\nNote that these steps are colloquially referred to as \"ResNet preprocessing,\"\nand they differ from \"VGG preprocessing,\" which does not use bounding boxes\nand instead does an aspect-preserving resize followed by random crop during\ntraining. (These both differ from \"Inception preprocessing,\" which introduces\ncolor distortion steps.)\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl import logging\nimport tensorflow as tf\n\nDEFAULT_IMAGE_SIZE = 224\nNUM_CHANNELS = 3\nNUM_CLASSES = 1001\n\nNUM_IMAGES = {\n 'train': 1281167,\n 'validation': 50000,\n}\n\n_NUM_TRAIN_FILES = 1024\n_SHUFFLE_BUFFER = 10000\n\n_R_MEAN = 123.68\n_G_MEAN = 116.78\n_B_MEAN = 103.94\nCHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]\n\n# The lower bound for the smallest side of the image for aspect-preserving\n# resizing. For example, if an image is 500 x 1000, it will be resized to\n# _RESIZE_MIN x (_RESIZE_MIN * 2).\n_RESIZE_MIN = 256\n\n\ndef process_record_dataset(dataset,\n is_training,\n batch_size,\n shuffle_buffer,\n parse_record_fn,\n num_epochs=1,\n dtype=tf.float32,\n datasets_num_private_threads=None,\n drop_remainder=False,\n tf_data_experimental_slack=False):\n \"\"\"Given a Dataset with raw records, return an iterator over the records.\n\n Args:\n dataset: A Dataset representing raw records\n is_training: A boolean denoting whether the input is for training.\n batch_size: The number of samples per batch.\n shuffle_buffer: The buffer size to use when shuffling records. A larger\n value results in better randomness, but smaller values reduce startup\n time and use less memory.\n parse_record_fn: A function that takes a raw record and returns the\n corresponding (image, label) pair.\n num_epochs: The number of epochs to repeat the dataset.\n dtype: Data type to use for images/features.\n datasets_num_private_threads: Number of threads for a private\n threadpool created for all datasets computation.\n drop_remainder: A boolean indicates whether to drop the remainder of the\n batches. 
If True, the batch dimension will be static.\n tf_data_experimental_slack: Whether to enable tf.data's\n `experimental_slack` option.\n\n Returns:\n Dataset of (image, label) pairs ready for iteration.\n \"\"\"\n # Defines a specific size thread pool for tf.data operations.\n if datasets_num_private_threads:\n options = tf.data.Options()\n options.experimental_threading.private_threadpool_size = (\n datasets_num_private_threads)\n dataset = dataset.with_options(options)\n logging.info(\n 'datasets_num_private_threads: %s', datasets_num_private_threads)\n\n if is_training:\n # Shuffles records before repeating to respect epoch boundaries.\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n # Repeats the dataset for the number of epochs to train.\n dataset = dataset.repeat()\n\n # Parses the raw records into images and labels.\n dataset = dataset.map(\n lambda value: parse_record_fn(value, is_training, dtype),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)\n\n # Operations between the final prefetch and the get_next call to the iterator\n # will happen synchronously during run time. We prefetch here again to\n # background all of the above processing work and keep it out of the\n # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE\n # allows DistributionStrategies to adjust how many batches to fetch based\n # on how many devices are present.\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n options = tf.data.Options()\n options.experimental_slack = tf_data_experimental_slack\n dataset = dataset.with_options(options)\n\n return dataset\n\n\ndef get_filenames(is_training, data_dir):\n \"\"\"Return filenames for dataset.\"\"\"\n if is_training:\n return [\n os.path.join(data_dir, 'train-%05d-of-01024' % i)\n for i in range(_NUM_TRAIN_FILES)]\n else:\n return [\n os.path.join(data_dir, 'validation-%05d-of-00128' % i)\n for i in range(128)]\n\n\ndef parse_example_proto(example_serialized):\n \"\"\"Parses an Example proto containing a training example of an image.\n\n The output of the build_image_data.py image preprocessing script is a dataset\n containing serialized Example protocol buffers. 
Each Example proto contains\n the following fields (values are included as examples):\n\n image/height: 462\n image/width: 581\n image/colorspace: 'RGB'\n image/channels: 3\n image/class/label: 615\n image/class/synset: 'n03623198'\n image/class/text: 'knee pad'\n image/object/bbox/xmin: 0.1\n image/object/bbox/xmax: 0.9\n image/object/bbox/ymin: 0.2\n image/object/bbox/ymax: 0.6\n image/object/bbox/label: 615\n image/format: 'JPEG'\n image/filename: 'ILSVRC2012_val_00041207.JPEG'\n image/encoded: <JPEG encoded string>\n\n Args:\n example_serialized: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n\n Returns:\n image_buffer: Tensor tf.string containing the contents of a JPEG file.\n label: Tensor tf.int32 containing the label.\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n \"\"\"\n # Dense features in Example proto.\n feature_map = {\n 'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,\n default_value=-1),\n 'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n }\n sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)\n # Sparse features in Example proto.\n feature_map.update(\n {k: sparse_float32 for k in [\n 'image/object/bbox/xmin', 'image/object/bbox/ymin',\n 'image/object/bbox/xmax', 'image/object/bbox/ymax']})\n\n features = tf.io.parse_single_example(serialized=example_serialized,\n features=feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(a=bbox, perm=[0, 2, 1])\n\n return features['image/encoded'], label, bbox\n\n\ndef parse_record(raw_record, is_training, dtype):\n \"\"\"Parses a record containing a training example of an image.\n\n The input record is parsed into a label and image, and the image is passed\n through preprocessing steps (cropping, flipping, and so on).\n\n Args:\n raw_record: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n is_training: A boolean denoting whether the input is for training.\n dtype: data type to use for images/features.\n\n Returns:\n Tuple with processed image tensor in a channel-last format and\n one-hot-encoded label tensor.\n \"\"\"\n image_buffer, label, bbox = parse_example_proto(raw_record)\n\n image = preprocess_image(\n image_buffer=image_buffer,\n bbox=bbox,\n output_height=DEFAULT_IMAGE_SIZE,\n output_width=DEFAULT_IMAGE_SIZE,\n num_channels=NUM_CHANNELS,\n is_training=is_training)\n image = tf.cast(image, dtype)\n\n # Subtract one so that labels are in [0, 1000), and cast to float32 for\n # Keras model.\n label = tf.cast(tf.cast(tf.reshape(label, shape=[1]), dtype=tf.int32) - 1,\n dtype=tf.float32)\n return image, label\n\n\ndef get_parse_record_fn(use_keras_image_data_format=False):\n \"\"\"Get a function for parsing the records, accounting for image 
format.\n\n This is useful by handling different types of Keras models. For instance,\n the current resnet_model.resnet50 input format is always channel-last,\n whereas the keras_applications mobilenet input format depends on\n tf.keras.backend.image_data_format(). We should set\n use_keras_image_data_format=False for the former and True for the latter.\n\n Args:\n use_keras_image_data_format: A boolean denoting whether data format is keras\n backend image data format. If False, the image format is channel-last. If\n True, the image format matches tf.keras.backend.image_data_format().\n\n Returns:\n Function to use for parsing the records.\n \"\"\"\n def parse_record_fn(raw_record, is_training, dtype):\n image, label = parse_record(raw_record, is_training, dtype)\n if use_keras_image_data_format:\n if tf.keras.backend.image_data_format() == 'channels_first':\n image = tf.transpose(image, perm=[2, 0, 1])\n return image, label\n return parse_record_fn\n\n\ndef input_fn(is_training,\n data_dir,\n batch_size,\n num_epochs=1,\n dtype=tf.float32,\n datasets_num_private_threads=None,\n parse_record_fn=parse_record,\n input_context=None,\n drop_remainder=False,\n tf_data_experimental_slack=False,\n training_dataset_cache=False,\n filenames=None):\n \"\"\"Input function which provides batches for train or eval.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: The directory containing the input data.\n batch_size: The number of samples per batch.\n num_epochs: The number of epochs to repeat the dataset.\n dtype: Data type to use for images/features\n datasets_num_private_threads: Number of private threads for tf.data.\n parse_record_fn: Function to use for parsing the records.\n input_context: A `tf.distribute.InputContext` object passed in by\n `tf.distribute.Strategy`.\n drop_remainder: A boolean indicates whether to drop the remainder of the\n batches. If True, the batch dimension will be static.\n tf_data_experimental_slack: Whether to enable tf.data's\n `experimental_slack` option.\n training_dataset_cache: Whether to cache the training dataset on workers.\n Typically used to improve training performance when training data is in\n remote storage and can fit into worker memory.\n filenames: Optional field for providing the file names of the TFRecords.\n\n Returns:\n A dataset that can be used for iteration.\n \"\"\"\n if filenames is None:\n filenames = get_filenames(is_training, data_dir)\n dataset = tf.data.Dataset.from_tensor_slices(filenames)\n\n if input_context:\n logging.info(\n 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',\n input_context.input_pipeline_id, input_context.num_input_pipelines)\n dataset = dataset.shard(input_context.num_input_pipelines,\n input_context.input_pipeline_id)\n\n if is_training:\n # Shuffle the input files\n dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)\n\n # Convert to individual records.\n # cycle_length = 10 means that up to 10 files will be read and deserialized in\n # parallel. 
You may want to increase this number if you have a large number of\n # CPU cores.\n dataset = dataset.interleave(\n lambda path: tf.data.TFRecordDataset(path, buffer_size=128*1024*1024),\n cycle_length=10,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if is_training and training_dataset_cache:\n # Improve training performance when training data is in remote storage and\n # can fit into worker memory.\n dataset = dataset.cache()\n\n return process_record_dataset(\n dataset=dataset,\n is_training=is_training,\n batch_size=batch_size,\n shuffle_buffer=_SHUFFLE_BUFFER,\n parse_record_fn=parse_record_fn,\n num_epochs=num_epochs,\n dtype=dtype,\n datasets_num_private_threads=datasets_num_private_threads,\n drop_remainder=drop_remainder,\n tf_data_experimental_slack=tf_data_experimental_slack,\n )\n\n\ndef _decode_crop_and_flip(image_buffer, bbox, num_channels):\n \"\"\"Crops the given image to a random part of the image, and randomly flips.\n\n We use the fused decode_and_crop op, which performs better than the two ops\n used separately in series, but note that this requires that the image be\n passed in as an un-decoded string Tensor.\n\n Args:\n image_buffer: scalar string Tensor representing the raw JPEG image buffer.\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n num_channels: Integer depth of the image buffer for decoding.\n\n Returns:\n 3-D tensor with cropped image.\n\n \"\"\"\n # A large fraction of image datasets contain a human-annotated bounding box\n # delineating the region of the image containing the object of interest. We\n # choose to create a new bounding box for the object which is a randomly\n # distorted version of the human-annotated bounding box that obeys an\n # allowed range of aspect ratios, sizes and overlap with the human-annotated\n # bounding box. 
If no box is supplied, then we assume the bounding box is\n # the entire image.\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n tf.image.extract_jpeg_shape(image_buffer),\n bounding_boxes=bbox,\n min_object_covered=0.1,\n aspect_ratio_range=[0.75, 1.33],\n area_range=[0.05, 1.0],\n max_attempts=100,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n # Reassemble the bounding box in the format the crop op requires.\n offset_y, offset_x, _ = tf.unstack(bbox_begin)\n target_height, target_width, _ = tf.unstack(bbox_size)\n crop_window = tf.stack([offset_y, offset_x, target_height, target_width])\n\n # Use the fused decode and crop op here, which is faster than each in series.\n cropped = tf.image.decode_and_crop_jpeg(\n image_buffer, crop_window, channels=num_channels)\n\n # Flip to add a little more random distortion in.\n cropped = tf.image.random_flip_left_right(cropped)\n return cropped\n\n\ndef _central_crop(image, crop_height, crop_width):\n \"\"\"Performs central crops of the given image list.\n\n Args:\n image: a 3-D image tensor\n crop_height: the height of the image following the crop.\n crop_width: the width of the image following the crop.\n\n Returns:\n 3-D tensor with cropped image.\n \"\"\"\n shape = tf.shape(input=image)\n height, width = shape[0], shape[1]\n\n amount_to_be_cropped_h = (height - crop_height)\n crop_top = amount_to_be_cropped_h // 2\n amount_to_be_cropped_w = (width - crop_width)\n crop_left = amount_to_be_cropped_w // 2\n return tf.slice(\n image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])\n\n\ndef _mean_image_subtraction(image, means, num_channels):\n \"\"\"Subtracts the given means from each image channel.\n\n For example:\n means = [123.68, 116.779, 103.939]\n image = _mean_image_subtraction(image, means)\n\n Note that the rank of `image` must be known.\n\n Args:\n image: a tensor of size [height, width, C].\n means: a C-vector of values to subtract from each channel.\n num_channels: number of color channels in the image that will be distorted.\n\n Returns:\n the centered image.\n\n Raises:\n ValueError: If the rank of `image` is unknown, if `image` has a rank other\n than three or if the number of channels in `image` doesn't match the\n number of values in `means`.\n \"\"\"\n if image.get_shape().ndims != 3:\n raise ValueError('Input must be of size [height, width, C>0]')\n\n if len(means) != num_channels:\n raise ValueError('len(means) must match the number of channels')\n\n # We have a 1-D tensor of means; convert to 3-D.\n # Note(b/130245863): we explicitly call `broadcast` instead of simply\n # expanding dimensions for better performance.\n means = tf.broadcast_to(means, tf.shape(image))\n\n return image - means\n\n\ndef _smallest_size_at_least(height, width, resize_min):\n \"\"\"Computes new shape with the smallest side equal to `smallest_side`.\n\n Computes new shape with the smallest side equal to `smallest_side` while\n preserving the original aspect ratio.\n\n Args:\n height: an int32 scalar tensor indicating the current height.\n width: an int32 scalar tensor indicating the current width.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n new_height: an int32 scalar tensor indicating the new height.\n new_width: an int32 scalar tensor indicating the new width.\n \"\"\"\n resize_min = tf.cast(resize_min, tf.float32)\n\n # Convert to floats to make subsequent calculations go smoothly.\n 
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)\n\n smaller_dim = tf.minimum(height, width)\n scale_ratio = resize_min / smaller_dim\n\n # Convert back to ints to make heights and widths that TF ops will accept.\n new_height = tf.cast(height * scale_ratio, tf.int32)\n new_width = tf.cast(width * scale_ratio, tf.int32)\n\n return new_height, new_width\n\n\ndef _aspect_preserving_resize(image, resize_min):\n \"\"\"Resize images preserving the original aspect ratio.\n\n Args:\n image: A 3-D image `Tensor`.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image.\n \"\"\"\n shape = tf.shape(input=image)\n height, width = shape[0], shape[1]\n\n new_height, new_width = _smallest_size_at_least(height, width, resize_min)\n\n return _resize_image(image, new_height, new_width)\n\n\ndef _resize_image(image, height, width):\n \"\"\"Simple wrapper around tf.resize_images.\n\n This is primarily to make sure we use the same `ResizeMethod` and other\n details each time.\n\n Args:\n image: A 3-D image `Tensor`.\n height: The target height for the resized image.\n width: The target width for the resized image.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image. The first two\n dimensions have the shape [height, width].\n \"\"\"\n return tf.compat.v1.image.resize(\n image, [height, width], method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False)\n\n\ndef preprocess_image(image_buffer, bbox, output_height, output_width,\n num_channels, is_training=False):\n \"\"\"Preprocesses the given image.\n\n Preprocessing includes decoding, cropping, and resizing for both training\n and eval images. Training preprocessing, however, introduces some random\n distortion of the image to improve accuracy.\n\n Args:\n image_buffer: scalar string Tensor representing the raw JPEG image buffer.\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n num_channels: Integer depth of the image buffer for decoding.\n is_training: `True` if we're preprocessing the image for training and\n `False` otherwise.\n\n Returns:\n A preprocessed image.\n \"\"\"\n if is_training:\n # For training, we want to randomize some of the distortions.\n image = _decode_crop_and_flip(image_buffer, bbox, num_channels)\n image = _resize_image(image, output_height, output_width)\n else:\n # For validation, we want to decode, resize, then just crop the middle.\n image = tf.image.decode_jpeg(image_buffer, channels=num_channels)\n image = _aspect_preserving_resize(image, _RESIZE_MIN)\n image = _central_crop(image, output_height, output_width)\n\n image.set_shape([output_height, output_width, num_channels])\n\n return _mean_image_subtraction(image, CHANNEL_MEANS, num_channels)\n" ]
[ [ "tensorflow.concat", "tensorflow.stack", "tensorflow.cast", "tensorflow.minimum", "tensorflow.image.decode_and_crop_jpeg", "tensorflow.image.random_flip_left_right", "tensorflow.data.TFRecordDataset", "tensorflow.io.VarLenFeature", "tensorflow.data.Options", "tensorflow.image.decode_jpeg", "tensorflow.compat.v1.image.resize", "tensorflow.unstack", "tensorflow.shape", "tensorflow.keras.backend.image_data_format", "tensorflow.image.extract_jpeg_shape", "tensorflow.transpose", "tensorflow.slice", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.io.parse_single_example", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.io.FixedLenFeature" ] ]
zhuchen03/influence
[ "fec7d4759da4843e356976f00e2af95cf0ea3078" ]
[ "scripts/run_spam_experiment.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals \n\nimport os\nimport math\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model as linear_model\n\nimport scipy\nimport sklearn\n\nimport influence.experiments as experiments\nfrom influence.nlprocessor import NLProcessor\nfrom influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS\nfrom load_spam import load_spam\n\nimport tensorflow as tf\n\nfrom influence.dataset import DataSet\nfrom tensorflow.contrib.learn.python.learn.datasets import base\n\n# def load_adult_dataset():\n\n# train_set = np.load('/scratch0/GoGradients/data/adult/train.npy')\n# test_set = np.load('/scratch0/GoGradients/data/adult/test.npy')\n\n# X_train, y_train = train_set[:,:-1], (train_set[:,-1]+1)/2\n# X_test, y_test = test_set[:,:-1], (test_set[:,-1]+1)/2 #.reshape(-1,1)\n\n# train = DataSet(X_train, y_train)\n# test = DataSet(X_test, y_test)\n\n# return base.Datasets(train=train, validation=test, test=test)\n\nnp.random.seed(42)\n\n\ndata_sets = load_spam()\n# data_sets = load_adult_dataset()\n\nnum_classes = 2\n\ninput_dim = data_sets.train.x.shape[1]\nweight_decay = 0.0001\n# weight_decay = 1000 / len(lr_data_sets.train.labels)\nbatch_size = 100\ninitial_learning_rate = 0.001 \nkeep_probs = None\ndecay_epochs = [1000, 10000]\nmax_lbfgs_iter = 1000\n\ntf.reset_default_graph()\n\ntf_model = BinaryLogisticRegressionWithLBFGS(\n input_dim=input_dim,\n weight_decay=weight_decay,\n max_lbfgs_iter=max_lbfgs_iter,\n num_classes=num_classes, \n batch_size=batch_size,\n data_sets=data_sets,\n initial_learning_rate=initial_learning_rate,\n keep_probs=keep_probs,\n decay_epochs=decay_epochs,\n mini_batch=False,\n train_dir='output',\n log_dir='log',\n model_name='spam_logreg')\n\ntf_model.train()\n\n# test_idx = 8\n# actual_loss_diffs, predicted_loss_diffs_cg, indices_to_remove = experiments.test_retraining(\n# tf_model,\n# test_idx,\n# iter_to_load=0,\n# force_refresh=False,\n# num_to_remove=500,\n# remove_type='maxinf',\n# random_seed=0)\n\nX_train = np.copy(tf_model.data_sets.train.x)\nY_train = np.copy(tf_model.data_sets.train.labels)\nX_test = np.copy(tf_model.data_sets.test.x)\nY_test = np.copy(tf_model.data_sets.test.labels) \n\n\nnum_train_examples = Y_train.shape[0] \nnum_flip_vals = 6\nnum_check_vals = 6\nnum_random_seeds = 40\n\ndims = (num_flip_vals, num_check_vals, num_random_seeds, 3)\nfixed_influence_loo_results = np.zeros(dims)\nfixed_loss_results = np.zeros(dims)\nfixed_random_results = np.zeros(dims)\n\nflipped_results = np.zeros((num_flip_vals, num_random_seeds, 3))\n\norig_results = tf_model.sess.run(\n [tf_model.loss_no_reg, tf_model.accuracy_op], \n feed_dict=tf_model.all_test_feed_dict)\n \nprint('Orig loss: %.5f. 
Accuracy: %.3f' % (orig_results[0], orig_results[1]))\n\nfor flips_idx in range(num_flip_vals):\n for random_seed_idx in range(num_random_seeds):\n \n random_seed = flips_idx * (num_random_seeds * 3) + (random_seed_idx * 2) \n np.random.seed(random_seed)\n \n num_flips = int(num_train_examples / 20) * (flips_idx + 1) \n idx_to_flip = np.random.choice(num_train_examples, size=num_flips, replace=False)\n Y_train_flipped = np.copy(Y_train)\n Y_train_flipped[idx_to_flip] = 1 - Y_train[idx_to_flip] \n \n tf_model.update_train_x_y(X_train, Y_train_flipped)\n tf_model.train() \n flipped_results[flips_idx, random_seed_idx, 1:] = tf_model.sess.run(\n [tf_model.loss_no_reg, tf_model.accuracy_op], \n feed_dict=tf_model.all_test_feed_dict)\n print('Flipped loss: %.5f. Accuracy: %.3f' % (\n flipped_results[flips_idx, random_seed_idx, 1], flipped_results[flips_idx, random_seed_idx, 2]))\n \n train_losses = tf_model.sess.run(tf_model.indiv_loss_no_reg, feed_dict=tf_model.all_train_feed_dict)\n train_loo_influences = tf_model.get_loo_influences()\n\n for checks_idx in range(num_check_vals):\n np.random.seed(random_seed + 1)\n num_checks = int(num_train_examples / 20) * (checks_idx + 1)\n\n print('### Flips: %s, rs: %s, checks: %s' % (num_flips, random_seed_idx, num_checks))\n\n fixed_influence_loo_results[flips_idx, checks_idx, random_seed_idx, :], \\\n fixed_loss_results[flips_idx, checks_idx, random_seed_idx, :], \\\n fixed_random_results[flips_idx, checks_idx, random_seed_idx, :] \\\n = experiments.test_mislabeled_detection_batch(\n tf_model, \n X_train, Y_train,\n Y_train_flipped,\n X_test, Y_test, \n train_losses, train_loo_influences,\n num_flips, num_checks)\n\n\nnp.savez(\n 'output/spam_results', \n orig_results=orig_results,\n flipped_results=flipped_results,\n fixed_influence_loo_results=fixed_influence_loo_results,\n fixed_loss_results=fixed_loss_results,\n fixed_random_results=fixed_random_results\n)" ]
[ [ "numpy.savez", "numpy.random.seed", "numpy.random.choice", "numpy.copy", "tensorflow.reset_default_graph", "numpy.zeros" ] ]
OE9NAT/bacharbeit
[ "067b0fc81cd306233cd97e124395ac898a82f092" ]
[ "program/sequenz/FIDseq.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 9 09:37:39 2021\n\n@author: luki\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft, fftshift, fftfreq\nimport limr\n\nl = limr.limr('./pulseN_test_USB.cpp');\n\nl.noi = -1 #hardcoded initialization of the lime. needed if parameters (e.g. Gain, tgi, tdi, are changed and need to be set to chip)\n\n#target frequency of the experiment\ntgtfreq = 83.62e6\n\n#IF or base band frequency\nif_frq = 1.2e6\n\nl.lof = tgtfreq-if_frq # LO frequency (target7 frequency - base band frequency)\nl.sra = 30.72e6 # Sampling Rate\nl.nav = 1000 # number of averages\nl.nrp = 1 # number of repetitions\n\nl.tdi = -45 # TX I DC correction\nl.tdq = 0 # TX Q DC correction\nl.tgi = 2047 # TX I Gain correction \nl.tgq = 2039 # TX Q Gain correction \nl.tpc = 3 # TX phase adjustment\n \nl.rgi = 2047 # RX I Gain correction\nl.rgq = 2047 # RX Q Gain correction\nl.rdi = 0 # RX I DC correction\nl.rdq = 0 # RX Q DC correction\nl.rpc = 0 # RX phase adjustment\n\n#repetition and acquisition time (acquisition time can only be an integer multiple of the buffer size from Cpp, so the number here will automatically\n#be adjusted in the ways that it fits to an integer multiply of the buffer size\nl.trp = 5e-3 # repetition time\nl.tac = 82e-6 # acquisition time (gives minimum buffer size)\nl.t3d = [1, 0, 50, 10] # GPIO Pin3 is centered around the pulse (used as a Gate Signal)\n\n# pulse durations\nl.pfr = [if_frq] # pulse frequency\nl.pdr = [3e-6] # pulse duration\nl.pam = [1] # relative pulse amplitude (only makes sense if 2 or more pulses are in the sequence)\nl.pof = [300] # pulse arrangement 1 means immediate start of the pulse (3us from zero approx. is then start of the first pulse)\n \n\nl.npu = len(l.pfr) # number of pulses\n\nl.rgn = 55.0 # RX gain\nl.tgn = 40.0 # TX gain\n\nRX_gainfactor = 1\n \nif l.rgn == 40:\n RX_gainfactor = 1\nelse:\n RX_gainfactor = 10**((l.rgn-40)/20)\n\nl.rlp = 3.0e6 # RX BW (IF or base band low pass filter)\nl.tlp = 130.0e6 # RX BW\n\nl.spt = './pulse/FID' # directory to save to\nl.fpa = 'setup'\n\n\nl.run()\n\n#read back file and plot time signal + shifted fft \nif (1 == 1):\n\n #reads back the file which was recently saved\n l.readHDF()\n\n #evaluation range, defines: blanking time and window length\n evran = [22.5, 42.5]\n \n #np.where sometimes does not work out, so it is put in a try except\n #always check the console for errors\n try:\n evidx = np.where( (l.HDF.tdx > evran[0]) & (l.HDF.tdx < evran[1]) )[0]\n except:\n print(\"error due to np.where evaluation!\")\n \n #time domain x and y data \n tdx = l.HDF.tdx[evidx]\n tdy = l.HDF.tdy[evidx]\n\n #correcting a offset in the time domain by subtracting the mean\n tdy_mean = tdy-np.mean(tdy)\n\n #fft of the corrected time domain data\n fdy1 = fftshift(fft(tdy_mean,axis=0),axes=0)\n\n #fft freq and fft shift is here used to scale the x axis (frequency axis)\n fdx1 = fftfreq(len(fdy1))*30.72\n fdx1 = fftshift(fdx1)\n\n #scaling factor which converts the y axis (usually a proportional number of points) into uV\n fac_p_to_uV = 447651/1e6\n \n tdy_mean = tdy_mean/l.nav/fac_p_to_uV/RX_gainfactor\n \n plt.figure(1);\n plt.plot(tdx,tdy_mean)\n plt.xlabel(\"t in µs\")\n plt.ylabel(\"Amplitude in µV\")\n plt.show()\n \n \n #get LO frequency and add it to the base band fft x-Axis in order to illustrate the applied frequency\n #for single side spectrum and shift (only single frequency)\n lof=l.HDF.attr_by_key('lof')\n\n for i in range(0, 
len(fdx1)):\n fdx1[i] = fdx1[i]+lof[0]/1e6\n\n\n #cutting out the window of the interessting part of the computed fft spectrum (here from 83 - 84 MHz)\n #window of interest\n shifter = 12#0#50\n stopper = 270#300\n #here the right side of the spectrum is selected\n y=abs((fdy1[int(len(fdy1)/2)+shifter:len(fdy1)-1-stopper]))/len(tdx)/fac_p_to_uV/l.nav/RX_gainfactor\n x=fdx1[int(len(fdy1)/2)+shifter:len(fdy1)-1-stopper]\n \n plt.figure(5);\n plt.plot(x, y)\n plt.xlabel(\"f in MHz\")\n plt.ylabel(\"Amplitude in µV\")\n plt.show() \n \n \n #print std (for SNR determination -> noise analysis without sample)\n print(\"std rms frequency domain next to peak X: \" + str(np.std(y)))\n #print max of fft (for SNR evaluation - should give peak maximum)\n print(\"MAX of Signal: \" + str(max(y)))\n" ]
[ [ "matplotlib.pyplot.plot", "scipy.fftpack.fft", "matplotlib.pyplot.ylabel", "numpy.std", "numpy.mean", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.where", "scipy.fftpack.fftshift", "matplotlib.pyplot.figure" ] ]
mgaldieri/flask-pyaffective
[ "dc7b878dde2cb56d975f8d0d091b7b1e4e2d2b00" ]
[ "utils/sensors.py" ]
[ "# -*- coding:utf-8 -*-\n__author__ = 'mgaldieri'\n\nimport uuid\n\nimport numpy as np\n\nfrom pyaffective.emotions import OCC, OCEAN\n\n\nclass InfluenceValue:\n DIRECT = 1\n INVERSE = -1\n\n def __init__(self, weight=1.0, relation=None):\n self.weight = weight\n self.relation = relation if relation else InfluenceValue.DIRECT\n\n\nclass Influence:\n def __init__(self, influence=None,\n openness=InfluenceValue(),\n conscientiousness=InfluenceValue(),\n extraversion=InfluenceValue(),\n agreeableness=InfluenceValue(),\n neuroticism=InfluenceValue()):\n if not influence:\n influence = []\n if hasattr(influence, '__iter__') and len(influence) == 5:\n if all(isinstance(x, list) for x in influence) or all(isinstance(x, tuple) for x in influence):\n # self.influence = influence\n self.openness = InfluenceValue(influence[0][0], influence[0][1])\n self.conscientiousness = InfluenceValue(influence[1][0], influence[1][1])\n self.extraversion = InfluenceValue(influence[2][0], influence[2][1])\n self.agreeableness = InfluenceValue(influence[3][0], influence[3][1])\n self.neuroticism = InfluenceValue(influence[4][0], influence[4][1])\n elif all(isinstance(x, dict) for x in influence):\n # self.influence = influence\n self.openness = InfluenceValue(influence[0].get('weight', 0.0), influence[0].get('relation', 0.0))\n self.conscientiousness = InfluenceValue(influence[1].get('weight', 0.0), influence[1].get('relation', 0.0))\n self.extraversion = InfluenceValue(influence[2].get('weight', 0.0), influence[2].get('relation', 0.0))\n self.agreeableness = InfluenceValue(influence[3].get('weight', 0.0), influence[3].get('relation', 0.0))\n self.neuroticism = InfluenceValue(influence[4].get('weight', 0.0), influence[4].get('relation', 0.0))\n else:\n raise ValueError('Valores de influência incompatíveis.')\n elif all(isinstance(x, InfluenceValue) for x in [openness,\n conscientiousness,\n extraversion,\n agreeableness,\n neuroticism]):\n self.openness = openness\n self.conscientiousness = conscientiousness\n self.extraversion = extraversion\n self.agreeableness = agreeableness\n self.neuroticism = neuroticism\n # self.influence = np.array([openness,\n # conscientiousness,\n # extraversion,\n # agreeableness,\n # neuroticism], dtype=np.float64)\n else:\n raise ValueError('Valores de influência incompatíveis.')\n\n\nclass Sensor:\n def __init__(self, _id=None, name='No name', minval=0, maxval=1023, influences=None, **kwargs):\n self.id = _id if _id else uuid.uuid4()\n self.name = name\n self.minval = minval\n self.maxval = maxval\n if not influences:\n influences = []\n if isinstance(influences, list) and len(influences) == 24:\n if all(isinstance(x, Influence) for x in influences):\n self.admiration = influences[0]\n self.gloating = influences[1]\n self.gratification = influences[2]\n self.gratitude = influences[3]\n self.hope = influences[4]\n self.happy_for = influences[5]\n self.joy = influences[6]\n self.liking = influences[7]\n self.love = influences[8]\n self.pride = influences[9]\n self.relief = influences[10]\n self.satisfaction = influences[11]\n\n self.anger = influences[12]\n self.disliking = influences[13]\n self.disappointment = influences[14]\n self.distress = influences[15]\n self.fear = influences[16]\n self.fears_confirmed = influences[17]\n self.hate = influences[18]\n self.pity = influences[19]\n self.remorse = influences[20]\n self.reproach = influences[21]\n self.resentment = influences[22]\n self.shame = influences[23]\n else:\n raise Exception('Tipo de influência incompatível. 
Por favor utilize o tipo Influence.')\n else:\n self.admiration = kwargs.get('admiration', Influence())\n self.gloating = kwargs.get('gloating', Influence())\n self.gratification = kwargs.get('gratification', Influence())\n self.gratitude = kwargs.get('gratitude', Influence())\n self.hope = kwargs.get('hope', Influence())\n self.happy_for = kwargs.get('happy_for', Influence())\n self.joy = kwargs.get('joy', Influence())\n self.liking = kwargs.get('liking', Influence())\n self.love = kwargs.get('love', Influence())\n self.pride = kwargs.get('pride', Influence())\n self.relief = kwargs.get('relief', Influence())\n self.satisfaction = kwargs.get('satisfaction', Influence())\n\n self.anger = kwargs.get('anger', Influence())\n self.disliking = kwargs.get('disliking', Influence())\n self.disappointment = kwargs.get('disappointment', Influence())\n self.distress = kwargs.get('distress', Influence())\n self.fear = kwargs.get('fear', Influence())\n self.fears_confirmed = kwargs.get('fears_confirmed', Influence())\n self.hate = kwargs.get('hate', Influence())\n self.pity = kwargs.get('pity', Influence())\n self.remorse = kwargs.get('remorse', Influence())\n self.reproach = kwargs.get('reproach', Influence())\n self.resentment = kwargs.get('resentment', Influence())\n self.shame = kwargs.get('shame', Influence())\n\n def occ(self, personality=OCEAN(), value=0):\n _val = self.map_value(value, self.minval, self.maxval)\n admiration = _val * self._get_att_factor(personality, self.admiration)\n gloating = _val * self._get_att_factor(personality, self.gloating)\n gratification = _val * self._get_att_factor(personality, self.gratification)\n gratitude = _val * self._get_att_factor(personality, self.gratitude)\n hope = _val * self._get_att_factor(personality, self.hope)\n happy_for = _val * self._get_att_factor(personality, self.happy_for)\n joy = _val * self._get_att_factor(personality, self.joy)\n liking = _val * self._get_att_factor(personality, self.liking)\n love = _val * self._get_att_factor(personality, self.love)\n pride = _val * self._get_att_factor(personality, self.pride)\n relief = _val * self._get_att_factor(personality, self.relief)\n satisfaction = _val * self._get_att_factor(personality, self.satisfaction)\n\n anger = _val * self._get_att_factor(personality, self.anger)\n disliking = _val * self._get_att_factor(personality, self.disliking)\n disappointment = _val * self._get_att_factor(personality, self.disappointment)\n distress = _val * self._get_att_factor(personality, self.distress)\n fear = _val * self._get_att_factor(personality, self.fear)\n fears_confirmed = _val * self._get_att_factor(personality, self.fears_confirmed)\n hate = _val * self._get_att_factor(personality, self.hate)\n pity = _val * self._get_att_factor(personality, self.pity)\n remorse = _val * self._get_att_factor(personality, self.remorse)\n reproach = _val * self._get_att_factor(personality, self.reproach)\n resentment = _val * self._get_att_factor(personality, self.resentment)\n shame = _val * self._get_att_factor(personality, self.shame)\n\n return OCC(admiration, gloating, gratification, gratitude, hope, happy_for, joy, liking, love, pride, relief, satisfaction,\n anger, disliking, disappointment, distress, fear, fears_confirmed, hate, pity, remorse, reproach, resentment, shame)\n\n def pad(self, personality=OCEAN(), value=0):\n return self.occ(personality, value).pad\n\n @staticmethod\n def _get_att_factor(personality=OCEAN(), influence=Influence()):\n openness = Sensor.map_value(personality.openness, 0, 
influence.openness.relation)\n conscientiousness = Sensor.map_value(personality.conscientiousness, 0, influence.conscientiousness.relation)\n extraversion = Sensor.map_value(personality.extraversion, 0, influence.extraversion.relation)\n agreeableness = Sensor.map_value(personality.agreeableness, 0, influence.agreeableness.relation)\n neuroticism = Sensor.map_value(personality.neuroticism, 0, influence.neuroticism.relation)\n\n values = [openness, conscientiousness, extraversion, agreeableness, neuroticism]\n weights = [influence.openness.weight,\n influence.conscientiousness.weight,\n influence.extraversion.weight,\n influence.agreeableness.weight,\n influence.neuroticism.weight]\n return np.average(values, axis=0, weights=weights)\n\n @staticmethod\n def map_value(value=0.0, in_min=0.0, in_max=1.0, out_min=0.0, out_max=1.0):\n return (float(value) - float(in_min)) * (float(out_max) - float(out_min)) / (\n float(in_max) - float(in_min)) + float(out_min)\n\n def __repr__(self):\n return '<Sensor #%s: %s>' % (self.id, self.name)\n" ]
[ [ "numpy.average" ] ]
buvta/HarvestText
[ "870a00cba856a83a00105fcb4cc6de06387a9729" ]
[ "harvesttext/word_discover.py" ]
[ "import jieba\nimport jieba.analyse\nimport logging\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom .resources import get_baidu_stopwords\nfrom .algorithms.word_discoverer import WordDiscoverer\nfrom .algorithms.entity_discoverer import NFLEntityDiscoverer, NERPEntityDiscover\nfrom .algorithms.keyword import textrank\n\nclass WordDiscoverMixin:\n \"\"\"\n 新词、关键词发现模块:\n - 基于凝聚度和左右熵的新词发现\n - 基于模式的专有名词发现\n - 命名实体识别\n - 实验性质的实体别名发现算法\n \"\"\"\n def word_discover(self, doc, threshold_seeds=[], auto_param=True,\n excluding_types=[], excluding_words='baidu_stopwords', # 可以排除已经登录的某些种类的实体,或者某些指定词\n max_word_len=5, min_freq=0.00005, min_entropy=1.4, min_aggregation=50,\n ent_threshold=\"both\", mem_saving=None, sort_by='freq', exclude_number=True):\n '''新词发现,基于 http://www.matrix67.com/blog/archives/5044 实现及微调\n\n :param doc: (string or list) 待进行新词发现的语料,如果是列表的话,就会自动用换行符拼接\n :param threshold_seeds: list of string, 设定能接受的“质量”最差的种子词,更差的词语将会在新词发现中被过滤\n :param auto_param: bool, 使用默认的算法参数\n :param excluding_types: list of str, 设定要过滤掉的特定词性或已经登录到ht的实体类别\n :param excluding_words: list of str, 设定要过滤掉的特定词\n :param max_word_len: 允许被发现的最长的新词长度\n :param min_freq: 被发现的新词,在给定文本中需要达到的最低频率\n :param min_entropy: 被发现的新词,在给定文本中需要达到的最低左右交叉熵\n :param min_aggregation: 被发现的新词,在给定文本中需要达到的最低凝聚度\n :param ent_threshold: \"both\": (默认)在使用左右交叉熵进行筛选时,两侧都必须超过阈值; \"avg\": 两侧的平均值达到阈值即可\n :param mem_saving: bool or None, 采用一些过滤手段来减少内存使用,但可能影响速度。如果不指定,对长文本自动打开,而对短文本不使用\n :param sort_by: 以下string之一: {'freq': 词频, 'score': 综合分数, 'agg':凝聚度} 按照特定指标对得到的词语信息排序,默认使用词频\n :param exclude_number: (默认True)过滤发现的纯数字新词\n :return: info: 包含新词作为index, 以及对应各项指标的DataFrame\n '''\n if type(doc) != str:\n doc = \"\\n\".join(doc)\n # 采用经验参数,此时后面的参数设置都无效\n if auto_param: # 根据自己的几个实验确定的参数估计值,没什么科学性,但是应该能得到还行的结果\n length = len(doc)\n min_entropy = np.log(length) / 10\n min_freq = min(0.00005, 20.0 / length)\n min_aggregation = np.sqrt(length) / 15\n mem_saving = bool(length > 300000) if mem_saving is None else mem_saving\n # ent_threshold: 确定左右熵的阈值对双侧都要求\"both\",或者只要左右平均值达到\"avg\"\n # 对于每句话都很极短的情况(如长度<8),经常出现在左右边界的词语可能难以被确定,这时ent_threshold建议设为\"avg\"\n mem_saving = False if mem_saving is None else mem_saving\n\n try:\n ws = WordDiscoverer(doc, max_word_len, min_freq, min_entropy, min_aggregation, ent_threshold, mem_saving)\n except Exception as e:\n logging.log(logging.ERROR, str(e))\n info = {\"text\": [], \"freq\": [], \"left_ent\": [], \"right_ent\": [], \"agg\": []}\n info = pd.DataFrame(info)\n info = info.set_index(\"text\")\n return info\n\n if len(excluding_types) > 0:\n if \"#\" in list(excluding_types)[0]: # 化为无‘#’标签\n excluding_types = [x[1:-1] for x in excluding_types]\n ex_mentions = set(x for enty in self.entity_mention_dict\n if enty in self.entity_type_dict and\n self.entity_type_dict[enty] in excluding_types\n for x in self.entity_mention_dict[enty])\n else:\n ex_mentions = set()\n assert excluding_words == 'baidu_stopwords' or (hasattr(excluding_words, '__iter__') and type(excluding_words) != str)\n if excluding_words == 'baidu_stopwords':\n ex_mentions |= get_baidu_stopwords()\n else:\n ex_mentions |= set(excluding_words)\n\n info = ws.get_df_info(ex_mentions, exclude_number)\n\n # 利用种子词来确定筛选优质新词的标准,种子词中最低质量的词语将被保留(如果一开始就被找到的话)\n if len(threshold_seeds) > 0:\n min_score = 100000\n for seed in threshold_seeds:\n if seed in info.index:\n min_score = min(min_score, info.loc[seed, \"score\"])\n if (min_score >= 100000):\n min_score = 0\n else:\n min_score *= 0.9 # 
留一些宽松的区间\n info = info[info[\"score\"] > min_score]\n if sort_by:\n info.sort_values(by=sort_by, ascending=False, inplace=True)\n\n return info\n\n def find_entity_with_rule(self, text, rulesets=[], add_to_dict=True, type0=\"添加词\"):\n '''利用规则从分词结果中的词语找到实体,并可以赋予相应的类型再加入实体库\n\n :param text: string, 一段文本\n :param rulesets: list of (tuple of rules or single rule) from match_patterns,\n list中包含多个规则,满足其中一种规则的词就认为属于这个type\n 而每种规则由tuple或单个条件(pattern)表示,一个词必须满足其中的一个或多个条件。\n :param add_to_dict: 是否把找到的结果直接加入词典\n :param type0: 赋予满足条件的词语的实体类型, 仅当add_to_dict时才有意义\n :return: found_entities\n\n '''\n found_entities = set()\n for word in self.seg(text):\n for ruleset in rulesets: # 每个ruleset是或关系,只要满足一个就添加并跳过其他\n toAdd = True\n if type(ruleset) == type((1, 2)): # tuple\n for pattern0 in ruleset:\n if not pattern0(word):\n toAdd = False\n break\n else: # single rule\n pattern0 = ruleset\n if not pattern0(word):\n toAdd = False\n if toAdd:\n found_entities.add(word)\n break\n if add_to_dict:\n for entity0 in found_entities:\n self.add_new_entity(entity0, entity0, type0)\n self.prepare()\n return found_entities\n\n def named_entity_recognition(self, sent, standard_name=False, return_posseg=False):\n '''利用pyhanlp的命名实体识别,找到句子中的(人名,地名,机构名,其他专名)实体。harvesttext会预先链接已知实体\n\n :param sent: string, 文本\n :param standard_name: bool, 是否把连接到的已登录转化为标准名\n :param return_posseg: bool, 是否返回包括命名实体识别的,带词性分词结果\n :param book: bool, 预先识别\n :return: entity_type_dict: 发现的命名实体信息,字典 {实体名: 实体类型}\n (return_posseg=True时) possegs: list of (单词, 词性)\n '''\n from pyhanlp import HanLP, JClass\n if not self.hanlp_prepared:\n self.hanlp_prepare()\n self.standard_name = standard_name\n entities_info = self.entity_linking(sent)\n sent2 = self.decoref(sent, entities_info)\n StandardTokenizer = JClass(\"com.hankcs.hanlp.tokenizer.StandardTokenizer\")\n StandardTokenizer.SEGMENT.enableAllNamedEntityRecognize(True)\n entity_type_dict = {}\n try:\n possegs = []\n for x in StandardTokenizer.segment(sent2):\n # 三种前缀代表:人名(nr),地名(ns),机构名(nt)\n tag0 = str(x.nature)\n if tag0.startswith(\"nr\"):\n entity_type_dict[x.word] = \"人名\"\n elif tag0.startswith(\"ns\"):\n entity_type_dict[x.word] = \"地名\"\n elif tag0.startswith(\"nt\"):\n entity_type_dict[x.word] = \"机构名\"\n elif tag0.startswith(\"nz\"):\n entity_type_dict[x.word] = \"其他专名\"\n possegs.append((x.word, tag0))\n except:\n pass\n if return_posseg:\n return entity_type_dict, possegs\n else:\n return entity_type_dict\n def entity_discover(self, text, return_count=False, method=\"NFL\", min_count=5, pinyin_tolerance=0, **kwargs):\n \"\"\"无监督地从较大量文本中发现实体的类别和多个同义mention。建议对千句以上的文本来挖掘,并且文本的主题比较集中。\n 效率:在测试环境下处理一个约10000句的时间大约是20秒。另一个约200000句的语料耗时2分半\n 精度:算法准确率不高,但是可以初步聚类,建议先save_entities后, 再进行手动进行调整,然后load_entities再用于进一步挖掘\n\n ref paper: Mining Entity Synonyms with Efficient Neural Set Generation(https://arxiv.org/abs/1811.07032v1)\n\n :param text: string or list of string\n :param return_count: (default False) 是否再返回每个mention的出现次数\n :param method: 使用的算法, 目前可选 \"NFL\" (NER+Fasttext+Louvain+模式修复,基于语义和规则发现同义实体,但可能聚集过多错误实体), \"NERP\"(NER+模式修复, 仅基于规则发现同义实体)\n :param min_count: (default 5) mininum freq of word to be included\n :param pinyin_tolerance: {None, 0, 1} 合并拼音相同(取0时)或者差别只有一个(取1时)的候选词到同一组实体,默认使用(0)\n :param kwargs: 根据算法决定的参数,目前, \"NERP\"不需要额外参数,而\"NFL\"可接受的额外参数有:\n\n emb_dim: (default 50) fasttext embedding's dimensions\n\n threshold: (default 0.98) [比较敏感,调参重点]larger for more entities, threshold for add an edge between 2 entities if cos_dim exceeds\n\n ft_iters: (default 20) larger for more entities, num of 
iterations used by fasttext\n\n use_subword: (default True) whether to use fasttext's subword info\n\n min_n: (default 1) min length of used subword\n\n max_n: (default 4) max length of used subword\n\n :return: entity_mention_dict, entity_type_dict\n \"\"\"\n text = text if type(text) == str else \"\\n\".join(text)\n method = method.upper()\n assert method in {\"NFL\", \"NERP\"}\n # discover candidates with NER\n print(\"Doing NER\")\n sent_words = []\n type_entity_dict = defaultdict(set)\n entity_count = defaultdict(int)\n wd_count = defaultdict(int)\n for sent in tqdm(self.cut_sentences(text)):\n NERs0, possegs = self.named_entity_recognition(sent, return_posseg=True)\n sent_wds0 = []\n for wd, pos in possegs:\n if wd in NERs0:\n zh_pos = NERs0[wd]\n entity_name = wd.lower() + \"_\" + zh_pos\n type_entity_dict[zh_pos].add(entity_name)\n sent_wds0.append(entity_name)\n entity_count[entity_name] += 1\n else:\n sent_wds0.append(wd)\n wd_count[wd] += 1\n sent_words.append(sent_wds0)\n\n entity_count = pd.Series(entity_count)\n entity_count = entity_count[entity_count >= min_count]\n pop_words_cnt = {wd:cnt for wd, cnt in wd_count.items() if cnt >= min_count}\n id2word = entity_count.index.tolist()\n word2id = {wd: i for (i, wd) in enumerate(id2word)}\n\n type_entity_dict2 = {k: list(v) for k, v in type_entity_dict.items()}\n if method == \"NFL\":\n discoverer = NFLEntityDiscoverer(sent_words, type_entity_dict2, entity_count, pop_words_cnt, word2id, id2word,\n min_count, pinyin_tolerance, self.pinyin_adjlist, **kwargs)\n elif method == \"NERP\":\n discoverer = NERPEntityDiscover(sent_words, type_entity_dict2, entity_count, pop_words_cnt, word2id, id2word,\n min_count, pinyin_tolerance, self.pinyin_adjlist, **kwargs)\n entity_mention_dict, entity_type_dict = discoverer.entity_mention_dict, discoverer.entity_type_dict\n mention_count = discoverer.mention_count # 新添加的mention的count在discoverer里更新\n if return_count:\n return entity_mention_dict, entity_type_dict, mention_count\n else:\n return entity_mention_dict, entity_type_dict\n \n def extract_keywords(self, text, topK, with_score=False, min_word_len=2, stopwords=\"baidu\", allowPOS=\"default\", method=\"jieba_tfidf\", **kwargs):\n \"\"\"用各种算法抽取关键词(目前均为无监督),结合了ht的实体分词来提高准确率\n\n 目前支持的算法类型(及额外参数):\n\n - jieba_tfidf: (默认)jieba自带的基于tfidf的关键词抽取算法,idf统计信息来自于其语料库\n - textrank: 基于textrank的关键词抽取算法\n - block_type: 默认\"doc\"。 支持三种级别,\"sent\", \"para\", \"doc\",每个block之间的临近词语不建立连边\n - window: 默认2, 邻接的几个词语之内建立连边\n - weighted: 默认False, 时候使用加权图计算textrank\n - 构建词图时会过滤不符合min_word_len, stopwords, allowPOS要求的词语\n\n :params text: 从中挖掘关键词的文档\n :params topK: int, 从每个文档中抽取的关键词(最大)数量\n :params with_score: bool, 默认False, 是否同时返回算法提供的分数(如果有的话)\n :params min_word_len: 默认2, 被纳入关键词的词语不低于此长度\n :param stopwords: 字符串列表/元组/集合,或者'baidu'为默认百度停用词,在算法中引入的停用词,一般能够提升准确度\n :params allowPOS: iterable of str,关键词应当属于的词性,默认为\"default\" {'n', 'ns', 'nr', 'nt', 'nz', 'vn', 'v', 'an', 'a', 'i'}以及已登录的实体词类型\n :params method: 选择用于抽取的算法,目前支持\"jieba_tfidf\", \"tfidf\", \"textrank\"\n :params kwargs: 其他算法专属参数\n\n\n \"\"\"\n assert method in {\"jieba_tfidf\", \"textrank\"}, print(\"目前不支持的算法\")\n if allowPOS == 'default':\n # ref: 结巴分词标注兼容_ICTCLAS2008汉语词性标注集 https://www.cnblogs.com/hpuCode/p/4416186.html\n allowPOS = {'n', 'ns', 'nr', 'nt', 'nz', 'vn', 'v', 'an', 'a', 'i'}\n else:\n assert hasattr(allowPOS, \"__iter__\")\n # for HT, we consider registered entity types specifically\n allowPOS |= set(self.type_entity_mention_dict)\n\n assert stopwords == 'baidu' or (hasattr(stopwords, '__iter__') and 
type(stopwords) != str)\n stopwords = get_baidu_stopwords() if stopwords == 'baidu' else set(stopwords)\n \n if method == \"jieba_tfidf\":\n kwds = jieba.analyse.extract_tags(text, topK=int(2*topK), allowPOS=allowPOS, withWeight=with_score)\n if with_score:\n kwds = [(kwd, score) for (kwd, score) in kwds if kwd not in stopwords][:topK]\n else:\n kwds = kwds[:topK]\n elif method == \"textrank\":\n block_type = kwargs.get(\"block_type\", \"doc\")\n assert block_type in {\"sent\", \"para\", \"doc\"}\n window = kwargs.get(\"window\", 2)\n weighted = kwargs.get(\"weighted\", True)\n if block_type == \"doc\":\n blocks = [text]\n elif block_type == \"para\":\n blocks = [para.strip() for para in text.split(\"\\n\") if para.strip() != \"\"]\n elif block_type == \"sent\":\n blocks = self.cut_sentences(text)\n block_pos = (self.posseg(block.strip(), stopwords=stopwords) for block in blocks)\n block_words = [[wd for wd, pos in x \n if pos in allowPOS and len(wd) >= min_word_len] \n for x in block_pos]\n kwds = textrank(block_words, topK, with_score, window, weighted)\n \n return kwds\n\n \n " ]
[ [ "numpy.log", "pandas.Series", "numpy.sqrt", "pandas.DataFrame" ] ]
Originofamonia/pylon
[ "e26202b2c1cfbb8b5c444f840763f0ce839f048a" ]
[ "trainer/callbacks/base_cb.py" ]
[ "import os\nfrom collections import defaultdict\nfrom functools import partial\n\nimport pandas as pd\nimport torch\n\nfrom ..numpy_writer import *\nfrom ..params_grads import *\nfrom ..save import *\nfrom ..stateful import *\nfrom ..types import *\n\n\ndef set_order(order):\n \"\"\"decorator to set callback's method order\n usage: \n @set_order(100)\n def method(self):\n pass\n \"\"\"\n def inner(meth):\n def fn(*args, **kwargs):\n return meth(*args, **kwargs)\n\n fn._order = order\n return fn\n\n return inner\n\n\nclass Callback(Stateful):\n \"\"\"\n when not to use callbacks:\n - if it is required for correct forward pass of a model, that should be in trainer\n \"\"\"\n _order = 100\n\n def save(self, path: str):\n \"\"\"save state to file\"\"\"\n if self.is_state_empty():\n # don't need to save empty state\n return\n dirname = os.path.dirname(path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n safe_torch_save(self.get_state(), path)\n\n def load(self, path: str, map_location=None):\n \"\"\"load state from file\"\"\"\n if self.is_state_empty():\n # this cb doesn't need a state\n # caution: cb that needs a state must have the \"footprint\"\n # of the states, so that it would not be empty at first!\n # unless it will not be loaded!\n return\n self.load_state(torch.load(path, map_location=map_location))\n\n def on_train_begin(self, **kwargs):\n pass\n\n def on_ep_begin(self, **kwargs):\n pass\n\n def on_batch_begin(self, **kwargs):\n pass\n\n def on_forward_begin(self, **kwargs):\n pass\n\n def on_forward_end(self, **kwargs):\n pass\n\n def on_backward_begin(self, **kwargs):\n pass\n\n def on_backward_end(self, **kwargs):\n pass\n\n def on_step_begin(self, **kwargs):\n pass\n\n def on_step_end(self, **kwargs):\n pass\n\n def on_batch_end(self, **kwargs):\n pass\n\n def on_ep_end(self, **kwargs):\n pass\n\n def on_train_end(self, **kwargs):\n pass\n\n def on_abrupt_end(self, **kwargs):\n pass\n\n def __str__(self):\n return self.__repr__()\n\n\nclass StatsCallback(Callback):\n \"\"\"\n base class for callbacks that keep stats as \"history\" \n \"\"\"\n def __init__(self, n_log_cycle=1):\n super().__init__()\n self.n_log_cycle = n_log_cycle\n self._state['hist'] = defaultdict(list)\n # to be put to the progress bar (only)\n self.stats = {}\n # to be put to the history\n # we have buffer so that, we can update many times per iteration\n # not need to collect everything first\n self.buffer = {}\n\n @classmethod\n def collect_latest(cls, callbacks):\n \"\"\"collect latest entries from all callbacks (that is StatsCallback), excluding i_itr\"\"\"\n out = {}\n for cb in callbacks:\n if isinstance(cb, StatsCallback):\n for k, v in cb.last_hist.items():\n if k != 'i_itr':\n out[k] = v\n return out\n\n @classmethod\n def combine_callbacks(cls, callbacks):\n \"\"\"merge dataframes from callbacks\"\"\"\n out = None\n for cb in callbacks:\n if isinstance(cb, StatsCallback):\n df = cb.df\n if 'i_itr' in df:\n if out is None:\n out = cb.df\n else:\n out = pd.merge(out, cb.df, on='i_itr', how='outer')\n return out\n\n @property\n def hist(self):\n return self._state['hist']\n\n @property\n def last_hist(self) -> Dict:\n return {k: v[-1] for k, v in self.hist.items()}\n\n @property\n def df(self):\n return pd.DataFrame(self.hist)\n\n def on_batch_begin(self, **kwargs):\n # clear the buffer\n self.buffer = {}\n\n def on_batch_end(self, **kwargs):\n \"\"\"auto-flush after each iteration.\n don't forget to flush if you overload this method.\"\"\"\n self._flush()\n\n def add_to_bar(self, d):\n 
\"\"\"update the stats which shall be shown in the progress bar (only)\"\"\"\n assert 'i_itr' in d\n i_itr = d['i_itr']\n if self.is_log_cycle(i_itr):\n d = self._eval(d)\n self.stats.update(_strip(d))\n\n def add_to_bar_and_hist(self, d):\n \"\"\"both update the progress bar and write to the buffer (history), don't forget to flush\"\"\"\n assert 'i_itr' in d\n i_itr = d['i_itr']\n if self.is_log_cycle(i_itr):\n d = self._eval(d)\n self.stats.update(_strip(d))\n self.buffer.update(_strip(d))\n\n def add_to_hist(self, d):\n \"\"\"buffer before putting into the history after flushing\"\"\"\n assert 'i_itr' in d\n i_itr = d['i_itr']\n if self.is_log_cycle(i_itr):\n d = self._eval(d)\n self.buffer.update(_strip(d))\n\n def _flush(self):\n \"\"\"save the buffer to history\"\"\"\n d = self.buffer\n if len(d) > 0:\n assert 'i_itr' in d, f'i_itr is not present in {self}'\n _append_dict(self.hist, d)\n # should not clear the buffer,\n # it might be used by others\n\n def is_log_cycle(self, i_itr):\n return i_itr % self.n_log_cycle == 0\n\n def _eval(self, d):\n for k, v in d.items():\n d[k] = _get_val(v)\n return d\n\n\ndef _strip(x):\n \"\"\"remvoe tensor-hood from the input structure\"\"\"\n if isinstance(x, Tensor):\n x = x.item()\n elif isinstance(x, dict):\n x = {k: _strip(v) for k, v in x.items()}\n return x\n\n\nclass BoardCallback(StatsCallback):\n \"\"\"writes into a tensorboard\"\"\"\n def __init__(self, n_log_cycle: int = 1, n_log_hist_cycle=None):\n super().__init__()\n self.writer = None\n self.n_log_cycle = n_log_cycle\n if n_log_hist_cycle is None:\n self.n_log_hist_cycle = n_log_cycle\n else:\n self.n_log_hist_cycle = n_log_hist_cycle\n\n def on_train_begin(self, callbacks, **kwargs):\n \"\"\"automatically discovers the tensorboard cb\"\"\"\n for cb in callbacks:\n if isinstance(cb, TensorboardCb):\n self.writer = cb.writer\n\n def add_to_bar(self, d):\n \"\"\"update the stats which shall be shown in the progress bar (only)\"\"\"\n self.add_to_board(d)\n super().add_to_bar(d)\n\n def add_to_bar_and_hist(self, d):\n \"\"\"both update the progress bar and write to the buffer (history), don't forget to flush\"\"\"\n self.add_to_board(d)\n super().add_to_bar_and_hist(d)\n\n def add_to_hist(self, d):\n \"\"\"buffer before putting into the history after flushing\"\"\"\n self.add_to_board(d)\n super().add_to_hist(d)\n\n def add_to_board(self, d):\n \"\"\"write a dictionary to tensorboard\"\"\"\n assert 'i_itr' in d\n i_itr = d['i_itr']\n if self.is_log_cycle(i_itr):\n d = self._eval(d)\n for k, v in d.items():\n self.add_to_board_scalar(k, v, i_itr)\n\n def add_to_board_scalar(self, name, val, i_itr):\n \"\"\"write a scalar to tensorboard\"\"\"\n if self.should_write(i_itr):\n self.writer.add_scalar(name, _get_val(val), i_itr)\n\n def add_to_board_histogram(self, name, val, i_itr):\n if self.should_write_histogram(i_itr):\n self.writer.add_histogram(name, _get_val(val), i_itr)\n\n def should_write(self, i_itr):\n return self.writer is not None and self.is_log_cycle(i_itr)\n\n def should_write_histogram(self, i_itr):\n return self.writer is not None and self.is_log_cycle_hist(i_itr)\n\n def is_log_cycle_hist(self, i_itr):\n return i_itr % self.n_log_hist_cycle == 0\n\n\nclass NumpyWriterCb(Callback):\n \"\"\"if this is present, boardcallback will write into the tensorboard\"\"\"\n # make sure it initializes before others use it\n _order = 90\n\n def __init__(self, path, n_max_width=1000, **kwargs):\n super().__init__(**kwargs)\n self.path = path\n self.n_max_width = n_max_width\n 
self.np_writer = None\n\n def on_train_begin(self, **kwargs):\n self.np_writer = NumpyWriter(self.path, n_max_width=self.n_max_width)\n\n def on_train_end(self, **kwargs):\n if self.np_writer is not None:\n self.np_writer.flush()\n self.np_writer.close()\n\n\nclass BaseNumpyWriterCb(Callback):\n def __init__(self, n_log_hist_cycle: int):\n super().__init__()\n self.np_writer = None\n self.n_log_hist_cycle = n_log_hist_cycle\n\n def on_train_begin(self, callbacks, **kwargs):\n \"\"\"automatically discovers the tensorboard cb\"\"\"\n for cb in callbacks:\n if isinstance(cb, NumpyWriterCb):\n self.np_writer = cb.np_writer\n\n def write_hist(self, name, val, i_itr):\n if i_itr % self.n_log_hist_cycle == 0:\n if self.np_writer is not None:\n self.np_writer.write_hist(name, _get_val(val), i_itr)\n\n def on_batch_end(self, trainer, i_itr, **kwargs):\n if self.np_writer is not None:\n self.np_writer.flush()\n\n\nclass NumpyWeightHistCb(BaseNumpyWriterCb):\n def __init__(self, n_log_hist_cycle: int):\n super().__init__(n_log_hist_cycle)\n\n def on_batch_end(self, trainer, i_itr, **kwargs):\n self.write_hist('weight',\n lambda: params_to_vec(trainer.net.parameters()), i_itr)\n super().on_batch_end(trainer=trainer, i_itr=i_itr, **kwargs)\n\n\nclass TensorboardCb(Callback):\n \"\"\"if this is present, boardcallback will write into the tensorboard\n the path will be extended with a unique random string.\n \n Args:\n resume: if True, use the previous random string; else use a new random string\n \"\"\"\n def __init__(self, path, resume=True):\n super().__init__()\n self._state['start_time'] = self.start_time_string()\n self.path = path\n self.resume = resume\n self.writer = None\n\n def start_time_string(self):\n \"\"\"this string will be extended to the path to create a unique path\"\"\"\n from datetime import datetime\n return datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n # make sure it initializes before others (normal) use it\n # but it should be \"after\" the autoresume\n @set_order(91)\n def on_train_begin(self, **kwargs):\n from torch.utils.tensorboard import SummaryWriter\n if not self.resume:\n # if not resume re-generate the string\n self._state['start_time'] = self.start_time_string()\n self.writer = SummaryWriter(self.path + '/' +\n self._state['start_time'],\n flush_secs=10)\n\n def on_train_end(self, **kwargs):\n if self.writer is not None:\n self.writer.close()\n\n\ndef get_val_from_statcbs(key, callbacks):\n for cb in callbacks:\n if isinstance(cb, StatsCallback):\n if key in cb.stats:\n v = cb.stats[key]\n return v\n raise ValueError(f'{key} not found')\n\n\ndef _get_val(v):\n \"\"\"get val from a function or a value\"\"\"\n if callable(v):\n return v()\n return v\n\n\ndef _append_dict(dict_of_list, dict):\n \"\"\"\n append a dict into a dict of lists\n before doing that, all lists should have the same size first!\n append None to the smaller lists.\n \"\"\"\n def fill_na():\n lengths = [len(v) for v in dict_of_list.values()]\n if len(lengths) == 0:\n max_len = 0\n else:\n max_len = max(lengths)\n\n # equate the dict sizes with None\n for k in dict_of_list:\n while len(dict_of_list[k]) < max_len:\n dict_of_list[k].append(None)\n\n fill_na()\n for k, v in dict.items():\n dict_of_list[k].append(v)\n fill_na()\n\n\ndef callback_call(callbacks: List[Callback], method: str, kwargs):\n \"\"\"call a list of callbacks\"\"\"\n if callbacks is None:\n return\n if not isinstance(callbacks, list):\n callbacks = [callbacks]\n # ignore None callbacks\n callbacks = [cb for cb in callbacks if cb is 
not None]\n\n # the final return is the \"OR\" of all return values\n out = None\n for cb in sorted(callbacks, key=partial(_get_cb_order, meth=method)):\n fn = getattr(cb, method, None)\n assert fn is not None, f'the callback {cb} does not have {method}'\n if fn is not None:\n try:\n res = fn(**kwargs)\n assert res is None or isinstance(\n res, bool\n ), f'returns from the callback {cb} must be either None or a boolean'\n except TypeError as e:\n print(f'type error: {e} ... at {cb}')\n raise e\n except Exception as e:\n print(f'error {e} ... at {cb}')\n raise e\n\n if res is not None:\n if out is None: out = res\n out |= res\n return out\n\n\ndef _get_cb_order(cb, meth):\n fn = getattr(cb, meth, None)\n if fn is None:\n return cb._order\n # return the method's order (if not use the cb's order)\n order = getattr(fn, '_order', cb._order)\n return order\n\n\nif __name__ == \"__main__\":\n a = StatsCallback()\n a.add_to_bar_and_hist({'a': 10, 'i_itr': 1})\n print(a.stats)\n" ]
[ [ "pandas.merge", "torch.utils.tensorboard.SummaryWriter", "pandas.DataFrame", "torch.load" ] ]
WhiteboardLiveCoding/ImageSegmentation
[ "bf0f47320a2a455d21a4f5dc1163f1bb8157989c" ]
[ "image_segmentation/picture.py" ]
[ "import logging\n\nimport cv2\nimport numpy as np\n\nfrom image_segmentation.extended_image import ExtendedImage\nfrom image_segmentation.line import Line\n\nLOGGER = logging.getLogger()\n\n\nclass Picture(ExtendedImage):\n INDENTATION_THRESHOLD = 50\n ARTIFACT_PERCENTAGE_THRESHOLD = 0.08\n MINIMUM_LINE_OVERLAP = 0.25\n\n def __init__(self, image, x_axis, y_axis, width, height, preferences=None):\n super().__init__(image, x_axis, y_axis, width, height, preferences)\n\n self.lines = []\n self.indentation_threshold = self.INDENTATION_THRESHOLD\n\n if self.preferences and self.preferences.show_pic:\n cv2.imshow(\"Full picture\", image)\n cv2.waitKey(0)\n\n def get_line(self, n):\n if 0 >= n or n > len(self.lines):\n return None\n\n return self.lines[n - 1]\n\n def get_line_coordinates(self, n):\n line = self.get_line(n)\n\n if line:\n return line.get_bounding_box()\n else:\n return {}\n\n def get_character_coordinates(self, n, p):\n line = self.get_line(n)\n\n if line:\n return line.get_character_coordinates(p)\n else:\n return {}\n\n def get_segments(self):\n self.lines = self._segment_image(self.get_image())\n LOGGER.debug(\"Getting code for the %d lines detected.\", len(self.lines))\n return self.lines\n\n def get_indentation_threshold(self):\n return self.indentation_threshold\n\n def _segment_image(self, gray_image):\n lines = []\n img = self.get_contoured(gray_image)\n\n sorted_ctrs = self._find_contours(img)\n sorted_ctrs = self._merge_subcontours(sorted_ctrs)\n\n if not sorted_ctrs:\n return []\n\n # Get average height and width of all lines\n average_width = sum(cv2.boundingRect(ctr)[2] for i, ctr in enumerate(sorted_ctrs)) / len(sorted_ctrs)\n average_height = sum(cv2.boundingRect(ctr)[3] for i, ctr in enumerate(sorted_ctrs)) / len(sorted_ctrs)\n\n for i, ctr in enumerate(sorted_ctrs):\n # Get bounding box\n x_axis, y_axis, width, height = cv2.boundingRect(ctr)\n\n # Discard lines which have a very small width or height (based on the threshold)\n if width < (average_width * self.ARTIFACT_PERCENTAGE_THRESHOLD) or \\\n height < (average_height * self.ARTIFACT_PERCENTAGE_THRESHOLD):\n continue\n\n roi = gray_image[y_axis:y_axis + height, x_axis:x_axis + width]\n mask = self._get_mask(img, sorted_ctrs, i)[y_axis:y_axis + height, x_axis:x_axis + width]\n\n result = cv2.bitwise_and(roi, roi, mask=mask)\n\n if len(self._find_contours(result)) >= 2:\n lines.append(Line(result, x_axis, y_axis, width, height, self.preferences))\n\n # Sort lines based on y offset\n lines = sorted(lines, key=lambda line: line.get_y())\n LOGGER.debug(\"%d lines detected.\", len(lines))\n return lines\n\n def _get_mask(self, img, contours, contour_index):\n mask = np.zeros_like(img)\n cv2.drawContours(mask, contours, contour_index, 255, -1)\n return mask\n\n def get_contoured(self, gray_image):\n img = np.copy(gray_image)\n\n points, used_contours = self.get_center_points(gray_image)\n average_distance, standard_deviation = self.average_node_distance(points)\n\n self.indentation_threshold = average_distance\n horizontal_distance = int(1.5 * average_distance + 2 * standard_deviation)\n\n for ctr, point in zip(used_contours, points):\n x_axis, y_axis, width, height = cv2.boundingRect(ctr)\n x_center, y_center = point[0], point[1]\n\n minimum_height = round(0.9 * min(y_center - y_axis, y_axis + height - y_center))\n\n cv2.rectangle(\n img,\n (x_center - horizontal_distance, y_center - minimum_height),\n (x_center + horizontal_distance, y_center + minimum_height),\n (255, 255, 255),\n -1\n )\n\n return img\n\n def 
_merge_subcontours(self, sorted_ctrs):\n merged = []\n for i, ctr in enumerate(sorted_ctrs):\n x1, y1, width1, height1 = cv2.boundingRect(ctr)\n\n remove = None\n add = True\n\n for merged_ctr in merged:\n x2, y2, width2, height2 = cv2.boundingRect(merged_ctr)\n\n if (x1 <= x2 and y1 <= y2 and x1 + width1 >= x2 + width2 and y1 + height1 >= y2 + height2) or \\\n (y1 < y2 < y1 + height1 and (y1 + height1 - y2) / height1 > self.MINIMUM_LINE_OVERLAP):\n merged.append(np.concatenate((ctr, merged_ctr), axis=0))\n remove = merged_ctr\n add = False\n break\n\n if add:\n merged.append(ctr)\n else:\n merged = [x for x in merged if x.shape != remove.shape or not np.equal(x, remove).all()]\n\n return merged\n" ]
[ [ "numpy.concatenate", "numpy.copy", "numpy.zeros_like", "numpy.equal" ] ]
kashifcap/image_captioning
[ "233ff629eb7424eea88bd49b4b9d701f247dc111" ]
[ "utils/image_processing.py" ]
[ "import math\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image, ImageOps\nfrom keras.preprocessing import image\nfrom keras.applications.inception_v3 import preprocess_input\n\n#%%\n\n# Image-Preprocessing Function : takes path of the image as the only argument\ndef preprocess(image_path):\n \n #Loading Image file\n img=tf.keras.preprocessing.image.load_img(image_path)\n \n #Resizing image to 299x299px for feeding it into InceptionV3 Model\n img = img.resize((299,299),resample=0)\n \n #Converting image into numpy array and returning the array\n x=image.img_to_array(img)\n x=np.expand_dims(x,axis=0)\n x=preprocess_input(x)\n return x\n\t\n#%%\n \n# Image-Encoding Function : takes PIL image and the trained model as the two arguments\ndef encode(image, model_new):\n \n #Pre-processing image using the 'preprocess' function\n image=preprocess(image)\n \n #Creating, reshaping and returning the numpy feature-vector of the image\n fea_vec=model_new.predict(image)\n fea_vec=np.reshape(fea_vec,fea_vec.shape[1])\n return fea_vec" ]
[ [ "numpy.reshape", "numpy.expand_dims", "tensorflow.keras.preprocessing.image.load_img" ] ]
fcharras/copain
[ "19633e0240a50e69236328c1a03ba97e00cc74f0" ]
[ "copain/nn.py" ]
[ "import torch\nimport torch.nn as nn\nfrom copain.utils import WeightInitializer\n\n\nclass CopainANN(nn.Module):\n def __init__(\n self,\n n_actions,\n input_dim,\n nb_values_per_dim,\n starting_nb_embeddings,\n nb_embeddings_step,\n depth,\n embedding_size,\n hidden_dim,\n p_dropout,\n initialize_fn=None,\n initialize_fn_kwargs=None,\n ):\n super().__init__()\n\n self.n_actions = n_actions\n\n self._embedding_bag = _DynamicEmbeddingBag(\n input_dim,\n nb_values_per_dim,\n embedding_size,\n \"sum\",\n starting_nb_embeddings,\n nb_embeddings_step,\n initialize_fn,\n initialize_fn_kwargs,\n )\n\n feed_forward_steps = []\n for d in range(depth):\n in_dim = hidden_dim if (d > 0) else (embedding_size)\n out_dim = hidden_dim if (d < (depth - 1)) else n_actions\n feed_forward_steps.extend(\n [nn.Dropout(p_dropout), nn.ReLU(), nn.Linear(in_dim, out_dim)]\n )\n\n feed_forward_steps.extend([nn.Dropout(p_dropout), nn.ReLU()])\n\n self._feed_forward = nn.Sequential(*feed_forward_steps)\n\n self.apply(WeightInitializer(initialize_fn, initialize_fn_kwargs))\n\n def forward(self, X):\n return self._feed_forward(self._embedding_bag(X))\n\n\nclass _DynamicEmbeddingBag(nn.Module):\n\n # TODO: improve so that are registered only memory entries for which\n # at least 2 distincts values have been observed\n def __init__(\n self,\n input_dim,\n nb_values_per_dim,\n embedding_dim,\n mode,\n starting_nb_embeddings,\n nb_embeddings_step,\n initialize_fn,\n initialize_fn_kwargs,\n ):\n super().__init__()\n # 0 is a value for \"unknown\"\n self._embedding_dim = embedding_dim\n self._initializer = WeightInitializer(initialize_fn, initialize_fn_kwargs)\n self._embedding_bag = nn.EmbeddingBag(\n starting_nb_embeddings, embedding_dim, mode=mode, padding_idx=0\n )\n self._mapping = torch.nn.Parameter(\n torch.zeros((input_dim, nb_values_per_dim), dtype=torch.int32),\n requires_grad=False,\n )\n self._mapping_filtered = torch.nn.Parameter(\n torch.zeros((input_dim, nb_values_per_dim), dtype=torch.int32),\n requires_grad=False,\n )\n self._max_mapping_ = 0\n self._max_mapping = 0\n self.nb_embeddings_step = nb_embeddings_step\n\n def update_embeddings(self):\n if self._max_mapping > self._max_mapping_:\n # ignore the data for which only one value has been mapped, since this value alone is not\n # significant.\n self._mapping_filtered[:] = self._mapping[:]\n self._mapping_filtered[(self._mapping_filtered != 0).sum(1) <= 1] = 0\n self._max_mapping_ = self._max_mapping\n\n needed_nb_embeddings = self._max_mapping + 1\n current_nb_embeddings = self._embedding_bag.weight.shape[0]\n\n if current_nb_embeddings >= needed_nb_embeddings:\n return False\n\n needed_nb_embeddings += self.nb_embeddings_step\n\n current_device = self._embedding_bag.weight.device\n self._embedding_bag.to(\"cpu\")\n\n prev_embedding_bag_weight = self._embedding_bag.state_dict()[\"weight\"]\n del self._embedding_bag\n\n self._embedding_bag = nn.EmbeddingBag(\n needed_nb_embeddings, self._embedding_dim, mode=\"sum\", padding_idx=0\n )\n\n self._embedding_bag.apply(self._initializer)\n\n new_state = self._embedding_bag.state_dict()\n new_state[\"weight\"] = torch.vstack(\n (prev_embedding_bag_weight, new_state[\"weight\"][current_nb_embeddings:])\n )\n self._embedding_bag.load_state_dict(new_state)\n\n self._embedding_bag.to(current_device)\n\n print(\"%s embeddings\" % str(self._embedding_bag.weight.shape[0]))\n\n return True\n\n def _detect_unindexed_data(self, X, X_remapped, row_ix):\n if (X_remapped > 0).all():\n return\n\n # detect the indexes who are 
not mapped yet and schedule their registration\n X_remapped = self._mapping[row_ix, X]\n unmapped_row, unmapped_col = torch.nonzero(X_remapped == 0, as_tuple=True)\n\n if not unmapped_row.shape[0]:\n return\n\n unmapped_value = X[unmapped_row, unmapped_col]\n\n unmapped_value, unmapped_col = torch.unique(\n torch.vstack((unmapped_value.long(), unmapped_col.long())), dim=1\n )\n\n nb_unmapped_values = unmapped_value.shape[0]\n unmapped_value_index = torch.arange(\n nb_unmapped_values, dtype=torch.int32, device=X.device\n )\n self._mapping[unmapped_col, unmapped_value] = (\n self._max_mapping + unmapped_value_index + 1\n )\n self._max_mapping += nb_unmapped_values + 1\n\n def forward(self, X):\n \"\"\"X of size batch_size x ram_size and dtype int in (0-255)\"\"\"\n # first step: transform X to the new mapping\n row_ix = (\n torch.arange(X.shape[1], dtype=torch.int32).repeat((X.shape[0], 1)).long()\n )\n X = X.long()\n X_remapped = self._mapping_filtered[row_ix, X]\n if self.training:\n self._detect_unindexed_data(X, X_remapped, row_ix)\n\n return self._embedding_bag(X_remapped)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.zeros", "torch.vstack", "torch.nn.Linear", "torch.nonzero", "torch.arange", "torch.nn.ReLU", "torch.nn.EmbeddingBag" ] ]
areding/6420-pymc
[ "181ee40b8bf4a2c9fb237c4d388c4f62ea41bfeb" ]
[ "original_examples/Codes4Unit5/norcau.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nBayes estimator delta(x) for x=2,\r\nfor Normal-Cauchy Model\r\nCreated on Thu Dec 28 11:56:56 2017\r\n@author: bv20\r\n\"\"\"\r\n\r\nfrom scipy import integrate, inf, exp\r\nx = 2\r\nnum = lambda th: th * exp(-0.5*(x-th)**2)/(1+th**2)\r\ndenom = lambda th: exp(-0.5*(x-th)**2)/(1+th**2) \r\ndelta2 = integrate.quad(num,-inf,inf)[0]/integrate.quad(denom,-inf,inf)[0]\r\n#delta(2)\r\nprint(delta2) #1.2821951026935339\r\n\r\n# Errors\r\n\r\nnumerator =integrate.quad(num,-inf,inf)[0] #0.9159546679977636\r\ndenominator=integrate.quad(denom,-inf,inf)[0] #0.714364503556127\r\nerrnum=integrate.quad(num,-inf,inf)[1] #1.0415234856193602e-09\r\nerrdenom=integrate.quad(denom,-inf,inf)[1] #1.2022419107752649e-08\r\n\r\nerr = delta2 * (errnum/numerator + errdenom/denominator)\r\nprint(err) #2.3036713479165735e-08" ]
[ [ "scipy.integrate.quad", "scipy.exp" ] ]
Shumway82/tf_base
[ "09f60f773f14526281d36cf764c33e90791fb18d" ]
[ "tfcore/utilities/utils.py" ]
[ "import glob\nimport io\nimport json\nimport os\nimport re\nimport shutil\nimport zipfile\nfrom collections import deque\nfrom inspect import signature\nfrom keras import backend as K\n\nimport numpy as np\nimport scipy\nimport tensorflow as tf\nfrom tensorflow.contrib.framework.python.framework import checkpoint_utils\n\n\ndef reduce_var(x, axis=None, keepdims=False):\n \"\"\"Variance of a tensor, alongside the specified axis.\n\n # Arguments\n x: A tensor or variable.\n axis: An integer, the axis to compute the variance.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n # Returns\n A tensor with the variance of elements of `x`.\n \"\"\"\n m = tf.reduce_mean(x, axis=axis, keep_dims=True)\n devs_squared = tf.square(x - m)\n return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)\n\n\ndef reduce_std(x, axis=None, keepdims=False):\n \"\"\"Standard deviation of a tensor, alongside the specified axis.\n\n # Arguments\n x: A tensor or variable.\n axis: An integer, the axis to compute the standard deviation.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n # Returns\n A tensor with the standard deviation of elements of `x`.\n \"\"\"\n return tf.sqrt(reduce_var(x, axis=axis, keepdims=keepdims))\n\n\ndef multiple_one_hot(cat_int_tensor, depth_list):\n \"\"\"Creates one-hot-encodings for multiple categorical attributes and\n concatenates the resulting encodings\n\n Args:\n cat_tensor (tf.Tensor): tensor with mutiple columns containing categorical features\n depth_list (list): list of the no. 
of values (depth) for each categorical\n\n Returns:\n one_hot_enc_tensor (tf.Tensor): concatenated one-hot-encodings of cat_tensor\n \"\"\"\n one_hot_enc_tensor = tf.one_hot(cat_int_tensor[:, 0], depth_list[0], axis=1)\n for col in range(1, len(depth_list)):\n add = tf.one_hot(cat_int_tensor[:, col], depth_list[col], axis=1)\n one_hot_enc_tensor = tf.concat([one_hot_enc_tensor, add], axis=1)\n\n return one_hot_enc_tensor\n\n\ndef get_patches(input, patch_size=64, stride=0.0):\n size = [1, patch_size, patch_size, 1]\n patch_stride = [1, int(patch_size * (1 - stride)), int(patch_size * (1.0 - stride)), 1]\n patches = tf.extract_image_patches(input, size, patch_stride, [1, 1, 1, 1], 'VALID')\n return tf.reshape(patches, [-1, patch_size, patch_size, 3])\n\n\ndef conv_cond_concat(x, y):\n \"\"\" Concatenate conditioning vector on feature map axis.\n # Arguments\n x: 4D-Tensor\n y: 4D-Tensor\n\n # Return\n 4D-Tensor\n \"\"\"\n\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])\n\n\ndef add_noise(img_batch):\n \"\"\" Add noise to a tensor\n\n # Arguments\n img_batch: A batch of a images or feature-maps.\n\n # Return\n A 4D-Tensor\n \"\"\"\n for i in range(img_batch.shape[0]):\n noise = tf.random_normal(shape=tf.shape(img_batch.shape[1:]), mean=0.0, stddev=0.02,\n dtype=tf.float32)\n noise = np.clip(noise, -1., 1.)\n img_batch[i, :] += noise\n img_batch = tf.clip_by_value(img_batch, -1.0, 1.0)\n return img_batch\n\n\ndef downscale(x, factor):\n \"\"\" Downsale a Tensor\n\n # Arguments\n x: A 4D-Tensor\n factor: Scaling Factor as type of int\n\n # Return\n A downsceled 4D-Tensor\n \"\"\"\n arr = np.zeros([factor, factor, 3, 3])\n arr[:, :, 0, 0] = 1.0 / factor ** 2\n weight = tf.constant(arr, dtype=tf.float32)\n downscaled = tf.nn.conv2d(x, weight, strides=[1, factor, factor, 1], padding='SAME')\n return downscaled\n\n\ndef normalize_weights(weights, values_range=1.0):\n \"\"\" Normalize weights to a definded value\n\n # Arguments\n weights: 2D-Tensor\n values_range: Normalize that the sum of weights corresponds to this value.\n\n # Return\n Normalized 2D-Tensor\n \"\"\"\n return weights * values_range / tf.reduce_sum(weights)\n\n\ndef save_variables(sess, model_dir, scope='generator', global_step=0):\n model_dir = os.path.join(model_dir, 'TrainableVariables', scope)\n if os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n values = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n vars = {}\n for value in values:\n name = value.name\n name = name.replace(':', '=')\n name = name.replace('/', '-')\n vars.update({name: value})\n saver = tf.train.Saver(vars)\n saver.save(sess, os.path.join(model_dir, scope), global_step=global_step)\n\n\ndef load_variable(sess, value, model_dir):\n model_dir = os.path.join(model_dir, 'TrainableVariables')\n value_files = sorted(glob.glob(os.path.join(model_dir, \"*\")))\n if len(value_files) == 0:\n return\n name = value.name\n name = name.replace(':', '=')\n name = name.replace('/', '-')\n\n model_dir = os.path.join(model_dir, name)\n if not os.path.exists(model_dir):\n print('Variable ' + value.name + ' not existing...')\n return\n\n saver = tf.train.Saver({name: value})\n saver.restore(sess, model_dir)\n print('Variable ' + value.name + ' loaded...')\n\n\ndef save_model(sess, model_dir, model_name, scope='generator', global_step=0):\n model_dir = os.path.join(model_dir, model_name)\n if 
os.path.exists(model_dir):\n shutil.rmtree(model_dir)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope))\n saver.save(sess, os.path.join(model_dir, scope), global_step=global_step, write_meta_graph=True,\n write_state=True)\n print(\" [*] Model saving SUCCESS - \" + os.path.join(model_dir, scope))\n return model_dir\n\n\ndef load_model(sess, model_dir, model_name, scope='generator'):\n model_dir = os.path.join(model_dir, model_name)\n\n ckpt = tf.train.get_checkpoint_state(model_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n vars_model = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n\n vars_ckpt = checkpoint_utils.list_variables(os.path.join(model_dir, ckpt_name))\n\n vars_in_model = [var.name.split(':')[0] for var in vars_model]\n vars_in_ckpt = [var[0] for var in vars_ckpt]\n vars_to_remove = []\n for var in vars_in_model:\n if var not in vars_in_ckpt:\n print(' [!] ' + var + ' not exists')\n for i in range(len(vars_model)):\n if vars_model[i].name.split(':')[0] == var:\n vars_to_remove.append(vars_model[i])\n for var in vars_to_remove:\n vars_model.remove(var)\n\n saver = tf.train.Saver(vars_model)\n saver.restore(sess, os.path.join(model_dir, ckpt_name))\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n print(\" [*] Model load SUCCESS - \" + os.path.abspath(os.path.join(model_dir, ckpt_name)))\n return True, counter\n except Exception as err:\n print(\" [!] Model load FAILED - \" + os.path.abspath(model_dir) + ', ' + str(err))\n\n print(\" [!] Model load FAILED - no checkpoint in \" + os.path.abspath(model_dir))\n return True, 0\n\n\ndef save(sess, saver, checkpoint_dir, epoch, step, resize_factor):\n model_name = \"Model_R\" + str(resize_factor) + '-' + str(epoch)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n if saver is None:\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)\n print(' [*] Checkpoint saved: ' + checkpoint_dir)\n\n\ndef load(sess, checkpoint_dir):\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n saver = tf.train.Saver()\n saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n epoch = 0 # int(find_between(ckpt_name, '-', '-'))\n print(' [*] Checkpoint loaded: ' + checkpoint_dir)\n return True, counter, epoch\n else:\n print(' [!] Checkpoint FAILED: ' + checkpoint_dir)\n return False, 0\n\n\ndef load_pretrained_model(sess, variables, checkpoint_dir):\n print(\" [*] Reading checkpoints...\")\n print(' [*] Checkpoint: ' + checkpoint_dir)\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n restorer = tf.train.Saver(variables)\n restorer.restore(sess, os.path.join(checkpoint_dir, ckpt_name))\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n epoch = 0 # int(find_between(ckpt_name, '-', '-'))\n print(\" [*] Load SUCCESS\")\n return True, counter, epoch\n else:\n print(\" [!] 
Load FAILED\")\n return False, 0, 0\n\n\ndef get_global_steps(checkpoint_dir):\n if not os.path.exists(checkpoint_dir):\n print(\" [*] Global Steps 0\")\n return 0\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n print(\" [*] Global Steps \" + str(counter))\n return counter\n else:\n print(\" [!] Global Steps 0\")\n return 0\n\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n\n\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef normalize_array(batch):\n return np.asarray([normalize(image) for image in batch], dtype=np.float32)\n\n\ndef normalize(image, normalization_type='tanh'):\n \"\"\"\n Normalize Image\n\n # Arguments\n image: A `Tensor` of type `float32` or `float64`.\n activation: A value of type 'str'. If 'tanh' = [-1, 1],\n 'sigmoid' = [0, 1], 'selu' mean = 0 and stddev = 1\n return: A `Tensor` of type `float32` or `float64`.\n # Return\n Normalized Tensor\n \"\"\"\n if normalization_type is 'sigmoid':\n return np.asarray(image / 255.0, dtype=np.float32)\n elif normalization_type is 'selu':\n return np.array(\n [(image[i] - image[i].mean()) / image[i].std() for i in range(image.shape[0])])\n else:\n return np.asarray(image / 127.5 - 1.0, dtype=np.float32)\n\n\ndef inormalize(image, dtype=np.float32):\n return np.asarray((image + 1.0) * 127.5, dtype=dtype)\n\n\ndef save_images(images, size, image_path, normalized=False):\n num_im = size[0] * size[1]\n if normalized:\n return imsave(inverse_transform(images[:num_im]) * 255, size, image_path)\n else:\n return imsave(images[:num_im], size, image_path)\n\n\ndef inverse_transform(images):\n return (images + 1.) 
/ 2.\n\n\ndef imsave(images, size=None, path=None):\n if size == [1, 1] or size is None:\n return scipy.misc.imsave(path, images[0])\n else:\n return scipy.misc.imsave(path, merge(images, size))\n\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n BW = size[0]\n BH = size[1]\n img = np.zeros((w * size[1], h * size[0], 3))\n idx = 0\n for bx in range(BW):\n for by in range(BH):\n img[int(by * h):int((by + 1) * h), int(bx * w):int((bx + 1) * w)] = images[idx]\n idx += 1\n\n return img\n\n\ndef save_experiment(dest_path, source_path='../../../tf_core/tfcore'):\n archiv_zip = zipfile.ZipFile(os.path.join(dest_path, 'experiment.zip'), 'w')\n\n for folder, subfolders, files in os.walk(source_path):\n\n for file in files:\n if file.endswith('.py'):\n archiv_zip.write(os.path.join(folder, file))\n\n archiv_zip.close()\n print(' [*] Experiment-ZIP saved at ' + source_path)\n\n\ndef save_config(dict, path, name='experiment'):\n path = os.path.join(path, name + '.json')\n with io.open(path, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(dict,\n indent=4, sort_keys=True,\n separators=(',', ': '), ensure_ascii=False)\n outfile.writelines(str(str_))\n print(' [*] Config saved at ' + path)\n return\n\n\ndef pad_borders(net, k_size, mode=\"SYMMETRIC\"):\n pad = int((k_size - 1) / 2)\n net = tf.pad(net, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode=mode)\n return net\n\n\nclass CLR:\n\n def __init__(self, sess, start_lr=0.0001, end_lr=0.00001, step_size=20, gamma=0.95, max_lex=50):\n self.sess = sess\n self.base_lr = start_lr\n self.max_lr = end_lr\n self.step_size = step_size\n self.gamma = gamma\n self.loss_averages_op = None\n self.loss_ema = None\n self.max_len = max_lex\n self.queue = deque(maxlen=self.max_len)\n self.moving_aves = deque(maxlen=self.max_len)\n self.max_derivativ = 0\n self.index_max_derivativ = 0\n self.learning_rate = start_lr\n\n def set_base_learning_rate(self, value):\n self.base_lr = value\n\n def set_max_learning_rate(self, value):\n self.max_lr = value\n\n def get_moving_average(self, value, iteration):\n self.queue.append(value)\n n = len(self.queue)\n if len(self.queue) > self.max_len:\n self.queue.popleft()\n if len(self.moving_aves) >= self.max_len - 1:\n self.moving_aves.popleft()\n cumsum = [0]\n for i, x in enumerate(list(self.queue), 1):\n cumsum.append(cumsum[i - 1] + x)\n if i >= n:\n moving_ave = (cumsum[i] - cumsum[i - n]) / n\n self.moving_aves.append(moving_ave)\n\n if len(self.moving_aves) > 30:\n derivativ = self.moving_aves[-20] - self.moving_aves[-1]\n if self.max_derivativ < derivativ:\n self.max_derivativ = derivativ\n self.index_max_derivativ = iteration\n self.base_lr = self.learning_rate\n print('loss_av: ' + str(derivativ) + ' max_derivativ ' + str(\n self.max_derivativ) + ' min_lr ' + str(self.base_lr) + ' min_lr_it ' + str(\n iteration))\n self.learning_rate += 0.00001\n return self.learning_rate\n\n def get_learning_rate(self, iteration, lr_type='exp_range'):\n \"\"\"Given the inputs, calculates the lr that should be applicable for this iteration\"\"\"\n iteration += self.step_size\n cycle = np.floor(1 + iteration / (2 * self.step_size))\n x = np.abs(iteration / self.step_size - 2 * cycle + 1)\n lr = self.base_lr\n if lr_type == 'exp_range':\n scale = (self.gamma ** cycle)\n max_lr = self.max_lr * (1 / self.gamma)\n lr = self.base_lr + (max_lr - self.base_lr) * np.maximum(0, (1 - x)) * scale\n\n if lr_type == 'exp_range2':\n scale = (self.gamma ** cycle)\n a = (self.base_lr + (self.max_lr - self.base_lr) * scale)\n lr = a 
+ (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * scale\n\n return lr\n\n\ndef to_numpy(tf_function,\n input_shapes=(None, None, None, 3),\n input_types=None):\n \"\"\" This function convert a function using tensorflow tensors\n into a function using numpy arrays. The resulting function\n has the same signature as the input function.\n\n # Arguments\n tf_function: The tensorflow function to convert\n input_shapes: The information concerning the input shapes.\n You can be explicit by passing a list of shapes.\n Though is you pass only one shape, it's going to be\n assumed that the shape is the same for all inputs.\n input_types: The information concerning the input types.\n Defaults to `tf.float32` for compatibility.\n You can pass a list of types or just a single type.\n If you pass a single type, it will be assumed to be\n the same for all inputs.\n\n # Returns\n\n A function with the same signature as `tf_function` but can\n take numpy arrays as input.\n\n Example.\n ```python\n def dummy_tf(tensor1, tensor2):\n concatenation = tf.concat([tensor1, tensor2], 0)\n return concatenation, tensor2\n\n np_function = to_numpy(dummy_tf, (None, 2))\n\n arr1 = np.random.uniform((10,2))\n arr2 = np.random.uniform((10,2))\n\n result_concat, result_2 = np_function(arr1, arr2)\n ```\n \"\"\"\n\n number_of_arguments = len((signature(tf_function)).parameters)\n\n if not isinstance(input_shapes, list):\n input_shapes = [input_shapes for _ in range(number_of_arguments)]\n\n if input_types is None:\n input_types = tf.float32\n if not isinstance(input_types, list):\n input_types = [input_types for _ in range(number_of_arguments)]\n\n placeholders = [tf.placeholder(dtype, shape)\n for dtype, shape in zip(input_types, input_shapes)]\n\n outputs = tf_function(*placeholders)\n if isinstance(outputs, tuple):\n outputs = list(outputs)\n else:\n outputs = [outputs]\n\n numpy_function_lists = K.function(placeholders, outputs)\n\n def np_function(*args):\n output_list = numpy_function_lists(list(args))\n if len(output_list) == 1:\n return output_list[0]\n else:\n return tuple(output_list)\n\n return np_function\n\n\ndef reduce_var(x, axis=None, keepdims=False):\n \"\"\"Variance of a tensor, alongside the specified axis.\n\n # Arguments\n x: A tensor or variable.\n axis: An integer, the axis to compute the variance.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n # Returns\n A tensor with the variance of elements of `x`.\n \"\"\"\n m = tf.reduce_mean(x, axis=axis, keep_dims=True)\n devs_squared = tf.square(x - m)\n return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)\n\n\ndef reduce_std(x, axis=None, keepdims=False):\n \"\"\"Standard deviation of a tensor, alongside the specified axis.\n\n # Arguments\n x: A tensor or variable.\n axis: An integer, the axis to compute the standard deviation.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n # Returns\n A tensor with the standard deviation of elements of `x`.\n \"\"\"\n return tf.sqrt(reduce_var(x, axis=axis, keepdims=keepdims))\n\n\ndef rgb_to_ycbcr(im, asuint8=False):\n \"\"\" A function to convert rgb images to YCbCr.\n\n # Arguments\n\n im: The rgb image to convert. 
Must be a Tensorflow\n tensor with positive values (0-255).\n asuint8:\n Set to `True` if you want a 8 bits tensor.\n If `False` returns a float32 tensor.\n\n # Returns\n\n An tensor with values between 0 and 255.\n \"\"\"\n im = tf.cast(im, tf.float32)\n\n xform_rgb_to_ycbcr = tf.constant(np.array([[.299, .587, .114],\n [-.1687, -.3313, .5],\n [.5, -.4187, -.0813]], dtype=np.float32).T)\n\n ycbcr = K.dot(im, xform_rgb_to_ycbcr)\n tmp = ycbcr[..., 1:] + 128\n ycbcr = tf.concat([ycbcr[..., :1], tmp], axis=-1)\n if asuint8:\n ycbcr = tf.clip_by_value(ycbcr, 0, 255)\n ycbcr = tf.round(ycbcr)\n return tf.cast(ycbcr, tf.uint8)\n return ycbcr\n\n\ndef ycbcr_to_rgb(im, asuint8=False):\n \"\"\" A function to convert YCbCr images to rgb.\n\n # Arguments\n\n im: The YCbCr image to convert. Must be a Tensorflow\n tensor with positive values (0-255).\n asuint8:\n Set to `True` if you want a 8 bits tensor.\n If `False` returns a float32 tensor.\n\n # Returns\n\n A tensor with values between 0 and 255.\n \"\"\"\n rgb = tf.cast(im, tf.float32)\n tmp = rgb[..., 1:] - 128\n rgb = tf.concat([rgb[..., :1], tmp], axis=-1)\n\n xform_ycbcr_to_rgb = tf.constant(np.array([[1, 0, 1.402],\n [1, -0.34414, -.71414],\n [1, 1.772, 0]], dtype=np.float32).T)\n\n rgb = K.dot(rgb, xform_ycbcr_to_rgb)\n if asuint8:\n rgb = tf.clip_by_value(rgb, 0, 255)\n rgb = tf.round(rgb)\n return tf.cast(rgb, tf.uint8)\n return rgb\n" ]
[ [ "tensorflow.concat", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.pad", "tensorflow.nn.conv2d", "numpy.clip", "tensorflow.get_collection", "tensorflow.extract_image_patches", "tensorflow.square", "tensorflow.train.Saver", "numpy.zeros", "tensorflow.shape", "scipy.misc.imsave", "tensorflow.placeholder", "tensorflow.one_hot", "numpy.floor", "tensorflow.round", "numpy.array", "tensorflow.clip_by_value", "tensorflow.train.get_checkpoint_state", "tensorflow.constant", "numpy.abs", "numpy.maximum", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.ones" ] ]
ADALabUCSD/DeepPostures
[ "f51acc8fea2aa76fe0150f87284f624840016095" ]
[ "MSSE-2021/commons.py" ]
[ "# Copyright 2021 Supun Nakandala. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\nimport h5py\nimport numpy as np\n\nimport tensorflow\nif int(tensorflow.__version__.split(\".\")[0]) == 2:\n import tensorflow.compat.v1 as tf\nelse:\n import tensorflow as tf\n\nfrom datetime import datetime, timedelta\n\ndef input_iterator(data_root, subject_id, train=False):\n fnames = [name.split('.')[0] for name in os.listdir(os.path.join(data_root, subject_id)) if not name.startswith('.')]\n fnames.sort()\n for i in range(len(fnames) - 1):\n assert datetime.strptime(fnames[i+1], \"%Y-%m-%d\").date() - datetime.strptime(fnames[i], \"%Y-%m-%d\").date() == timedelta(days=1)\n\n data_batch = []\n timestamps_batch = []\n label_batch = []\n for fname in fnames:\n h5f = h5py.File(os.path.join(data_root, subject_id, '{}.h5'.format(fname)), 'r')\n timestamps = h5f.get('time')[:]\n data = h5f.get('data')[:]\n sleeping = h5f.get('sleeping')[:]\n non_wear = h5f.get('non_wear')[:]\n label = h5f.get('label')[:]\n \n for d, t, s, nw, l in zip(data, timestamps, sleeping, non_wear, label):\n # if train and l == -1:\n # raise Exception('Missing ground truth label information in pre-processed data')\n \n if s == 1 or nw == 1 or (train and l == -1):\n if len(timestamps_batch) > 0:\n yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch)\n data_batch = []\n timestamps_batch = []\n label_batch = []\n continue\n\n data_batch.append(d)\n timestamps_batch.append(t)\n label_batch.append(l)\n \n h5f.close()\n\n if len(timestamps_batch) > 0:\n yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch)\n\n\ndef cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes):\n logits = cnn_model(x, amp_factor=amp_factor)\n logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256*amp_factor])\n\n forward_cell = tf.nn.rnn_cell.LSTMCell(128)\n backward_cell = tf.nn.rnn_cell.LSTMCell(128)\n encoder_outputs,_ = tf.nn.bidirectional_dynamic_rnn(\n forward_cell,\n backward_cell,\n logits,\n dtype=tf.float32\n )\n encoder_outputs = tf.concat(encoder_outputs, axis=2)\n logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes])\n return logits\n \n\ndef cnn_model(x, amp_factor=1):\n with tf.variable_scope('model'):\n conv1 = tf.layers.conv2d(x, filters=32*amp_factor, kernel_size=[5, 3],\n data_format='channels_last', padding= \"same\",\n strides=(2, 1),\n activation=tf.nn.relu)\n pool1 = conv1\n\n conv2 = tf.layers.conv2d(pool1, filters=64*amp_factor, kernel_size=[5, 1],\n data_format='channels_last', padding= \"same\",\n strides=(2, 1),\n activation=tf.nn.relu)\n pool2 = conv2\n\n conv3 = tf.layers.conv2d(pool2, filters=128*amp_factor, kernel_size=[5, 1],\n data_format='channels_last', padding= \"same\",\n strides=(2, 1),\n activation=tf.nn.relu)\n pool3 = conv3\n\n conv4 = tf.layers.conv2d(pool3, filters=256*amp_factor, 
kernel_size=[5, 1],\n data_format='channels_last', padding= \"same\",\n strides=(2, 1), \n activation=tf.nn.relu)\n pool4 = conv4\n\n conv5 = tf.layers.conv2d(pool4, filters=256*amp_factor, kernel_size=[5, 1],\n data_format='channels_last', padding= \"same\",\n strides=(2, 1), \n activation=tf.nn.relu)\n pool5 = conv5 \n pool5 = tf.transpose(pool5, [0, 3, 1, 2])\n size = pool5.shape[-1] * pool5.shape[-2] * pool5.shape[-3]\n\n logits = tf.layers.dense(tf.reshape(pool5,(-1, size)), units=256*amp_factor)\n return logits\n" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.__version__.split", "tensorflow.concat", "tensorflow.transpose", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.reshape", "tensorflow.layers.dense", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.variable_scope", "numpy.array" ] ]
Flolight/100DaysOfMLCode
[ "0b9bc8944f7d18b2ca5f1ea282cea322ad64bc5e" ]
[ "courses/MachineLearningAZ_Python_HandsOn/regressions/decision_tree/decision_tree_regression.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 16 11:32:57 2020\n\n@author: flo\n\"\"\"\n\n# Decision Tree regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n\n# Splitting into Training and testing set\n\"\"\"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state = 0)\n\"\"\"\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\ny_test = sc_y.transform(np.reshape(y_test, (-1, 1)))\"\"\"\n\n# Fitting the Decision tree regression model to the dataset\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\n\n# Predicting a result with the Decision tree regression model\ny_pred = regressor.predict(np.array([[6.5]]))\n\n# Visualising the Decision tree regression results\nplt.scatter(X, y, color = 'red')\nplt.plot(X, regressor.predict(X), color = 'blue')\nplt.title('Salary depending on position (Decision tree)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\n\n# Visualising the Decision tree regression results (for higher resolution and smoother curve)\nX_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Salary depending on position (Decision tree Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')" ]
[ [ "pandas.read_csv", "sklearn.tree.DecisionTreeRegressor", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
Abhishek-Aditya-bs/Streaming-Spark-For-Machine-Learning
[ "ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f" ]
[ "models/deepImageSVM.py" ]
[ "from typing import List\n\nimport numpy as np\nimport warnings\n\nfrom joblibspark import register_spark\n\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.utils import parallel_backend\nfrom sklearn.metrics import log_loss, precision_score, recall_score\n\nfrom pyspark.sql.dataframe import DataFrame\nfrom sklearn.metrics import confusion_matrix\n\nwarnings.filterwarnings('ignore')\nregister_spark()\n\nclass DeepImageSVM:\n def __init__(self, loss='log', penalty='l2'):\n self.model = SGDClassifier(loss=loss, penalty=penalty, random_state=0)\n \n def configure_model(self, configs):\n model = self.model\n model.alpha = configs.learning_rate\n model.warm_start = False\n model.n_iter_ = configs.max_epochs\n return model\n\n def train(self, df: DataFrame, svm : SGDClassifier, path) -> List:\n with open(path, \"rb\") as f:\n X = np.load(f)\n y = np.array(df.select(\"label\").collect()).reshape(-1)\n print(X.shape)\n print(y)\n\n with parallel_backend(\"spark\", n_jobs=4):\n svm.partial_fit(X,y,np.arange(0,10).tolist())\n predictions = svm.predict(X)\n predictions = np.array(predictions)\n predictions_prob = svm.predict_proba(X)\n predictions_prob = np.array(predictions_prob)\n predictions_prob[np.isnan(predictions_prob)] = 0 \n accuracy = svm.score(X,y)\n loss = log_loss(y,predictions_prob,labels=np.arange(0,10), eps=1e-1)\n precision = precision_score(y,predictions, labels=np.arange(0,10),average=\"macro\")\n recall = recall_score(y,predictions, labels=np.arange(0,10),average=\"macro\")\n f1 = 2*precision*recall/(precision+recall)\n\n return [svm,predictions, accuracy, loss, precision, recall, f1]\n\n def predict(self, df: DataFrame, svm : SGDClassifier, path) -> List:\n with open(path, \"rb\") as f:\n X = np.load(f)\n y = np.array(df.select(\"label\").collect()).reshape(-1)\n \n predictions = svm.predict(X)\n predictions = np.array(predictions)\n predictions_prob = svm.predict_proba(X)\n accuracy = svm.score(X,y)\n predictions_prob = np.array(predictions_prob)\n predictions_prob[np.isnan(predictions_prob)] = 0 \n loss = log_loss(y,predictions_prob,labels=np.arange(0,10),eps=1e-1)\n precision = precision_score(y,predictions, labels=np.arange(0,10),average=\"macro\")\n recall = recall_score(y,predictions, labels=np.arange(0,10),average=\"macro\")\n f1 = 2*precision*recall/(precision+recall)\n cm = confusion_matrix(y, predictions)\n return [predictions, accuracy, loss, precision, recall, f1, cm]" ]
[ [ "numpy.isnan", "numpy.arange", "sklearn.metrics.confusion_matrix", "sklearn.utils.parallel_backend", "numpy.load", "numpy.array", "sklearn.linear_model.SGDClassifier" ] ]
gdsfactory/ubc
[ "f780778a06dad80c3e0df36c534d88000adc1c87" ]
[ "ubcsp/mzi_spectrum.py" ]
[ "\"\"\"\nbased on https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package\n\"\"\"\n\nimport numpy as np\nfrom ubcsp.waveguide import beta, neff, wavelength_um\n\n\ndef mzi_spectrum(\n L1_um,\n L2_um,\n wavelength_um=wavelength_um,\n beta=beta,\n alpha=1e-3,\n neff=neff,\n n1=2.4,\n n2=-1,\n n3=0,\n):\n \"\"\"Returns MZI spectrum:\n\n Args:\n L1_um\n L2_um\n wavelength_um\n beta: propagation constant\n \"\"\"\n if callable(beta):\n beta = beta(wavelength_um, neff=neff, alpha=alpha, n1=n1, n2=n2, n3=n3)\n\n return 0.25 * np.abs(np.exp(-1j * beta * L1_um) + np.exp(-1j * beta * L2_um)) ** 2\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n # plt.plot(wavelength_um, mzi_spectrum(100, 110))\n plt.plot(wavelength_um, 10 * np.log10(mzi_spectrum(L1_um=40, L2_um=255)))\n plt.show()\n" ]
[ [ "numpy.exp", "matplotlib.pyplot.show" ] ]
poliyev/poptimizer
[ "71935c4365b0572e65b6d3172f925701dda283db", "71935c4365b0572e65b6d3172f925701dda283db" ]
[ "poptimizer/data/adapters/html/parser.py", "poptimizer/data/adapters/gateways/tests/test_cbr.py" ]
[ "\"\"\"Парсер html-таблиц.\"\"\"\nfrom datetime import datetime\nfrom typing import Callable, List, Union\n\nimport aiohttp\nimport bs4\nimport pandas as pd\n\nfrom poptimizer.data.adapters.html import description\nfrom poptimizer.shared import connections\n\nDescriptions = List[description.ColDesc]\nParseFuncType = Callable[[str], Union[None, float, datetime]]\n\n\nasync def get_html(\n url: str,\n session: aiohttp.ClientSession = connections.HTTP_SESSION,\n) -> str:\n \"\"\"Загружает html-код страницы.\"\"\"\n async with session.get(url) as respond:\n try:\n respond.raise_for_status()\n except aiohttp.ClientResponseError:\n raise description.ParserError(f\"Данные {url} не загружены\")\n return await respond.text()\n\n\ndef _get_table_from_html(html: str, table_num: int) -> str:\n \"\"\"Выбирает таблицу по номеру из html-страницы.\"\"\"\n soup = bs4.BeautifulSoup(html, \"lxml\")\n try:\n table = soup.find_all(\"table\")[table_num]\n except IndexError:\n raise description.ParserError(f\"На странице нет таблицы {table_num}\")\n return f\"<html>{table}</html>\"\n\n\ndef _get_raw_df(table: str, cols_desc: Descriptions) -> pd.DataFrame:\n \"\"\"Формирует изначальный DataFrame из html-таблицы.\"\"\"\n converters = {desc.num: desc.parser_func for desc in cols_desc if desc.parser_func is not None}\n raw_name = cols_desc[0].raw_name\n num_of_headers = len(raw_name)\n header_nums = list(range(num_of_headers))\n\n return pd.read_html(\n table,\n header=header_nums,\n converters=converters,\n thousands=\" \",\n displayed_only=False,\n )[0]\n\n\ndef _validate_header(columns: pd.Index, cols_desc: Descriptions) -> None:\n \"\"\"Проверяет, что заголовки соответствуют описанию.\"\"\"\n for desc in cols_desc:\n header = columns[desc.num]\n if not isinstance(header, tuple):\n header = [header]\n raw_name = desc.raw_name\n if all(part in name for part, name in zip(raw_name, header)):\n continue\n raise description.ParserError(f\"Неверный заголовок: {desc.raw_name} не входит в {header}\")\n\n\ndef _get_selected_col(df: pd.DataFrame, cols_desc: Descriptions) -> pd.DataFrame:\n \"\"\"Выбирает столбцы в соответствии с описанием и форматирует их.\"\"\"\n selected_col = [desc.num for desc in cols_desc]\n df = df.iloc[:, selected_col]\n df.columns = [desc.name for desc in cols_desc]\n index_name = cols_desc[0].name\n return df.set_index(index_name)\n\n\ndef get_df_from_html(html: str, table_num: int, cols_desc: Descriptions) -> pd.DataFrame:\n \"\"\"Получает таблицу из html-страницы и форматирует ее в соответствии с описанием.\"\"\"\n table = _get_table_from_html(html, table_num)\n df = _get_raw_df(table, cols_desc)\n _validate_header(df.columns, cols_desc)\n return _get_selected_col(df, cols_desc)\n\n\nasync def get_df_from_url(url: str, table_num: int, cols_desc: Descriptions) -> pd.DataFrame:\n \"\"\"Загружает таблицу по URL и форматирует ее в соответствии с описанием.\"\"\"\n html = await get_html(url)\n return get_df_from_html(html, table_num, cols_desc)\n", "\"\"\"Тесты загрузки данных о максимальных ставках депозитов с сайта ЦБР.\"\"\"\nfrom datetime import datetime\n\nimport pandas as pd\nimport pytest\n\nfrom poptimizer.data.adapters.gateways import cbr\nfrom poptimizer.data.adapters.html import parser\nfrom poptimizer.shared import col\n\n\ndef test_date_parser():\n \"\"\"Проверка обработки разных декад в датах.\"\"\"\n assert cbr.date_parser(\"III.05.2021\") == datetime(2021, 5, 21)\n assert cbr.date_parser(\"II.04.2021\") == datetime(2021, 4, 11)\n assert cbr.date_parser(\"I.03.2021\") == 
datetime(2021, 3, 1)\n assert cbr.date_parser(\"IV.03.2021\") is None\n\n\nDF = pd.DataFrame(\n [[4.1], [3.9]],\n index=[\"2020-01-20\", \"2014-11-25\"],\n columns=[col.RF],\n)\nDF_REZ = pd.DataFrame(\n [[0.039], [0.041]],\n index=[\"2014-11-25\", \"2020-01-20\"],\n columns=[col.RF],\n)\n\n\n@pytest.mark.asyncio\nasync def test_loader(mocker):\n \"\"\"Сортировка полученных данных и перевод в проценты.\"\"\"\n mocker.patch.object(parser, \"get_df_from_url\", return_value=DF)\n\n loader = cbr.RFGateway()\n pd.testing.assert_frame_equal(await loader(), DF_REZ)\n" ]
[ [ "pandas.read_html" ], [ "pandas.DataFrame" ] ]
souptc/pytorch
[ "c8fd1bbc1101d76903144c03424fc325ee92cfe2" ]
[ "torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py" ]
[ "# Copyright 2022 Cruise LLC\nimport logging\nimport warnings\nfrom collections import OrderedDict\nfrom typing import Union, Iterable, Dict\n\nimport torch\nimport torch.distributed as dist\nimport torch.distributed.algorithms.model_averaging.averagers as averagers\nimport torch.distributed.algorithms.model_averaging.utils as utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass HierarchicalModelAverager(averagers.ModelAverager):\n r\"\"\"\n Runs hierarchical model averaging (`hierarchical SGD <https://arxiv.org/pdf/2010.12998.pdf>`_).\n Process groups of different sizes are organized in a hierarhicy, and they average parameters\n by using different periods concurrently after the warm-up stage.\n This is an extension of :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`\n that supports `post-local SGD <https://arxiv.org/abs/1808.07217>`_, which essentially only supports\n a two-level hierarchy: the intra-machine level and the global level, where the intra-machine\n level is usually embedded in :meth:`~torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook`.\n Similarly, the process groups within this class do not have such an intra-machine process\n subgroup, which should be embedded by the post-local SGD communication hook instead.\n\n Args:\n period_group_size_dict: An ordered dict mapping keys of model averaging period to\n process group size, used for initializing process groups of\n different sizes in a hierarchy to average parameters concurrently.\n Particularly, at each iteration, there will be at most a single\n process group that runs averaging -- the period of such group should\n have the largest period which the current step can be divided by.\n For example, if the dict has three keys: 2, 4, and 8,\n then this means totally three process groups will be created to\n average parameters every 2, 4, and 8 iterations, respectively.\n At the 4th iteration, only the second process group will run\n averaging, because the first process group should be a\n subset of the second process group, and no need to execute the first\n process group redundantly.\n On the other hand, the third process group can only be triggered\n every 8 iterations, so it will not be triggered at the 4th iteration.\n warmup_steps (int): The number of warm-up steps. 
During this stage, model averaging is skipped.\n process_group (ProcessGroup, optional): The overall process group containing all the processes that runs model averaging.\n If ``None``, the default process group, which is created\n by :func:`torch.distributed.init_process_group`, will be used.\n (default: ``None``)\n\n Example::\n >>> from collections import OrderedDict\n >>> import torch\n >>> import torch.distributed as dist\n >>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (\n >>> PostLocalSGDState,\n >>> post_localSGD_hook,\n >>> )\n >>> import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD\n >>> import torch.nn as nn\n >>>\n >>> dist.init_process_group(\"nccl\", rank=rank, world_size=16)\n >>> torch.cuda.set_device(rank)\n >>> module = nn.Linear(1, 1, bias=False).to(rank)\n >>> model = nn.parallel.DistributedDataParallel(\n >>> module, device_ids=[rank], output_device=rank\n >>> )\n >>> # Register a post-localSGD communication hook.\n >>> # Assume that each machine has 4 GPUs, then each intra-machine subgroup has a size of 4.\n >>> subgroup, _ = dist.new_subgroups()\n >>> state = PostLocalSGDState(subgroup=subgroup, start_localSGD_iter=100)\n >>> model.register_comm_hook(state, post_localSGD_hook)\n >>>\n >>> # Average parameters among each group of 8 processes every 4 iterations, and among all\n >>> # the 16 processes every 16 iterations.\n >>> averager = hierarchicalSGD.HierarchicalModelAverager(\n >>> period_group_size_dict=OrderedDict([(4, 8), (16, 16)]), warmup_steps=100)\n >>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``.\n >>> # In the first 100 steps, run global gradient averaging like normal DDP at every step.\n >>> # After 100 steps, run model averaging at two levels.\n >>> for step in range(0, 200):\n >>> optimizer.zero_grad()\n >>> loss = loss_fn(output, labels)\n >>> loss.backward()\n >>> optimizer.step()\n >>> # Average parameters after ``optimizer.step()``.\n >>> # Thus, the inter-node communication only occurs periodically after ``warmup_steps``.\n >>> averager.average_parameters(model.parameters())\n\n .. warning ::\n The last group size in the dict must be the size of the provided ``process_group``,\n which indicates model averaging at the highest level of the hierarchy.\n If ``process_group`` is not provided, then the last group size should be equal to the world size.\n\n .. warning ::\n `HierarchicalModelAverager` is experimental and subject to change.\n \"\"\"\n\n def __init__(self, period_group_size_dict=None, warmup_steps=0, process_group=None):\n super().__init__(process_group)\n if not period_group_size_dict:\n raise ValueError(\"Arg ``period_group_size_dict`` must not be empty.\")\n self._periods = list(period_group_size_dict.keys())\n if self._periods[0] <= 0:\n raise ValueError(\"The minimum period in arg ``period_group_size_dict`` must be a positive value.\")\n elif self._periods[-1] == 1:\n warnings.warn(\n \"When the maximum period in arg ``period_group_size_dict`` is 1, \"\n \"no need to use model averaging because the communication cost \"\n \"of all-reducing parameters will be no less than the cost of all-reducing gradients \"\n \"by DistributedDataParallel in the backward pass. 
Therefore, only \"\n \"DistributedDataParallel should be used for this case.\"\n )\n overall_group_size = dist.get_world_size(group=self.process_group)\n if list(period_group_size_dict.values())[-1] != overall_group_size:\n raise ValueError(\n f\"The last value in arg ``period_process_group_dict`` {list(period_group_size_dict.values())[-1]} \"\n \"must be equal to the size of arg ``process_group`` {overall_group_size}.\"\n )\n\n self.period_process_group_dict = OrderedDict()\n logger.info(\"Model averaging hierarchy:\")\n for period, group_size in period_group_size_dict.items():\n logger.info(\n f\"\\tEach group that has {group_size} processes average parameters every {period} iterations, \"\n \"if no higher-level averaging.\")\n if group_size != overall_group_size:\n self.period_process_group_dict[period], _ = dist.new_subgroups(\n group_size=group_size, group=self.process_group)\n else:\n self.period_process_group_dict[period] = self.process_group\n\n if warmup_steps < 0:\n raise ValueError(\"Arg ``warmup_steps`` must be a non-negative number.\")\n self.warmup_steps = warmup_steps\n\n def _find_process_group(self):\n \"\"\"\n Returns a process group as the value of an ``period_process_group_dict`` entry,\n if ``step`` can be divided by a period in the keys of ``period_process_group_dict``.\n If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,\n then the returned process group is the one corresponding to the largest period,\n since this process group will be used for averaging parameters at this ``step``.\n Returns ``None`` if not found.\n \"\"\"\n for period in reversed(self._periods):\n if self.step % period == 0:\n return self.period_process_group_dict[period]\n return None\n\n def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]):\n \"\"\"\n Averages parameters or parameter groups of an optimizer if ``step`` is no less than ``warmup_steps``\n and it can be divided by a period in the keys of ``period_process_group_dict``,\n where ``step`` is increased by 1 at each iteration in the training loop.\n If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``,\n only the largest period is used, and the corresponding process group is used for averaging parameters.\n Args:\n params: The parameters of a model or parameter groups of an optimizer.\n \"\"\"\n if self.step >= self.warmup_steps:\n group = self._find_process_group()\n if group is not None:\n utils.average_parameters_or_parameter_groups(params, group)\n self.step += 1\n" ]
[ [ "torch.distributed.new_subgroups", "torch.distributed.get_world_size", "torch.distributed.algorithms.model_averaging.utils.average_parameters_or_parameter_groups" ] ]
tangwei94/statmech_tm_solver.jl
[ "001288de3643c9cd962a14e739efb5257656a682" ]
[ "helpers/triangular_AF_ising_fulldiag/plot_triangular_ising_fulldiag_Ek.py" ]
[ "import numpy as np \nimport matplotlib \nimport matplotlib.pyplot as plt \nimport io \n\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\nplt.rcParams['font.size'] = 15\n\nf = io.open(\"result_triangular_ising_fulldiag.txt\", \"r\")\ndata = np.loadtxt(f)\nf.close()\n\nfig, axes = plt.subplots(4, 2, figsize=(8, 16))\naxes = axes.flatten()\n\nfor ix, n in enumerate([5, 6, 7, 8, 9, 10, 11, 12]):\n msk = (np.isclose(data[:, 0], n))\n w_reals = data[msk, 1]\n w_imags = data[msk, 2]\n\n w_norms = np.sqrt(w_reals**2 + w_imags**2)\n w_norms /= max(w_norms)\n w_angles = np.angle(w_reals + 1j*w_imags)\n\n axes[ix].set(xlabel=r'$\\mathrm{arg} w / \\pi$', ylabel='-log |w|', xlim=(-1, 1), ylim=(0, 3))\n\n axes[ix].plot(w_angles / np.pi, -np.log(w_norms), 'o', color='tab:red', alpha=0.5)\n\n axes[ix].text(0.1, 0.9, \"L={:d}\".format(n), horizontalalignment='center', transform=axes[ix].transAxes, fontsize='small')\n\nfig.tight_layout()\nplt.savefig(\"result_triangular_ising_fulldiag_Ek.pdf\", bbox_inches='tight')\nplt.close(fig)\n\n" ]
[ [ "numpy.log", "numpy.sqrt", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "numpy.angle", "numpy.loadtxt", "numpy.isclose" ] ]
lindenmp/NASA_aus_firedata
[ "e65b058983d64daa1ff662e6c19bb7fa77f0637a" ]
[ "preprocessing.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os, sys\nimport pandas as pd\nimport numpy as np\nimport numpy.matlib\nimport scipy as sp\n\nimport geopandas as gpd\n\n# Plotting\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# Load map of australia using postcode data\n\n# In[2]:\n\n\naus_map = gpd.read_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/aus_map/aus_poas.shp')\n\n\n# In[3]:\n\n\naus_map.head()\n\n\n# Pull out VIC/NSW\n\n# In[4]:\n\n\naus_map['state'].unique()\n\n\n# The 'state' column only has 'VIC'... that's weird. Let's use the postcodes to stitch together state maps\n# \n# Wikipedia to the rescue! https://en.wikipedia.org/wiki/Postcodes_in_Australia\n# \n# Victoria spans postcodes 3000 to 3999 inclusive\n\n# In[5]:\n\n\nvic_map = aus_map.loc[np.logical_and(aus_map['code'] >= 3000, aus_map['code'] <= 3999),:]\nax = vic_map.plot()\n\n\n# NSW spans postcodes 2000 to 2999, if you include the ACT\n\n# In[6]:\n\n\nnsw_map = aus_map.loc[np.logical_and(aus_map['code'] >= 2000, aus_map['code'] <= 2999),:]\nax = nsw_map.plot()\n\n\n# There is some weird spot way out east that shouldn't be there - around 160 longitude\n\n# In[7]:\n\n\nxmin, ymin, xmax, ymax = nsw_map.total_bounds\nprint(xmin, ymin, xmax, ymax)\n\n\n# Cut at 155 longitude\n\n# In[8]:\n\n\nnsw_map = nsw_map.cx[xmin:155, ymin:ymax]\n\n\n# In[9]:\n\n\nax = nsw_map.plot()\n\n\n# Better!\n# \n# Merge\n\n# In[10]:\n\n\nmy_map = pd.concat((nsw_map, vic_map))\nax = my_map.plot()\n\n\n# In[11]:\n\n\nmy_map.reset_index(drop = True, inplace = True)\nmy_map['state'] = 'None'\nmy_map.head()\n\n\n# Load in fire data\n\n# In[12]:\n\n\n# df = gpd.read_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/data/DL_FIRE_V1_101558/fire_nrt_V1_101558.shp')\n# df = gpd.read_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/data/DL_FIRE_M6_101557/fire_nrt_M6_101557.shp')\ndf = gpd.read_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/data/DL_FIRE_M6_101557/fire_archive_M6_101557.shp')\ndf.shape\n\n\n# In[13]:\n\n\ndf.head()\n\n\n# ## Retain only fire data that intersects with my map\n# \n# Scrub out postcode boundaries and store in separate variable. 
We'll use this below to retain fire data inside our map.\n\n# In[14]:\n\n\nmy_map_nopost = my_map.dissolve(by='state')\nmy_map_nopost.reset_index(drop = True, inplace = True)\nmy_map_nopost.drop(['POA_NAME','code'], axis = 1, inplace = True)\n\n\n# In[15]:\n\n\nax = my_map_nopost.plot()\n\n\n# Drop rows outside the general bounds of my map (long,lat)\n\n# In[16]:\n\n\nxmin, ymin, xmax, ymax = my_map_nopost.total_bounds\nprint(xmin, ymin, xmax, ymax)\n\n\n# In[17]:\n\n\ndf = df.cx[xmin:xmax, ymin:ymax]\ndf.reset_index(drop = True, inplace = True)\ndf.shape\n\n\n# In[18]:\n\n\ndf.head()\n\n\n# In[19]:\n\n\nfig, ax = plt.subplots(figsize = (5,5))\nmy_map.plot(ax = ax)\ndf.plot(ax = ax, color = 'r')\n\n\n# In[20]:\n\n\nfrom IPython.display import clear_output\n\n\n# In[21]:\n\n\ndef update_progress(progress, my_str = ''):\n bar_length = 20\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n if progress < 0:\n progress = 0\n if progress >= 1:\n progress = 1\n\n block = int(round(bar_length * progress))\n\n clear_output(wait = True)\n text = my_str + \" Progress: [{0}] {1:.1f}%\".format( \"#\" * block + \"-\" * (bar_length - block), progress * 100)\n print(text)\n\n\n# In[22]:\n\n\nmask = my_map_nopost.geometry.unary_union\n\n\n# In[23]:\n\n\nmy_bool = np.zeros((df.shape[0],), dtype=bool)\n\n\n# In[24]:\n\n\nfor data in df.iterrows():\n update_progress(data[0]/df.shape[0])\n my_bool[data[0]] = data[1].geometry.within(mask)\nupdate_progress(1)\n\n\n# In[25]:\n\n\nfig, ax = plt.subplots(figsize = (5,5))\nmy_map.plot(ax = ax)\ndf.loc[my_bool,:].plot(ax = ax, color = 'r')\n\n\n# In[26]:\n\n\ndf.loc[my_bool,:].shape\n\n\n# ## Save out\n\n# In[27]:\n\n\nmy_map.to_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/data/my_map.shp')\ndf.loc[my_bool,:].to_file('/Users/lindenmp/Dropbox/PersonalProjects/NASA_aus_firedata/data/df.shp')\n\n" ]
[ [ "pandas.concat", "numpy.logical_and", "numpy.zeros", "matplotlib.pyplot.subplots" ] ]
nemodrive/path_generation
[ "64d36342e46a83ed0ade5801bb69370d41d9ecbb" ]
[ "utils/save_training.py" ]
[ "# AndreiN, 2019\n\nimport os\nimport torch\nimport numpy as np\nimport shutil\nimport itertools\nimport glob\nimport re\n\n\ndef get_training_data_path(model_dir, best=False, index=None):\n if best:\n return os.path.join(model_dir, \"training_data_best.pt\")\n\n if index is not None:\n fld = os.path.join(model_dir, \"checkpoints\")\n if not os.path.isdir(fld):\n os.mkdir(fld)\n return os.path.join(fld, f\"training_data_{index}.pt\")\n\n return os.path.join(model_dir, \"training_data.pt\")\n\n\ndef get_last_training_path_idx(model_dir):\n if os.path.exists(model_dir):\n path = os.path.join(model_dir, \"training_data_*.pt\")\n\n max_index = 0\n for path in glob.glob(path):\n try:\n max_index = max(max_index,\n int(re.findall(\"training_data_([1-9]\\d*|0).pt\", path)[0]))\n except:\n pass\n\n return max_index\n return 0\n\n\nclass SaveData:\n def __init__(self, out_dir, save_best=True, save_all=False):\n self.out_dir = out_dir\n self.save_best = save_best\n self.save_all = save_all\n\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n self.best_loss = np.inf\n\n start_idx = get_last_training_path_idx(out_dir)\n self.index = itertools.count(start=start_idx, step=1)\n\n def load_training_data(self, out_dir=None, best=False):\n \"\"\" If best is set to false, the last training model is loaded \"\"\"\n out_dir = out_dir if out_dir is not None else self.out_dir\n\n training_data = None\n if best:\n path = get_training_data_path(out_dir, best=best)\n if os.path.isfile(path):\n training_data = torch.load(path)\n\n if training_data is None:\n path = get_training_data_path(out_dir, best=False)\n try:\n training_data = torch.load(path)\n except OSError:\n training_data = None # dict({\"loss\": np.inf, \"epoch\": 0})\n\n if training_data is not None and \"loss\" in training_data:\n self.best_loss = training_data[\"loss\"]\n\n return training_data\n\n def save_training_data(self, data, loss, other=None, model_dir=None):\n model_dir = model_dir if model_dir is not None else self.out_dir\n\n trainig_data = dict()\n trainig_data = data\n\n if other is not None:\n trainig_data.update(other)\n\n # Save standard\n path = get_training_data_path(model_dir)\n torch.save(trainig_data, path)\n\n if loss < self.best_loss:\n self.best_loss = loss\n best_path = get_training_data_path(model_dir, best=True)\n shutil.copyfile(path, best_path)\n\n if self.save_all:\n index_path = get_training_data_path(model_dir, index=next(self.index))\n shutil.copyfile(path, index_path)\n\n\n\n\n" ]
[ [ "torch.load", "torch.save" ] ]
huangyh09/limix
[ "bed5b8e0aaa9b11f19bdd13b76d21510e56064be", "bed5b8e0aaa9b11f19bdd13b76d21510e56064be" ]
[ "limix/qc/_covariance.py", "limix/stats/_confusion.py" ]
[ "def normalise_covariance(K, out=None):\n \"\"\"\n Variance rescaling of covariance matrix 𝙺.\n\n Let n be the number of rows (or columns) of 𝙺 and let\n mᵢ be the average of the values in the i-th column.\n Gower rescaling is defined as\n\n .. math::\n\n 𝙺(n - 1)/(𝚝𝚛𝚊𝚌𝚎(𝙺) - ∑mᵢ).\n\n Notes\n -----\n The reasoning of the scaling is as follows.\n Let 𝐠 be a vector of n independent samples and let 𝙲 be the Gower's centering\n matrix.\n The unbiased variance estimator is\n\n .. math::\n\n v = ∑ (gᵢ-ḡ)²/(n-1) = 𝚝𝚛𝚊𝚌𝚎((𝐠-ḡ𝟏)ᵀ(𝐠-ḡ𝟏))/(n-1) = 𝚝𝚛𝚊𝚌𝚎(𝙲𝐠𝐠ᵀ𝙲)/(n-1)\n\n Let 𝙺 be the covariance matrix of 𝐠.\n The expectation of the unbiased variance estimator is\n\n .. math::\n\n 𝐄[v] = 𝚝𝚛𝚊𝚌𝚎(𝙲𝐄[𝐠𝐠ᵀ]𝙲)/(n-1) = 𝚝𝚛𝚊𝚌𝚎(𝙲𝙺𝙲)/(n-1),\n\n assuming that 𝐄[gᵢ]=0.\n We thus divide 𝙺 by 𝐄[v] to achieve an unbiased normalisation on the random variable\n gᵢ.\n\n Parameters\n ----------\n K : array_like\n Covariance matrix to be normalised.\n out : array_like, optional\n Result destination. Defaults to ``None``.\n\n Examples\n --------\n .. doctest::\n\n >>> from numpy import dot, mean, zeros\n >>> from numpy.random import RandomState\n >>> from limix.qc import normalise_covariance\n >>>\n >>> random = RandomState(0)\n >>> X = random.randn(10, 10)\n >>> K = dot(X, X.T)\n >>> Z = random.multivariate_normal(zeros(10), K, 500)\n >>> print(\"%.3f\" % mean(Z.var(1, ddof=1)))\n 9.824\n >>> Kn = normalise_covariance(K)\n >>> Zn = random.multivariate_normal(zeros(10), Kn, 500)\n >>> print(\"%.3f\" % mean(Zn.var(1, ddof=1)))\n 1.008\n\n .. _Dask: https://dask.pydata.org/\n \"\"\"\n from numpy import asarray\n import dask.array as da\n from pandas import DataFrame\n import xarray as xr\n\n if isinstance(K, DataFrame):\n K = K.astype(float)\n trace = K.values.trace()\n elif isinstance(K, da.Array):\n trace = da.diag(K).sum()\n elif isinstance(K, xr.DataArray):\n trace = da.diag(K.data).sum()\n else:\n K = asarray(K, float)\n trace = K.trace()\n\n c = asarray((K.shape[0] - 1) / (trace - K.mean(axis=0).sum()), float)\n if out is None:\n return K * c\n\n _copyto(out, K)\n _inplace_mult(out, c)\n\n return out\n\n\ndef _copyto(dst, src):\n from numpy import copyto, ndarray\n import dask.array as da\n from pandas import DataFrame\n\n if isinstance(dst, DataFrame):\n copyto(dst.values, src)\n elif isinstance(dst, ndarray) and isinstance(src, da.Array):\n copyto(dst, src.compute())\n else:\n copyto(dst, src)\n\n\ndef _inplace_mult(out, c):\n import dask.array as da\n\n if isinstance(c, da.Array):\n c = c.compute()\n out *= c\n", "from __future__ import division\n\nimport logging\n\n\ndef _get_jit():\n try:\n from numba import jit\n except ImportError:\n\n def jit(x, *args, **kwargs):\n return x\n\n return jit\n\n\ndef _get_walk_left():\n jit = _get_jit()\n\n @jit(cache=True)\n def _walk_left(pos, c, dist):\n step = 0\n middle = pos[c]\n i = c\n while i > 0 and step < dist:\n i -= 1\n step = middle - pos[i]\n if step > dist:\n i += 1\n return i\n\n return _walk_left\n\n\ndef _get_walk_right():\n jit = _get_jit()\n\n @jit(cache=True)\n def _walk_right(pos, c, dist):\n step = 0\n middle = pos[c]\n i = c\n n = len(pos)\n while i < n - 1 and step < dist:\n i += 1\n step = pos[i] - middle\n if step > dist:\n i -= 1\n return i\n\n return _walk_right\n\n\ndef roc_curve(multi_score, method, max_fpr=0.05):\n from numpy import arange, empty_like, mean, std, sqrt\n\n max_fpr = float(max_fpr)\n fprs = arange(0.0, max_fpr, step=0.001)\n tprs = empty_like(fprs)\n tprs_stde = empty_like(fprs)\n for (i, fpr) in enumerate(fprs):\n tprs_ = 
multi_score.get_tprs(method, fpr=fpr, approach=\"rank\")\n tprs[i] = mean(tprs_)\n tprs_stde[i] = std(tprs_) / sqrt(len(tprs_))\n return (fprs, tprs, tprs_stde)\n\n\n# TODO: convert to numpy style\ndef confusion_matrix(df, wsize=50000):\n \"\"\"Provide a couple of scores based on the idea of windows around\n genetic markers.\n\n :param causals: Indices defining the causal markers.\n :param pos: Within-chromossome base-pair position of each candidate\n marker, in crescent order.\n \"\"\"\n from numpy import argsort, asarray, concatenate, where\n\n logger = logging.getLogger(__name__)\n wsize = int(wsize)\n\n if \"chrom\" not in df:\n df = df.assign(chrom=[\"1\"] * len(df))\n\n df.sort_values(by=[\"chrom\", \"pos\"], inplace=True)\n\n chromids = df[\"chrom\"].unique()\n\n offset = 0\n idx_offset = 0\n pos = []\n causal = []\n pv = []\n for cid in sorted(chromids):\n df0 = df.query(\"chrom=='%s'\" % cid)\n\n pos.append(offset + asarray(df0[\"pos\"], float))\n pv.append(asarray(df0[\"pv\"], float))\n offset += pos[-1][-1] + wsize // 2 + 1\n\n if df0[\"causal\"].sum() > 0:\n causal.append(idx_offset + where(df0[\"causal\"])[0])\n idx_offset += len(df0)\n\n pos = concatenate(pos)\n pv = concatenate(pv)\n causal = concatenate(causal)\n causal = asarray(causal, int)\n\n total_size = pos[-1] - pos[0]\n if wsize > 0.1 * total_size:\n perc = wsize // total_size * 100\n logger.warn(\n \"The window size is %d%% of the total candidate\" + \" region.\", int(perc)\n )\n\n ld_causal_markers = set()\n for _, c in enumerate(causal):\n if wsize == 1:\n right = left = pos[c]\n else:\n left = _get_walk_left()(pos, c, wsize // 2)\n right = _get_walk_right()(pos, c, wsize // 2)\n for i in range(left, right + 1):\n ld_causal_markers.add(i)\n\n P = len(ld_causal_markers)\n N = len(pos) - P\n\n ld_causal_markers = list(ld_causal_markers)\n\n logger.info(\"Found %d positive and %d negative markers.\", P, N)\n\n return ConfusionMatrix(P, N, ld_causal_markers, argsort(pv))\n\n\ndef getter(func):\n class ItemGetter(object):\n def __getitem__(self, i):\n return func(i)\n\n def __lt__(self, other):\n from numpy import s_\n\n return func(s_[:]) < other\n\n def __le__(self, other):\n from numpy import s_\n\n return func(s_[:]) <= other\n\n def __gt__(self, other):\n from numpy import s_\n\n return func(s_[:]) > other\n\n def __ge__(self, other):\n from numpy import s_\n\n return func(s_[:]) >= other\n\n def __eq__(self, other):\n from numpy import s_\n\n return func(s_[:]) == other\n\n return ItemGetter()\n\n\n# TODO: document it\nclass ConfusionMatrix(object):\n def __init__(self, P, N, true_set, idx_rank):\n from numpy import empty, asarray, searchsorted\n\n self._TP = empty(P + N + 1, dtype=int)\n self._FP = empty(P + N + 1, dtype=int)\n if len(idx_rank) != P + N:\n raise ValueError(\n \"Rank indices array has to have length equal\" + \" to ``P + N``.\"\n )\n\n true_set = asarray(true_set, int)\n true_set.sort()\n\n idx_rank = asarray(idx_rank, int)\n\n ins_pos = searchsorted(true_set, idx_rank)\n _confusion_matrix_tp_fp(P + N, ins_pos, true_set, idx_rank, self._TP, self._FP)\n self._N = N\n self._P = P\n\n @property\n def TP(self):\n return getter(lambda i: self._TP[i])\n\n @property\n def FP(self):\n return getter(lambda i: self._FP[i])\n\n @property\n def TN(self):\n return getter(lambda i: self._N - self.FP[i])\n\n @property\n def FN(self):\n return getter(lambda i: self._P - self.TP[i])\n\n @property\n def sensitivity(self):\n \"\"\" Sensitivity (also known as true positive rate.)\n \"\"\"\n return getter(lambda i: 
self.TP[i] / self._P)\n\n @property\n def tpr(self):\n return self.sensitivity\n\n @property\n def recall(self):\n return self.sensitivity\n\n @property\n def specifity(self):\n \"\"\" Specifity (also known as true negative rate.)\n \"\"\"\n return getter(lambda i: self.TN[i] / self._N)\n\n @property\n def precision(self):\n from numpy import nan\n\n return getter(\n lambda i: nan if i == 0 else self.TP[i] / (self.TP[i] + self.FP[i])\n )\n\n @property\n def npv(self):\n \"\"\" Negative predictive value.\n \"\"\"\n return getter(lambda i: self.TN[i] / (self.TN[i] + self.FN[i]))\n\n @property\n def fallout(self):\n \"\"\" Fall-out (also known as false positive rate.)\n \"\"\"\n return getter(lambda i: 1 - self.specifity[i])\n\n @property\n def fpr(self):\n return self.fallout\n\n @property\n def fnr(self):\n \"\"\" False negative rate.\n \"\"\"\n return getter(lambda i: 1 - self.sensitivity[i])\n\n @property\n def fdr(self):\n \"\"\" False discovery rate.\n \"\"\"\n return getter(lambda i: 1 - self.precision[i])\n\n @property\n def accuracy(self):\n \"\"\" Accuracy.\n \"\"\"\n return getter(lambda i: (self.TP[i] + self.TN[i]) / (self._N + self._P))\n\n @property\n def f1score(self):\n \"\"\" F1 score (harmonic mean of precision and sensitivity).\n \"\"\"\n\n def denominator(i):\n return 2 * self.TP[i] + self.FP[i] + self.FN[i]\n\n return getter(lambda i: 2 * self.TP[i] / denominator(i))\n\n def roc(self):\n from numpy import argsort\n\n tpr = self.tpr[1:]\n fpr = self.fpr[1:]\n\n idx = argsort(fpr)\n fpr = fpr[idx]\n tpr = tpr[idx]\n\n return (fpr, tpr)\n\n\ndef auc(fpr, tpr):\n left = fpr[0]\n area = 0.0\n for i in range(1, len(fpr)):\n width = fpr[i] - left\n area += width * tpr[i - 1]\n left = fpr[i]\n area += (1 - left) * tpr[-1]\n return area\n\n\ndef _confusion_matrix_tp_fp(n, ins_pos, true_set, idx_rank, TP, FP):\n TP[0] = 0\n FP[0] = 0\n i = 0\n while i < n:\n FP[i + 1] = FP[i]\n TP[i + 1] = TP[i]\n\n j = ins_pos[i]\n if j == len(true_set) or true_set[j] != idx_rank[i]:\n FP[i + 1] += 1\n else:\n TP[i + 1] += 1\n i += 1\n" ]
[ [ "numpy.copyto", "numpy.asarray" ], [ "numpy.asarray", "numpy.arange", "numpy.empty_like", "numpy.concatenate", "numpy.std", "numpy.mean", "numpy.searchsorted", "numpy.argsort", "numpy.where", "numpy.empty" ] ]
hyoseupjang/Fractal_Practice
[ "4a7221f25b705364aafe1b0ce796672a5daa7b21" ]
[ "Visualize.py" ]
[ "import numpy as np\nfrom PIL import Image\nxres,yres=np.fromfile('data.dat',dtype=np.int32,count=2,offset=0)\ndata=np.fromfile('data.dat',dtype=np.int32,offset=8)\nprint('File loaded...')\ndata=np.array(data.reshape(yres,xres),dtype=np.uint8)\nprint('Converted to uint8... Rendering. ')\nImage.fromarray(data,'L').save('output.png')" ]
[ [ "numpy.fromfile" ] ]
awhite862/kelvin
[ "a2e5c3acd8bbf9a9b3e88f321849374e7070f000" ]
[ "kelvin/hubbard_system.py" ]
[ "import logging\nimport numpy\nfrom cqcpy import ft_utils\nfrom cqcpy.ov_blocks import one_e_blocks\nfrom cqcpy.ov_blocks import two_e_blocks\nfrom cqcpy.ov_blocks import two_e_blocks_full\nfrom cqcpy import utils\nfrom .system import System\n\neinsum = numpy.einsum\n\n\nclass HubbardSystem(System):\n \"\"\"Hubbard model system in a mean-field basis\n\n Attributes:\n T (float): Temperature.\n model: Object specifying details of the model.\n Pa: Mean-field alpha density\n Pa: Mean-field beta density\n mu (float): Chemical potential.\n Na (float): Number of alpha electrons.\n Nb (float): Number of beta electrons.\n \"\"\"\n def __init__(self, T, model, Pa=None, Pb=None, mu=None,\n na=None, nb=None, ua=None, ub=None, orbtype='u'):\n self.T = T\n self.model = model\n self.Pa = Pa\n self.Pb = Pb\n self.orbtype = orbtype\n if na is None:\n assert(nb is None)\n assert(mu is not None)\n self.mu = mu\n self.na = na\n self.nb = nb\n self.beta = 1.0 / self.T if self.T > 0.0 else 1.0e20\n else:\n self.na = na\n self.nb = nb\n assert(na > 0)\n assert(nb > 0)\n assert(self.T == 0.0)\n self.beta = 1.0e20\n self.mu = None\n\n # Build T = 0 fock matrices\n if Pa is None and Pb is None:\n if ua is None or ub is None:\n raise Exception(\"No reference provided\")\n if na is None or nb is None:\n raise Exception(\"No reference provided\")\n self.Pa = numpy.einsum('pi,qi->pq', ua[:na], ua[:na])\n self.Pb = numpy.einsum('pi,qi->pq', ua[:nb], ua[:nb])\n self.ua = ua\n self.ub = ub\n # build and diagonalize fock matrices\n V = self.model.get_umatS()\n Va = V - V.transpose((0, 1, 3, 2))\n Fa = self.r_hcore()\n Fb = self.r_hcore()\n Fa += numpy.einsum('pqrs,qs->pr', Va, Pa)\n Fa += numpy.einsum('pqrs,qs->pr', V, Pb)\n Fb += numpy.einsum('pqrs,qs->pr', Va, Pb)\n Fb += numpy.einsum('pqrs,pr->qs', V, Pa)\n self.Fa = Fa\n self.Fb = Fb\n if ua is None:\n assert(ub is None)\n self.ea, self.ua = numpy.linalg.eigh(self.Fa)\n self.eb, self.ub = numpy.linalg.eigh(self.Fb)\n else:\n self.ea = numpy.einsum('ij,ip,jq->pq', self.Fa, self.ua, self.ua).diagonal()\n self.eb = numpy.einsum('ij,ip,jq->pq', self.Fb, self.ua, self.ub).diagonal()\n\n def has_g(self):\n return True\n\n def has_u(self):\n if self.orbtype == 'g':\n return False\n else:\n return True\n\n def has_r(self):\n return False\n\n def verify(self, T, mu):\n if T > 0.0:\n s = T == self.T and mu == self.mu\n else:\n s = T == self.T\n if not s:\n return False\n else:\n return True\n\n def const_energy(self):\n return 0.0\n\n # TODO: clean this up\n def get_mp1(self):\n if self.T == 0:\n # orbital energies\n ea, eb = self.u_energies_tot()\n Va, Vb, Vabab = self.u_aint_tot()\n foa = numpy.zeros(ea.shape)\n fob = numpy.zeros(eb.shape)\n for i in range(self.na):\n foa[i] = 1.0\n for i in range(self.nb):\n fob[i] = 1.0\n E1 = -0.5*numpy.einsum('ijij,i,j->', Va, foa, foa)\n E1 -= 0.5*numpy.einsum('ijij,i,j->', Vb, fob, fob)\n E1 -= numpy.einsum('ijij,i,j->', Vabab, foa, fob)\n Fa, Fb = self.u_fock()\n Fao = Fa.oo - numpy.diag(self.ea[:self.na])\n Fbo = Fb.oo - numpy.diag(self.eb[:self.nb])\n E1 += numpy.einsum('ii->', Fao)\n E1 += numpy.einsum('ii->', Fbo)\n return E1\n else:\n Va, Vb, Vabab = self.u_aint_tot()\n ea, eb = self.u_energies_tot()\n foa = ft_utils.ff(self.beta, ea, self.mu)\n fob = ft_utils.ff(self.beta, eb, self.mu)\n E1 = -0.5*numpy.einsum('ijij,i,j->', Va, foa, foa)\n E1 = -0.5*numpy.einsum('ijij,i,j->', Vb, fob, fob)\n E1 = -numpy.einsum('ijij,i,j->', Vabab, foa, fob)\n Fa, Fb = self.u_fock_tot()\n Fao = Fa - numpy.diag(ea)\n Fbo = Fb - numpy.diag(eb)\n E1 
+= numpy.einsum('ii,i->', Fao, foa)\n E1 += numpy.einsum('ii,i->', Fbo, fob)\n return E1\n\n def u_d_mp1(self, dveca, dvecb):\n if self.T > 0:\n Va, Vb, Vabab = self.u_aint_tot()\n ea, eb = self.u_energies_tot()\n foa = ft_utils.ff(self.beta, ea, self.mu)\n fva = ft_utils.ffv(self.beta, ea, self.mu)\n veca = dveca*foa*fva\n fob = ft_utils.ff(self.beta, eb, self.mu)\n fvb = ft_utils.ffv(self.beta, eb, self.mu)\n vecb = dvecb*fob*fvb\n Fa, Fb = self.u_fock_tot()\n D = -einsum('ii,i->', Fa - numpy.diag(ea), veca)\n D += -einsum('ii,i->', Fb - numpy.diag(eb), vecb)\n D += einsum('ijij,i,j->', Va, veca, foa)\n D += einsum('ijij,i,j->', Vb, vecb, fob)\n D += einsum('ijij,i,j->', Vabab, veca, fob)\n D += einsum('ijij,i,j->', Vabab, foa, vecb)\n Fa, Fb = self.u_fock_d_tot(dveca, dvecb)\n D += einsum('ii,i->', Fa, foa)\n D += einsum('ii,i->', Fb, fob)\n return D\n else:\n logging.warning(\"Derivative of MP1 energy is zero at OK\")\n return 0.0\n\n def g_d_mp1(self, dvec):\n if self.T > 0:\n V = self.g_aint_tot()\n en = self.g_energies_tot()\n fo = ft_utils.ff(self.beta, en, self.mu)\n fv = ft_utils.ffv(self.beta, en, self.mu)\n vec = dvec*fo*fv\n F = self.g_fock_tot()\n D = -einsum('ii,i->', F - numpy.diag(en), vec)\n D += einsum('ijij,i,j->', V, vec, fo)\n F = self.g_fock_d_tot(dvec)\n D += einsum('ii->i', F, fo)\n return D\n else:\n logging.warning(\"Derivative of MP1 energy is zero at OK\")\n return 0.0\n\n def u_mp1_den(self):\n if self.T > 0:\n Va, Vb, Vabab = self.u_aint_tot()\n beta = self.beta\n ea, eb = self.u_energies_tot()\n foa = ft_utils.ff(beta, ea, self.mu)\n fva = ft_utils.ffv(beta, ea, self.mu)\n veca = foa*fva\n fob = ft_utils.ff(beta, eb, self.mu)\n fvb = ft_utils.ffv(beta, eb, self.mu)\n vecb = fob*fvb\n T = self.model.get_tmatS()\n Ta = numpy.einsum('ij,ip,jq->pq', T, self.ua, self.ua)\n Tb = numpy.einsum('ij,ip,jq->pq', T, self.ub, self.ub)\n Da = -beta*einsum('ii,i->i', Ta - numpy.diag(ea), veca)\n Db = -beta*einsum('ii,i->i', Tb - numpy.diag(eb), vecb)\n Da += -beta*einsum('ijij,i,j->i', Va, veca, foa)\n Db += -beta*einsum('ijij,i,j->i', Vb, vecb, fob)\n Da += -beta*einsum('ijij,i,j->i', Vabab, veca, fob)\n Db += -beta*einsum('ijij,i,j->j', Vabab, foa, vecb)\n return Da, Db\n else:\n logging.warning(\"Derivative of MP1 energy is zero at OK\")\n return 0.0\n\n def g_mp1_den(self):\n if self.T > 0:\n V = self.g_aint_tot()\n beta = self.beta\n en = self.g_energies_tot()\n T = self.model.get_tmat()\n Utot = utils.block_diag(self.ua, self.ub)\n T = numpy.einsum('ij,ip,jq->pq', T, Utot, Utot)\n fo = ft_utils.ff(beta, en, self.mu)\n fv = ft_utils.ffv(beta, en, self.mu)\n vec = fo*fv\n D = -beta*numpy.einsum('ii,i->i', T - numpy.diag(en), vec)\n D += -beta*numpy.einsum('ijij,i,j->i', V, vec, fo)\n return D\n else:\n logging.warning(\"Derivative of MP1 energy is zero at OK\")\n return 0.0\n\n def r_energies(self):\n raise Exception(\"Restricted energies are not definted\")\n\n def u_energies(self):\n if self.T > 0.0:\n raise Exception(\"Undefined ov blocks at FT\")\n mu = self.mu\n if mu is None:\n eoa = self.ea[:self.na]\n eob = self.eb[:self.nb]\n eva = self.ea[self.na:]\n evb = self.eb[self.nb:]\n return (eoa, eva, eob, evb)\n else:\n ea = self.ea\n eb = self.eb\n return (ea[ea < mu], ea[ea > mu], eb[eb < mu], eb[eb > mu])\n\n def g_energies(self):\n if self.T > 0.0:\n raise Exception(\"Undefined ov blocks at FT\")\n mu = self.mu\n if mu is None:\n ea = self.ea\n eb = self.eb\n eoa = ea[0:self.na]\n eob = eb[0:self.nb]\n eva = ea[self.na:]\n evb = ea[self.nb:]\n return 
(numpy.hstack((eoa, eob)), numpy.hstack((eva, evb)))\n else:\n dtot = self.g_energies_tot()\n return (dtot[dtot < mu], dtot[dtot > mu])\n\n def r_energies_tot(self):\n raise Exception(\"Unrestricted reference\")\n\n def u_energies_tot(self):\n ea = self.ea\n eb = self.eb\n return ea, eb\n\n def g_energies_tot(self):\n ea = self.ea\n eb = self.eb\n return numpy.hstack((ea, eb))\n\n def r_fock(self):\n raise Exception(\"Restricted mean-field is not implemented\")\n\n def u_fock(self):\n oa, va, ob, vb = self._u_get_ov()\n oidxa = numpy.r_[oa]\n vidxa = numpy.r_[va]\n oidxb = numpy.r_[ob]\n vidxb = numpy.r_[vb]\n foa = numpy.zeros(self.ea.shape)\n fob = numpy.zeros(self.eb.shape)\n for i in range(self.na):\n foa[i] = 1.0\n for i in range(self.nb):\n fob[i] = 1.0\n Va, Vb, Vab = self.u_aint_tot()\n T = self.model.get_tmatS()\n Fa = numpy.einsum('ij,ip,jq->pq', T, self.ua, self.ua)\n Fb = numpy.einsum('ij,ip,jq->pq', T, self.ub, self.ub)\n Fa += numpy.einsum('pqrs,q,s->pr', Va, foa, foa)\n Fa += numpy.einsum('pqrs,q,s->pr', Vab, fob, fob)\n Fb += numpy.einsum('pqrs,q,s->pr', Vb, fob, fob)\n Fb += numpy.einsum('pqrs,p,r->qs', Vab, foa, foa)\n Fooa = Fa[numpy.ix_(oidxa, oidxa)]\n Fova = Fa[numpy.ix_(oidxa, vidxa)]\n Fvoa = Fa[numpy.ix_(vidxa, oidxa)]\n Fvva = Fa[numpy.ix_(vidxa, vidxa)]\n Foob = Fb[numpy.ix_(oidxb, oidxb)]\n Fovb = Fb[numpy.ix_(oidxb, vidxb)]\n Fvob = Fb[numpy.ix_(vidxb, oidxb)]\n Fvvb = Fb[numpy.ix_(vidxb, vidxb)]\n Fa_blocks = one_e_blocks(Fooa, Fova, Fvoa, Fvva)\n Fb_blocks = one_e_blocks(Foob, Fovb, Fvob, Fvvb)\n return Fa_blocks, Fb_blocks\n\n def g_fock(self):\n if self.T > 0.0:\n raise Exception(\"Undefined ov blocks at FT\")\n o, v = self._get_ov()\n T = self.model.get_tmatS()\n Tg = utils.block_diag(T, T)\n V = self.g_aint_tot()\n n = self.ea.shape[0] + self.eb.shape[0]\n do = numpy.zeros((n))\n for io in o:\n do[io] = 1.0\n utot = utils.block_diag(self.ua, self.ub)\n F = numpy.einsum('ij,ip,jq->pq', Tg, utot, utot)\n F += numpy.einsum('pqrs,q,s->pr', V, do, do)\n oidx = numpy.r_[o]\n vidx = numpy.r_[v]\n Foo = F[numpy.ix_(oidx, oidx)]\n Fvv = F[numpy.ix_(vidx, vidx)]\n Fov = F[numpy.ix_(oidx, vidx)]\n Fvo = F[numpy.ix_(vidx, oidx)]\n return one_e_blocks(Foo, Fov, Fvo, Fvv)\n\n def r_fock_tot(self):\n raise Exception(\"Restricted Fock operator not defined\")\n return self.model.get_tmatS()\n\n def g_fock_tot(self):\n T = self.model.get_tmat()\n d = self.g_energies_tot()\n n = d.shape[0]\n if self.T > 0.0:\n fo = ft_utils.ff(self.beta, d, self.mu)\n I = numpy.identity(n)\n den = numpy.einsum('pi,i,qi->pq', I, fo, I)\n else:\n to = numpy.zeros((n, self.N))\n o, v = self._get_ov()\n for i, io in enumerate(o):\n to[i, io] = 1.0\n den = numpy.einsum('pi,qi->pq', to, to)\n V = self.g_aint_tot()\n JK = numpy.einsum('prqs,rs->pq', V, den)\n Utot = utils.block_diag(self.ua, self.ub)\n return JK + numpy.einsum('ij,ip,jq->pq', T, Utot, Utot)\n\n def u_fock_tot(self):\n Ta = self.model.get_tmatS()\n Tb = Ta\n da, db = self.u_energies_tot()\n na = da.shape[0]\n nb = db.shape[0]\n if self.T > 0.0:\n foa = ft_utils.ff(self.beta, da, self.mu)\n fob = ft_utils.ff(self.beta, db, self.mu)\n Ia = numpy.identity(na)\n Ib = numpy.identity(nb)\n dena = numpy.einsum('pi,i,qi->pq', Ia, foa, Ia)\n denb = numpy.einsum('pi,i,qi->pq', Ib, fob, Ib)\n else:\n N = self.ea.shape[0]\n toa = numpy.zeros((na, N))\n tob = numpy.zeros((nb, N))\n oa, va, ob, vb = self._u_get_ov()\n for i, io in enumerate(oa):\n toa[i, io] = 1.0\n for i, io in enumerate(ob):\n tob[i, io] = 1.0\n dena = 
numpy.einsum('pi,qi->pq', toa, toa)\n denb = numpy.einsum('pi,qi->pq', tob, tob)\n Va, Vb, Vabab = self.u_aint_tot()\n JKa = numpy.einsum('prqs,rs->pq', Va, dena)\n JKa += numpy.einsum('prqs,rs->pq', Vabab, denb)\n JKb = numpy.einsum('prqs,rs->pq', Vb, denb)\n JKb += numpy.einsum('prqs,pq->rs', Vabab, dena)\n Fa = JKa.copy()\n Fb = JKb.copy()\n Fa += numpy.einsum('ij,ip,jq->pq', Ta, self.ua, self.ua)\n Fb += numpy.einsum('ij,ip,jq->pq', Tb, self.ub, self.ub)\n return Fa, Fb\n\n def u_fock_d_tot(self, dveca, dvecb):\n da, db = self.u_energies_tot()\n na = da.shape[0]\n nb = db.shape[0]\n if self.T == 0.0:\n logging.warning(\"Occupation derivatives are zero at 0K\")\n return numpy.zeros((na, na)), numpy.zeros((nb, nb))\n foa = ft_utils.ff(self.beta, da, self.mu)\n fob = ft_utils.ff(self.beta, db, self.mu)\n fva = ft_utils.ffv(self.beta, da, self.mu)\n fvb = ft_utils.ffv(self.beta, db, self.mu)\n veca = dveca*foa*fva\n vecb = dvecb*fob*fvb\n Ia = numpy.identity(na)\n Ib = numpy.identity(nb)\n dena = numpy.einsum('pi,i,qi->pq', Ia, veca, Ia)\n denb = numpy.einsum('pi,i,qi->pq', Ib, vecb, Ib)\n Va, Vb, Vabab = self.u_aint_tot()\n JKa = numpy.einsum('prqs,rs->pq', Va, dena)\n JKa += numpy.einsum('prqs,rs->pq', Vabab, denb)\n JKb = numpy.einsum('prqs,rs->pq', Vb, denb)\n JKb += numpy.einsum('prqs,pq->rs', Vabab, dena)\n Fa = -JKa\n Fb = -JKb\n return Fa, Fb\n\n def g_fock_d_tot(self, dvec):\n d = self.g_energies_tot()\n n = d.shape[0]\n if self.T == 0.0:\n logging.warning(\"Occupations derivatives are zero at 0K\")\n return numpy.zeros((n, n))\n fo = ft_utils.ff(self.beta, d, self.mu)\n fv = ft_utils.ffv(self.beta, d, self.mu)\n vec = dvec*fo*fv\n I = numpy.identity(n)\n den = einsum('pi,i,qi->pq', I, vec, I)\n V = self.g_aint_tot()\n JK = einsum('prqs,rs->pq', V, den)\n return -JK\n\n def u_fock_d_den(self):\n da, db = self.u_energies_tot()\n na = da.shape[0]\n nb = db.shape[0]\n if self.T == 0.0:\n logging.warning(\"Occupation derivatives are zero at 0K\")\n return (numpy.zeros((na, na, na)),\n numpy.zeros((na, na, nb)),\n numpy.zeros((nb, nb, na)),\n numpy.zeros((nb, nb, nb)))\n foa = ft_utils.ff(self.beta, da, self.mu)\n fob = ft_utils.ff(self.beta, db, self.mu)\n fva = ft_utils.ffv(self.beta, da, self.mu)\n fvb = ft_utils.ffv(self.beta, db, self.mu)\n veca = foa*fva\n vecb = fob*fvb\n Va, Vb, Vabab = self.u_aint_tot()\n JKaa = numpy.einsum('piqi,i->pqi', Va, veca)\n JKab = numpy.einsum('piqi,i->pqi', Vabab, vecb)\n JKbb = numpy.einsum('piqi,i->pqi', Vb, vecb)\n JKba = numpy.einsum('iris,i->rsi', Vabab, veca)\n return JKaa, JKab, JKbb, JKba\n\n def g_fock_d_den(self):\n d = self.g_energies_tot()\n n = d.shape[0]\n if self.T == 0.0:\n logging.warning(\"Occupations derivatives are zero at 0K\")\n return numpy.zeros((n, n))\n fo = ft_utils.ff(self.beta, d, self.mu)\n fv = ft_utils.ffv(self.beta, d, self.mu)\n vec = fo*fv\n V = self.g_aint_tot()\n JK = einsum('piqi,i->pqi', V, vec)\n return JK\n\n def r_hcore(self):\n return self.model.get_tmatS()\n\n def g_hcore(self):\n return utils.block_diag(self.model.get_tmat(), self.model.get_tmat())\n\n def u_aint(self):\n if self.T > 0.0:\n raise Exception(\"Undefined ov blocks at FT\")\n oa, va, ob, vb = self._u_get_ov()\n oidxa = numpy.r_[oa]\n vidxa = numpy.r_[va]\n oidxb = numpy.r_[ob]\n vidxb = numpy.r_[vb]\n Va, Vb, Vabab = self.u_aint_tot()\n\n Vvvvv = Va[numpy.ix_(vidxa, vidxa, vidxa, vidxa)]\n Vvvvo = Va[numpy.ix_(vidxa, vidxa, vidxa, oidxa)]\n Vvovv = Va[numpy.ix_(vidxa, oidxa, vidxa, vidxa)]\n Vvvoo = Va[numpy.ix_(vidxa, vidxa, oidxa, 
oidxa)]\n Vvovo = Va[numpy.ix_(vidxa, oidxa, vidxa, oidxa)]\n Voovv = Va[numpy.ix_(oidxa, oidxa, vidxa, vidxa)]\n Vvooo = Va[numpy.ix_(vidxa, oidxa, oidxa, oidxa)]\n Vooov = Va[numpy.ix_(oidxa, oidxa, oidxa, vidxa)]\n Voooo = Va[numpy.ix_(oidxa, oidxa, oidxa, oidxa)]\n Va = two_e_blocks(\n vvvv=Vvvvv, vvvo=Vvvvo,\n vovv=Vvovv, vvoo=Vvvoo,\n vovo=Vvovo, oovv=Voovv,\n vooo=Vvooo, ooov=Vooov,\n oooo=Voooo)\n Vvvvv = Vb[numpy.ix_(vidxb, vidxb, vidxb, vidxb)]\n Vvvvo = Vb[numpy.ix_(vidxb, vidxb, vidxb, oidxb)]\n Vvovv = Vb[numpy.ix_(vidxb, oidxb, vidxb, vidxb)]\n Vvvoo = Vb[numpy.ix_(vidxb, vidxb, oidxb, oidxb)]\n Vvovo = Vb[numpy.ix_(vidxb, oidxb, vidxb, oidxb)]\n Voovv = Vb[numpy.ix_(oidxb, oidxb, vidxb, vidxb)]\n Vvooo = Vb[numpy.ix_(vidxb, oidxb, oidxb, oidxb)]\n Vooov = Vb[numpy.ix_(oidxb, oidxb, oidxb, vidxb)]\n Voooo = Vb[numpy.ix_(oidxb, oidxb, oidxb, oidxb)]\n Vb = two_e_blocks(\n vvvv=Vvvvv, vvvo=Vvvvo,\n vovv=Vvovv, vvoo=Vvvoo,\n vovo=Vvovo, oovv=Voovv,\n vooo=Vvooo, ooov=Vooov,\n oooo=Voooo)\n\n Vvvvv = Vabab[numpy.ix_(vidxa, vidxb, vidxa, vidxb)]\n Vvvvo = Vabab[numpy.ix_(vidxa, vidxb, vidxa, oidxb)]\n Vvvov = Vabab[numpy.ix_(vidxa, vidxb, oidxa, vidxb)]\n Vvovv = Vabab[numpy.ix_(vidxa, oidxb, vidxa, vidxb)]\n Vovvv = Vabab[numpy.ix_(oidxa, vidxb, vidxa, vidxb)]\n Vvvoo = Vabab[numpy.ix_(vidxa, vidxb, oidxa, oidxb)]\n Vvoov = Vabab[numpy.ix_(vidxa, oidxb, oidxa, vidxb)]\n Vvovo = Vabab[numpy.ix_(vidxa, oidxb, vidxa, oidxb)]\n Vovvo = Vabab[numpy.ix_(oidxa, vidxb, vidxa, oidxb)]\n Vovov = Vabab[numpy.ix_(oidxa, vidxb, oidxa, vidxb)]\n Voovv = Vabab[numpy.ix_(oidxa, oidxb, vidxa, vidxb)]\n Vvooo = Vabab[numpy.ix_(vidxa, oidxb, oidxa, oidxb)]\n Vovoo = Vabab[numpy.ix_(oidxa, vidxb, oidxa, oidxb)]\n Voovo = Vabab[numpy.ix_(oidxa, oidxb, vidxa, oidxb)]\n Vooov = Vabab[numpy.ix_(oidxa, oidxb, oidxa, vidxb)]\n Voooo = Vabab[numpy.ix_(oidxa, oidxb, oidxa, oidxb)]\n Vabab = two_e_blocks_full(\n vvvv=Vvvvv, vvvo=Vvvvo, vvov=Vvvov, vovv=Vvovv,\n ovvv=Vovvv, vvoo=Vvvoo, vovo=Vvovo, ovvo=Vovvo,\n voov=Vvoov, ovov=Vovov, oovv=Voovv, vooo=Vvooo,\n ovoo=Vovoo, oovo=Voovo, ooov=Vooov, oooo=Voooo)\n return Va, Vb, Vabab\n\n def g_aint(self, code=0):\n Umat = self.g_aint_tot()\n o, v = self._get_ov()\n Vvvvv = None\n Vvvvo = None\n Vvovv = None\n Vvvoo = None\n Vvovo = None\n Voovv = None\n Vvooo = None\n Vooov = None\n Voooo = None\n oidx = numpy.r_[o]\n vidx = numpy.r_[v]\n if code == 0 or code == 1:\n Vvvvv = Umat[numpy.ix_(vidx, vidx, vidx, vidx)]\n if code == 0 or code == 2:\n Vvvvo = Umat[numpy.ix_(vidx, vidx, vidx, oidx)]\n if code == 0 or code == 3:\n Vvovv = Umat[numpy.ix_(vidx, oidx, vidx, vidx)]\n if code == 0 or code == 4:\n Vvvoo = Umat[numpy.ix_(vidx, vidx, oidx, oidx)]\n if code == 0 or code == 5:\n Vvovo = Umat[numpy.ix_(vidx, oidx, vidx, oidx)]\n if code == 0 or code == 6:\n Voovv = Umat[numpy.ix_(oidx, oidx, vidx, vidx)]\n if code == 0 or code == 7:\n Vvooo = Umat[numpy.ix_(vidx, oidx, oidx, oidx)]\n if code == 0 or code == 8:\n Vooov = Umat[numpy.ix_(oidx, oidx, oidx, vidx)]\n if code == 0 or code == 9:\n Voooo = Umat[numpy.ix_(oidx, oidx, oidx, oidx)]\n return two_e_blocks(\n vvvv=Vvvvv, vvvo=Vvvvo,\n vovv=Vvovv, vvoo=Vvvoo,\n vovo=Vvovo, oovv=Voovv,\n vooo=Vvooo, ooov=Vooov,\n oooo=Voooo)\n\n def u_aint_tot(self):\n V = self.model.get_umatS()\n Va = V - V.transpose((0, 1, 3, 2))\n Vabab = self._transform2(V, self.ua, self.ub, self.ua, self.ub)\n Vb = self._transform1(Va, self.ub)\n Va = self._transform1(Va, self.ua)\n return Va, Vb, Vabab\n\n def g_aint_tot(self):\n Us = 
self.model.get_umatS()\n n, n1, n2, n3 = Us.shape\n assert(n == n1)\n assert(n == n2)\n assert(n == n3)\n\n U = numpy.zeros((2*n, 2*n, 2*n, 2*n))\n U[n:, :n, n:, :n] = Us\n U[:n, n:, n:, :n] = -Us\n U[:n, n:, :n, n:] = Us\n U[n:, :n, :n, n:] = -Us\n utot = utils.block_diag(self.ua, self.ub)\n U = self._transform1(U, utot)\n return U\n\n def r_int_tot(self):\n raise Exception(\"Restricted MOs not implemented\")\n return None\n\n def g_int_tot(self):\n U = self.model.get_umat()\n utot = utils.block_diag(self.ua, self.ub)\n U = self._transform1(U, utot)\n return U\n\n def _get_ov(self):\n \"\"\"Get occupied and virtual indices in the general orbital space\"\"\"\n if self.mu is None:\n e = self.g_energies_tot()\n es = numpy.argsort(e)\n N = self.na + self.nb\n occ = es[:N]\n vir = es[N:]\n return (occ, vir)\n else:\n d = self.g_energies_tot()\n occ = []\n vir = []\n mu = self.mu\n for i in range(d.shape[0]):\n if d[i] < mu:\n occ.append(i)\n else:\n vir.append(i)\n return (occ, vir)\n\n def _u_get_ov(self):\n \"\"\"Get occupied and virtual indices in the general orbital space\"\"\"\n if self.mu is None:\n ea, eb = self.u_energies_tot()\n esa = numpy.argsort(ea)\n esb = numpy.argsort(eb)\n na = self.na\n nb = self.nb\n occa = esa[:na]\n occb = esb[:nb]\n vira = esa[na:]\n virb = esb[nb:]\n return occa, vira, occb, virb\n else:\n da, db = self.u_energies_tot()\n occa = []\n vira = []\n mu = self.mu\n for i in range(da.shape[0]):\n if da[i] < mu:\n occa.append(i)\n else:\n vira.append(i)\n for i in range(db.shape[0]):\n if db[i] < mu:\n occb.append(i)\n else:\n virb.append(i)\n return occa, vira, occb, virb\n\n def _transform1(self, V, u):\n return self._transform2(V, u, u, u, u)\n\n def _transform2(self, V, u1, u2, u3, u4):\n Umat2 = numpy.einsum('ijkl,ls->ijks', V, u4)\n Umat1 = numpy.einsum('ijks,kr->ijrs', Umat2, u3)\n Umat2 = numpy.einsum('ijrs,jq->iqrs', Umat1, u2)\n Umat1 = numpy.einsum('iqrs,ip->pqrs', Umat2, u1)\n return Umat1\n" ]
[ [ "numpy.diag", "numpy.hstack", "numpy.ix_", "numpy.einsum", "numpy.linalg.eigh", "numpy.identity", "numpy.argsort", "numpy.zeros" ] ]
MarcRoigVilamala/TwoOutput3DResNet
[ "71a98cefc050c134d3bc027b3a3144e121374776" ]
[ "TwoOutput3DResNet/main.py" ]
[ "import os\nimport json\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim import lr_scheduler\n\nfrom TwoOutput3DResNet.opts import parse_opts\nfrom TwoOutput3DResNet.model import generate_model\nfrom TwoOutput3DResNet.Transformations.spatial_transforms import get_spatial_transform, get_norm_method\nfrom TwoOutput3DResNet.Transformations.temporal_transforms import LoopPadding, TemporalRandomCrop\nfrom TwoOutput3DResNet.Transformations.target_transforms import TimeStampLabel, VideoIDAndFrames\nfrom TwoOutput3DResNet.dataset import get_training_set, get_validation_set, get_test_set\nfrom TwoOutput3DResNet.utils import Logger\nfrom TwoOutput3DResNet.train import train_epoch\nfrom TwoOutput3DResNet.validation import val_epoch\nfrom TwoOutput3DResNet import test\n\nif __name__ == '__main__':\n opt = parse_opts()\n if opt.root_path != '':\n opt.video_path = os.path.join(opt.root_path, opt.video_path)\n opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)\n opt.result_path = os.path.join(opt.root_path, opt.result_path)\n if opt.resume_path:\n opt.resume_path = os.path.join(opt.root_path, opt.resume_path)\n if opt.pretrain_path:\n opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)\n opt.arch = '{}-{}'.format(opt.model, opt.model_depth)\n print(opt)\n with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:\n json.dump(vars(opt), opt_file)\n\n torch.manual_seed(opt.manual_seed)\n\n model, parameters = generate_model(opt)\n print(model)\n criterion = nn.CrossEntropyLoss()\n if not opt.no_cuda:\n criterion = criterion.cuda()\n\n norm_method = get_norm_method(opt)\n\n if not opt.no_train:\n spatial_transform = get_spatial_transform(opt, norm_method, 'train')\n temporal_transform = TemporalRandomCrop(opt.sample_duration)\n # target_transform = ClassLabel()\n target_transform = TimeStampLabel('Temporal_Anomaly_Annotation_for_Testing_Videos.txt')\n training_data = get_training_set(opt, spatial_transform,\n temporal_transform, target_transform)\n train_loader = torch.utils.data.DataLoader(\n training_data,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.n_threads,\n pin_memory=True)\n train_logger = Logger(\n os.path.join(opt.result_path, 'train.log'),\n ['epoch', 'loss', 'acc', 'lr'])\n train_batch_logger = Logger(\n os.path.join(opt.result_path, 'train_batch.log'),\n ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])\n\n if opt.nesterov:\n dampening = 0\n else:\n dampening = opt.dampening\n optimizer = optim.SGD(\n parameters,\n lr=opt.learning_rate,\n momentum=opt.momentum,\n dampening=dampening,\n weight_decay=opt.weight_decay,\n nesterov=opt.nesterov)\n scheduler = lr_scheduler.ReduceLROnPlateau(\n optimizer, 'min', patience=opt.lr_patience)\n if not opt.no_val:\n spatial_transform = get_spatial_transform(opt, norm_method, 'val')\n temporal_transform = LoopPadding(opt.sample_duration)\n # target_transform = ClassLabel()\n target_transform = TimeStampLabel('Temporal_Anomaly_Annotation_for_Testing_Videos.txt')\n validation_data = get_validation_set(\n opt, spatial_transform, temporal_transform, target_transform)\n val_loader = torch.utils.data.DataLoader(\n validation_data,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_threads,\n pin_memory=True)\n val_logger = Logger(\n os.path.join(opt.result_path, 'val.log'), ['epoch', 'loss', 'acc'])\n\n if opt.resume_path:\n print('loading checkpoint {}'.format(opt.resume_path))\n checkpoint = torch.load(opt.resume_path)\n assert opt.arch == 
checkpoint['arch']\n\n opt.begin_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n if not opt.no_train:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('run')\n for i in range(opt.begin_epoch, opt.n_epochs + 1):\n if not opt.no_train:\n train_epoch(i, train_loader, model, criterion, optimizer, opt,\n train_logger, train_batch_logger)\n if not opt.no_val:\n validation_loss = val_epoch(i, val_loader, model, criterion, opt,\n val_logger)\n\n if not opt.no_train and not opt.no_val:\n scheduler.step(validation_loss)\n\n if opt.test:\n spatial_transform = get_spatial_transform(opt, norm_method, 'test')\n temporal_transform = LoopPadding(opt.sample_duration)\n # target_transform = VideoID()\n target_transform = VideoIDAndFrames()\n\n test_data = get_test_set(opt, spatial_transform, temporal_transform,\n target_transform)\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_threads,\n pin_memory=True)\n # test.test(test_loader, model, opt, test_data.class_names)\n test.every_segment_test(test_loader, model, opt, test_data.class_names)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.optim.SGD" ] ]
unionai/flyteevents-datahub
[ "972dd7c68c9f07b934c7c948068da429e9ce1813" ]
[ "flytelineage/tests/test_glue.py" ]
[ "import pytest\nimport moto\nimport boto3\n\n\n@pytest.fixture(scope=\"function\")\ndef moto_s3():\n with moto.mock_s3():\n s3 = boto3.resource(\"s3\", region_name=\"us-east-1\")\n s3.create_bucket(\n Bucket=\"bucket\",\n )\n yield s3\n\n\n@pytest.fixture(scope=\"module\")\ndef moto_glue():\n import os\n\n with moto.mock_glue():\n region_name = \"us-east-1\"\n os.environ[\"AWS_DEFAULT_REGION\"] = region_name\n glue = boto3.client(\"glue\", region_name=region_name)\n yield glue\n\n\ndef test_glue(moto_glue, moto_s3):\n from flytelineage.glue import GlueCatalogTarget\n\n target = GlueCatalogTarget(bucket_path=\"bucket/prefix\", kms_key_arn=\"bogus\")\n\n from flytelineage.interface import Pipeline\n\n pipeline = Pipeline(\n id=\"1\",\n name=\"a.b.c\",\n )\n from flytelineage.dataset import DatasetSchema\n import numpy as np\n import pandas as pd\n\n ds = DatasetSchema(\"foo\")\n a = np.array([[5, \"hello\", True], [2, \"goodbye\", False]])\n df = pd.DataFrame(a, columns=list(\"ABC\"))\n schema = ds.infer(df)\n dataset = (ds, schema, df)\n\n result = target.ingest(pipeline, [dataset])\n assert len(result) == 1\n assert result[0].get(\"paths\")[0].startswith(\"s3://bucket/prefix/flyte_a_b/foo/\")\n\n\ndef test_glue_with_db(moto_glue, moto_s3):\n database_name = \"mydb\"\n\n import awswrangler as wr\n\n wr.catalog.create_database(name=database_name)\n from flytelineage.glue import GlueCatalogTarget\n\n target = GlueCatalogTarget(\n bucket_path=\"bucket/prefix\", kms_key_arn=\"bogus\", db_name=database_name\n )\n\n from flytelineage.interface import Pipeline\n\n pipeline = Pipeline(\n id=\"1\",\n name=\"a.b.c\",\n )\n from flytelineage.dataset import DatasetSchema\n import numpy as np\n import pandas as pd\n\n ds = DatasetSchema(\"foo\")\n a = np.array([[5, \"hello\", True], [2, \"goodbye\", False]])\n df = pd.DataFrame(a, columns=list(\"ABC\"))\n schema = ds.infer(df)\n dataset = (ds, schema, df)\n\n result = target.ingest(pipeline, [dataset])\n assert len(result) == 1\n assert result[0].get(\"paths\")[0].startswith(\"s3://bucket/prefix/mydb/foo\")\n\n\ndef test_glue_error(moto_glue, moto_s3):\n from flytelineage.glue import GlueCatalogTarget\n\n target = GlueCatalogTarget(bucket_path=\"bucket/prefix\", kms_key_arn=\"bogus\")\n\n from flytelineage.interface import Pipeline\n\n pipeline = Pipeline(\n id=\"1\",\n name=\"a.b.c\",\n )\n from flytelineage.dataset import DatasetSchema\n import numpy as np\n import pandas as pd\n\n ds = DatasetSchema(\"foo\")\n a = np.array([[5, \"hello\", True], [2, \"goodbye\", False]])\n df = pd.DataFrame(a)\n schema = ds.infer(df)\n dataset = (ds, schema, df)\n result = target.ingest(pipeline, [dataset])\n assert len(result) == 0\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
yurymalkov/ann-benchmarks
[ "e5fa90cc5eee529a4c2650c2daf7589eca78bc20" ]
[ "ann_benchmarks/algorithms/kgraph.py" ]
[ "from __future__ import absolute_import\nimport os\nimport numpy\nimport pykgraph\nfrom ann_benchmarks.constants import INDEX_DIR\nfrom ann_benchmarks.algorithms.base import BaseANN\n\nclass KGraph(BaseANN):\n def __init__(self, metric, index_params, save_index):\n if type(metric) == unicode:\n metric = str(metric)\n self.name = 'KGraph(%s)' % (metric)\n self._metric = metric\n self._index_params = index_params\n self._save_index = save_index\n\n def fit(self, X):\n if X.dtype != numpy.float32:\n X = X.astype(numpy.float32)\n self._kgraph = pykgraph.KGraph(X, self._metric)\n path = os.path.join(INDEX_DIR, 'kgraph-index-%s' % self._metric)\n if os.path.exists(path):\n self._kgraph.load(path)\n else:\n self._kgraph.build(**self._index_params) #iterations=30, L=100, delta=0.002, recall=0.99, K=25)\n if not os.path.exists(INDEX_DIR):\n os.makedirs(INDEX_DIR)\n self._kgraph.save(path)\n def set_query_arguments(self, P):\n self._P = P\n def query(self, v, n):\n if v.dtype != numpy.float32:\n v = v.astype(numpy.float32)\n result = self._kgraph.search(numpy.array([v]), K=n, threads=1, P=self._P)\n return result[0]\n" ]
[ [ "numpy.array" ] ]
MASILab/pyPheWAS
[ "10cf320a24dc9d81a3b09aa38d3d4de8c2e1fcd5" ]
[ "deprecated/pyProWAS.py" ]
[ "from collections import Counter\nimport getopt\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport os\nimport pandas as pd\nimport scipy.stats\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom tqdm import tqdm\nimport matplotlib.lines as mlines\n\n\ndef get_codes(): # same\n \"\"\"\n Gets the PheWAS codes from a local csv file and load it into a pandas DataFrame.\n\n :returns: All of the codes from the resource file.\n :rtype: pandas DataFrame\n\n \"\"\"\n sep = os.sep\n path = os.path.dirname(os.path.abspath(__file__))\n filename = os.sep.join([path, 'resources', 'prowas_codes.csv'])\n return pd.read_csv(filename,dtype=str)\n\n\ndef get_group_file(path, filename): # same\n \"\"\"\n Read all of the genotype data from the given file and load it into a pandas DataFrame.\n\n :param path: The path to the file that contains the phenotype data\n :param filename: The name of the file that contains the phenotype data.\n :type path: string\n :type filename: string\n\n :returns: The data from the genotype file.\n :rtype: pandas DataFrame\n \"\"\"\n wholefname = path + filename\n genotypes = pd.read_csv(wholefname)\n return genotypes\n\n\ndef get_input(path, filename, reg_type): # diff -done - add duration\n \"\"\"\n Read all of the phenotype data from the given file and load it into a pandas DataFrame.\n\n :param path: The path to the file that contains the phenotype data\n :param filename: The name of the file that contains the phenotype data.\n :type path: string\n :type filename: string\n\n :returns: The data from the phenotype file.\n :rtype: pandas DataFrame\n \"\"\"\n wholefname = path + filename\n cptfile = pd.read_csv(wholefname)\n cptfile['cpt'] = cptfile['cpt'].str.strip()\n if reg_type == 0:\n phenotypes = pd.merge(cptfile, codes, on='cpt')\n phenotypes['MaxAgeAtCPT'] = 0\n phenotypes['MaxAgeAtCPT'] = phenotypes.groupby(['id', 'prowas_code'])['AgeAtCPT'].transform('max')\n else:\n \"\"\"\n This needs to be changed, need to adjust for a variety of different naming conventions\n in the phenotype file, not simply 'AgeAtCPT', 'id', 'cpt', etc.\n Either we need to adjust for different names in the code, or state explicitly in the\n documentation that we cannot do things like this.\n \"\"\"\n phenotypes = pd.merge(cptfile, codes, on='cpt')\n phenotypes['count'] = 0\n phenotypes['count'] = phenotypes.groupby(['id', 'prowas_code'])['count'].transform('count')\n phenotypes['duration'] = phenotypes.groupby(['id', 'prowas_code'])['AgeAtCPT'].transform('max') - \\\n phenotypes.groupby(['id', 'prowas_code'])['AgeAtCPT'].transform('min') + 1\n phenotypes['MaxAgeAtCPT'] = 0\n phenotypes['MaxAgeAtCPT'] = phenotypes.groupby(['id', 'prowas_code'])['AgeAtCPT'].transform('max')\n return phenotypes\n\n\ndef generate_feature_matrix(genotypes, phenotypes, reg_type, phewas_cov=''): # diff - done\n \"\"\"\n Generates the feature matrix that will be used to run the regressions.\n\n :param genotypes:\n :param phenotypes:\n :type genotypes:\n :type phenotypes:\n\n :returns:\n :rtype:\n\n \"\"\"\n pu=phenotypes[['id','prowas_code']].drop_duplicates()\n temp = pd.DataFrame(np.log2(pu['id'].drop_duplicates().count()/pu.groupby('prowas_code')['id'].count()).reset_index())\n temp.rename(columns={'id': 'idf'}, inplace=True)\n prowas_codes2 = pd.merge(prowas_codes, temp, on='prowas_code', how='left')\n\n feature_matrix = np.zeros((3, genotypes.shape[0], prowas_codes.shape[0]), dtype=int)\n count = 0;\n for i in 
tqdm(genotypes['id']):\n if reg_type == 0:\n temp = pd.DataFrame(phenotypes[phenotypes['id'] == i][['prowas_code', 'MaxAgeAtCPT']]).drop_duplicates()\n match = prowas_codes2['prowas_code'].isin(list(phenotypes[phenotypes['id'] == i]['prowas_code']))\n feature_matrix[0][count, match[match == True].index] = 1\n age = pd.merge(prowas_codes2, temp, on='prowas_code', how='left')['MaxAgeAtCPT']\n age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeAtVisit']\n assert np.all(np.isfinite(age)), \"make sure MaxAgeAtVisit is filled\"\n feature_matrix[1][count, :] = age\n if phewas_cov:\n feature_matrix[2][count, :] = int(phewas_cov in list(phenotypes[phenotypes['id'] == i]['prowas_code']))\n\n else:\n if reg_type == 1:\n temp = pd.DataFrame(\n phenotypes[phenotypes['id'] == i][['prowas_code', 'MaxAgeAtCPT', 'count']]).drop_duplicates()\n cts = pd.merge(prowas_codes, temp, on='prowas_code', how='left')['count']\n cts[np.isnan(cts)] = 0\n feature_matrix[0][count, :] = cts\n age = pd.merge(prowas_codes2, temp, on='prowas_code', how='left')['MaxAgeAtCPT']\n age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeAtVisit']\n assert np.all(np.isfinite(age)), \"make sure MaxAgeAtVisit is filled\"\n feature_matrix[1][count, :] = age\n if phewas_cov:\n feature_matrix[2][count, :] = int(\n phewas_cov in list(phenotypes[phenotypes['id'] == i]['prowas_code']))\n\n elif reg_type == 2:\n temp = pd.DataFrame(\n phenotypes[phenotypes['id'] == i][['prowas_code', 'MaxAgeAtCPT', 'count']]).drop_duplicates()\n temp = pd.merge(prowas_codes2, temp, on='prowas_code', how='left')\n tfidf=temp['count']*temp['idf']\n tfidf[np.isnan(tfidf)] = 0\n\n feature_matrix[0][count, :] = tfidf\n age = pd.merge(prowas_codes2, temp, on='prowas_code', how='left')['MaxAgeAtCPT']\n age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeAtVisit']\n assert np.all(np.isfinite(age)), \"make sure MaxAgeAtVisit is filled\"\n feature_matrix[1][count, :] = age\n if phewas_cov:\n feature_matrix[2][count, :] = int(\n phewas_cov in list(phenotypes[phenotypes['id'] == i]['prowas_code']))\n\n count += 1\n return feature_matrix\n\n\n\"\"\"\n\nStatistical Modeling\n\n\"\"\"\n\n\ndef get_phewas_info(p_index): # same\n \"\"\"\n Returns all of the info of the phewas code at the given index.\n\n :param p_index: The index of the desired phewas code\n :type p_index: int\n\n :returns: A list including the code, the name, and the rollup of the phewas code. The rollup is a list of all of the cpt-9 codes that are grouped into this phewas code.\n :rtype: list of strings\n \"\"\"\n p_code = prowas_codes.loc[p_index].prowas_code\n corresponding = codes[codes.prowas_code == p_code]\n\n p_name = corresponding.iloc[0].prowas_desc\n p_rollup = ','.join(codes[codes.prowas_code == p_code].cpt.tolist())\n return [p_code, p_name, p_rollup]\n\n\ndef calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, covariates, lr=0, response='',\n\t\t\t\t\t\t phen_vector3=''): # diff - done\n\t\"\"\"\n\tRuns the regression for a specific phenotype vector relative to the genotype data and covariates.\n\n\t:param genotypes: a DataFrame containing the genotype information\n\t:param phen_vector1: a array containing the phenotype vector\n\t:param phen_vector2: a array containing the phenotype vector\n\t:param covariates: a string containing all desired covariates\n\t:type genotypes: pandas DataFrame\n\t:type phen_vector1: numpy array\n\t:type phen_vector2: numpy array\n\t:type covariates: string\n\n\t.. 
note::\n\t\tThe covariates must be a string that is delimited by '+', not a list.\n\t\tIf you are using a list of covariates and would like to convert it to the pyPhewas format, use the following::\n\n\t\t\tl = ['genotype', 'age'] # a list of your covariates\n\t\t\tcovariates = '+'.join(l) # pyPhewas format\n\n\t\tThe covariates that are listed here *must* be headers to your genotype CSV file.\n\t\"\"\"\n\n\tdata = genotypes\n\tdata['y'] = phen_vector1\n\tdata['MaxAgeAtCPT'] = phen_vector2\n\t# f='y~'+covariates\n\tif covariates is not '':\n\t\tcovariates = '+' + covariates\n\tif response:\n\t\tf = response + '~ y + genotype' + covariates\n\t\tif phen_vector3.any():\n\t\t\tdata['phe'] = phen_vector3\n\t\t\tf = response + '~ y + phe + genotype' + covariates\n\telse:\n\t\tf = 'genotype ~ y' + covariates\n\t\tif phen_vector3.any():\n\t\t\tdata['phe'] = phen_vector3\n\t\t\tf = 'genotype ~ y + phe' + covariates\n\ttry:\n\t\tif lr == 0: # fit logit without regulatization\n\t\t\tlogreg = smf.logit(f, data).fit(disp=False)\n\t\t\tp = logreg.pvalues.y\n\t\t\todds = 0 #\n\t\t\tconf = logreg.conf_int()\n\t\t\tod = [-math.log10(p), p, logreg.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]\n\t\telif lr == 1: # fit logit with regularization\n\t\t\tf1 = f.split(' ~ ')\n\t\t\tf1[1] = f1[1].replace(\" \", \"\")\n\t\t\tlogit = sm.Logit(data[f1[0].strip()], data[f1[1].split('+')])\n\t\t\tlf = logit.fit_regularized(method='l1', alpha=0.1, disp=0, trim_mode='size', qc_verbose=0)\n\t\t\tp = lf.pvalues.y\n\t\t\todds = 0\n\t\t\tconf = lf.conf_int()\n\t\t\tod = [-math.log10(p), p, lf.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]\n\t\telse:\n\t\t\tlinreg = smf.logit(f, data).fit(method='bfgs', disp=False)\n\t\t\tp = linreg.pvalues.y\n\t\t\todds = 0\n\t\t\tconf = linreg.conf_int()\n\t\t\tod = [-math.log10(p), p, linreg.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]\n\texcept ValueError as ve:\n\t\tprint(ve)\n\t\tprint('lr = % d' %lr)\n\t\todds = 0\n\t\tp = np.nan\n\t\tod = [np.nan, p, np.nan, np.nan]\n\texcept Exception as e:\n\t\tprint(e)\n\t\todds = 0\n\t\tp = np.nan\n\t\tod = [np.nan, p, np.nan, np.nan]\n\treturn (odds, p, od)\n\n\ndef run_phewas(fm, genotypes, covariates, reg_type, response='', phewas_cov=''): # same\n\t\"\"\"\n\tFor each phewas code in the feature matrix, run the specified type of regression and save all of the resulting p-values.\n\n\t:param fm: The phewas feature matrix.\n\t:param genotypes: A pandas DataFrame of the genotype file.\n\t:param covariates: The covariates that the function is to be run on.\n\t:param reg_type: The covariates that the function is to be run on.\n\t:param response: The covariates that the function is to be run on.\n\t:param phewas_cov: The covariates that the function is to be run on.\n\n\t:returns: A tuple containing indices, p-values, and all the regression data.\n\t\"\"\"\n\n\tnum_phecodes = len(fm[0, 0])\n\tthresh = math.ceil(genotypes.shape[0] * 0.03)\n\t# store all of the pertinent data from the regressions\n\tregressions = pd.DataFrame(columns=output_columns)\n\tcontrol = fm[0][genotypes.genotype == 0, :]\n\tdisease = fm[0][genotypes.genotype == 1, :]\n\t# find all phecodes that only present for a single genotype (ie only controls or only diseased show the phecode) -> have to use regularization\n\tinds = np.where((control.any(axis=0) & ~disease.any(axis=0)) | (~control.any(axis=0) & disease.any(axis=0)))[0]\n\tfor index in tqdm(range(num_phecodes), desc='Running Regressions'):\n\t\tphen_vector1 = fm[0][:, index]\n\t\tphen_vector2 = fm[1][:, 
index]\n\t\tphen_vector3 = fm[2][:, index]\n\t\t# to prevent false positives, only run regressions if more than thresh records have positive values\n\t\tif np.where(phen_vector1 > 0)[0].shape[0] > thresh:\n\t\t\tif index in inds:\n\t\t\t\tres = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, covariates,\n\t\t\t\t\t\t\t\t\t\t lr=1,\n\t\t\t\t\t\t\t\t\t\t response=response,\n\t\t\t\t\t\t\t\t\t\t phen_vector3=phen_vector3)\n\t\t\telse:\n\t\t\t\tres = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, covariates,\n\t\t\t\t\t\t\t\t\t\t lr=0,\n\t\t\t\t\t\t\t\t\t\t response=response,\n\t\t\t\t\t\t\t\t\t\t phen_vector3=phen_vector3)\n\n\t\telse: # default (non-significant) values if not enough samples to run regression\n\t\t\todds = 0\n\t\t\tp = np.nan\n\t\t\tod = [np.nan, p, np.nan, np.nan]\n\t\t\tres = (odds, p, od)\n\n\t\t# save all of the regression data\n\t\tphewas_info = get_phewas_info(index)\n\t\tstat_info = res[2]\n\t\tinfo = phewas_info[0:2] + stat_info + [phewas_info[2]]\n\n\t\tregressions.loc[index] = info\n\n\treturn regressions.dropna(subset=['p-val']).sort_values(by='PheWAS Code')\n\n\ndef get_bon_thresh(normalized, power): # same\n \"\"\"\n Calculate the bonferroni correction threshold.\n\n Divide the power by the sum of all finite values (all non-nan values).\n\n :param normalized: an array of all normalized p-values. Normalized p-values are -log10(p) where p is the p-value.\n :param power: the threshold power being used (usually 0.05)\n :type normalized: numpy array\n :type power: float\n\n :returns: The bonferroni correction\n :rtype: float\n\n \"\"\"\n return power / sum(np.isfinite(normalized))\n\n\ndef get_fdr_thresh(p_values, power):\n \"\"\"\n Calculate the false discovery rate threshold.\n\n :param p_values: a list of p-values obtained by executing the regression\n :param power: the thershold power being used (usually 0.05)\n :type p_values: numpy array\n :type power: float\n\n :returns: the false discovery rate\n :rtype: float\n \"\"\"\n sn = np.sort(p_values)\n sn = sn[np.isfinite(sn)]\n sn = sn[::-1]\n for i in range(len(sn)):\n thresh = 0.05 * i / len(sn)\n if sn[i] <= power:\n break\n return sn[i]\n\n\ndef get_imbalances(regressions):\n \"\"\"\n Generates a numpy array of the imbalances.\n\n For a value *x* where *x* is the beta of a regression:\n\n ========= ====== =======================================================\n *x* < 0 **-1** The regression had a negative beta value\n *x* = nan **0** The regression had a nan beta value (and a nan p-value)\n *x* > 0 **+1** The regression had a positive beta value\n ========= ====== =======================================================\n\n These values are then used to get the correct colors using the imbalance_colors.\n\n :param regressions: DataFrame containing a variety of different output values from the regression performed. The only one used for this function are the 'beta' values.\n :type regressions: pandas DataFrame\n\n :returns: A list that is the length of the number of regressions performed. Each element in the list is either a -1, 0, or +1. 
These are used as explained above.\n :rtype: numpy array\n \"\"\"\n\n imbalance = np.array(regressions['beta'])\n imbalance[np.isnan(imbalance)] = 0\n imbalance[imbalance > 0] = 1\n imbalance[imbalance < 0] = -1\n return imbalance\n\n\ndef get_x_label_positions(categories, lines=True): # same\n \"\"\"\n This method is used get the position of the x-labels and the lines between the columns\n\n :param categories: list of the categories\n :param lines: a boolean which determines the locations returned (either the center of each category or the end)\n :type categories:\n :type lines: bool\n\n :returns: A list of positions\n :rtype: list of ints\n\n \"\"\"\n tt = Counter(categories)\n s = 0\n label_positions = []\n for _, v in tt.items():\n if lines:\n inc = v // 2\n else:\n inc = v\n label_positions.append(s + inc)\n s += v\n return label_positions\n\n\ndef plot_manhattan(regressions, thresh, show_imbalance=True, save='', save_format=''): # same\n\t\"\"\"\n\tPlots the data on a Manhattan Plot.\n\n\t:param regressions: dataframe containing the regression results\n\t:param thresh: the significance threshold\n\t:param save: the output file to save to (if empty, display the plot)\n\t:param show_imbalance: boolean variable that determines whether or not to show imbalances on the plot (default True)\n\t:type regressions: pandas DataFrame\n\t:type thresh: float\n\t:type save: str\n\t:type show_imbalance: boolean\n\n\t\"\"\"\n\n\t# Initialize figure\n\tfig = plt.figure(1)\n\tax = plt.subplot(111)\n\tframe1 = plt.gca()\n\n\t# Merge regressions with Phewas data to get categories\n\tregressions = pd.merge(regressions, prowas_codes, left_on='PheWAS Code', right_on='prowas_code').sort_values(by='ccs')\n\n\t# Determine whether or not to show the imbalance.\n\t# show_imbalance = imbalances.size != 0\n\n\t# c = icd9_codes.loc[phewas_codes['index']]\n\t# c = c.reset_index()\n\t# idx = c.sort_values(by='category').index\n\n\t# Plot all points w/ labels\n\te = 1\n\tartists = []\n\tplt.ylabel('-log10(p)')\n\tax.axhline(y=thresh, color='red', ls='dotted') # plot threshold\n\tfor ix,data in regressions.iterrows():\n\t\tlogp_ix = data['\"-log(p)\"']\n\t\tif logp_ix > thresh:\n\t\t\t# determine marker type based on whether/not showing imbalance\n\t\t\tif show_imbalance:\n\t\t\t\tmew = 1.5\n\t\t\t\tif data['beta'] > 0: m = '+'\n\t\t\t\telse: m = '_'\n\t\t\telse:\n\t\t\t\tmew = 0.0\n\t\t\t\tm = 'o'\n\t\t\t# Plot PheCode data point & format PheCode label\n\t\t\tax.plot(e, logp_ix, m, color=\"blue\", fillstyle='full', markeredgewidth=mew)\n\t\t\tartists.append(ax.text(e, logp_ix, data['prowas_desc'], rotation=89, va='bottom', fontsize=6))\n\t\t\te += 15\n\n\t# # Legend\n\t# line1 = []\n\t# box = ax.get_position()\n\t# ax.set_position([box.x0, box.y0 + box.height * 0.05, box.width, box.height * 0.95])\n\t# for lab in plot_colors.keys():\n\t# \tline1.append(mlines.Line2D(range(1), range(1), color=\"white\", marker='o', markerfacecolor=plot_colors[lab], label=lab))\n\t# artists.append(ax.legend(handles=line1, bbox_to_anchor=(0.5, 0), loc='upper center', fancybox=True, ncol=4, prop={'size': 6}))\n\n\t# Plot x axis\n\tax.axhline(y=0, color='black')\n\tframe1.axes.get_xaxis().set_visible(False)\n\n\t# If the imbalance is to be shown, draw lines to show the categories.\n\t# if show_imbalance:\n\t# \tfor pos in linepos:\n\t# \t\tax.axvline(x=pos, color='black', ls='dotted')\n\n\t# Save the plot\n\tif save:\n\t\tplt.savefig(save,format=save_format, bbox_extra_artists=artists, 
bbox_inches='tight')\n\t\tplt.clf()\n\n\treturn\n\n\ndef plot_odds_ratio(regressions, thresh, show_imbalance=True, save='', save_format='', label_loc=\"plot\"): # same\n\t\"\"\"\n\tPlots the data on a Log Odds Plot.\n\n\t:param regressions: dataframe containing the regression results\n\t:param thresh: the significance threshold\n\t:param save: the output file to save to (if empty, display the plot)\n\t:param show_imbalance: boolean variable that determines whether or not to show imbalances on the plot (default True)\n\t:param label_loc: the output file to save to (if empty, display the plot)\n\t:type regressions: pandas DataFrame\n\t:type thresh: float\n\t:type save: str\n\t:type show_imbalance: boolean\n\n\t\"\"\"\n\n\t# Initialize figure\n\tfig = plt.figure(2)\n\tax = plt.subplot(111)\n\tframe1 = plt.gca()\n\n\t# Merge regressions with Phewas data to get categories\n\tregressions = pd.merge(regressions, prowas_codes, left_on='PheWAS Code', right_on='prowas_code').sort_values(by='ccs')\n\n\t# determine whether or not to show imbalances\n\t# show_imbalance = imbalances.size != 0\n\n\t# Sort the phewas codes by category.\n\t# c = icd9_codes.loc[phewas_codes['index']]\n\t# c = c.reset_index()\n\t# idx = c.sort_values(by='category').index\n\n\t# Plot all points w/ labels\n\te = 1 # vertical index\n\tho = 0.025 # horizontal text offset\n\tvo = 1 # vertical text offset\n\ttext_size = 6\n\tartists = []\n\tif label_loc == \"axis\":\n\t\tphecode_labels = []\n\t\tphecode_locs = []\n\tplt.xlabel('Log odds ratio')\n\tfor ix, data in regressions.iterrows():\n\t\tbeta_ix = data['beta']\n\t\tif data['\"-log(p)\"'] > thresh:\n\t\t\t# Add Phecode label\n\t\t\tif label_loc == \"plot\":\n\t\t\t\tif show_imbalance:\n\t\t\t\t\tif beta_ix > 0:\n\t\t\t\t\t\tartists.append(\n\t\t\t\t\t\t\tax.text(beta_ix + ho, e + vo, data['prowas_desc'], rotation=0, ha='left', fontsize=text_size))\n\t\t\t\t\telse:\n\t\t\t\t\t\tartists.append(ax.text(beta_ix - ho, e + vo, data['prowas_desc'], rotation=0, ha='right',\n\t\t\t\t\t\t fontsize=text_size))\n\t\t\t\telse:\n\t\t\t\t\tartists.append(\n\t\t\t\t\t\tax.text(beta_ix + ho, e + vo, data['prowas_desc'], rotation=0, va='bottom', fontsize=text_size))\n\t\t\telse: # location = \"axis\"\n\t\t\t\tphecode_labels.append(data['prowas_desc'])\n\t\t\t\tphecode_locs.append(e)\n\n\t\t\t# Plot Phecode Data\n\t\t\tax.plot(beta_ix, e, 'o', color=\"green\", fillstyle='full', markeredgewidth=0.0)\n\t\t\tax.plot([data['lowlim'], data['uplim']], [e, e], color=\"green\")\n\t\t\te += 15\n\n\t# Plot y axis\n\tax.axvline(x=0, color='black')\n\n\tif label_loc == \"axis\":\n\t\tplt.yticks(phecode_locs, phecode_labels, ha='right', fontsize=text_size)\n\telse:\n\t\tframe1.axes.get_yaxis().set_visible(False)\n\n\t# Legend\n\t# line1 = []\n\t# box = ax.get_position()\n\t# ax.set_position([box.x0, box.y0 + box.height * 0.05, box.width, box.height * 0.95])\n\t# for lab in plot_colors.keys():\n\t# \tline1.append(\n\t# \t\tmlines.Line2D(range(1), range(1), color=\"white\", marker='o', markerfacecolor=plot_colors[lab], label=lab))\n\t# artists.append(ax.legend(handles=line1, bbox_to_anchor=(0.5, -0.125), loc='upper center', fancybox=True, ncol=4,\n\t# prop={'size': text_size}))\n\n\t# If the imbalance is to be shown, draw lines to show the categories.\n\t# if show_imbalance:\n\t# \tfor pos in linepos:\n\t# \t\tax.axvline(x=pos, color='black', ls='dotted')\n\n\t# Save the plot\n\tif save:\n\t\tplt.savefig(save, format=save_format, bbox_extra_artists=artists, bbox_inches='tight')\n\t\tplt.clf()\n\n\treturn\n\ndef 
process_args(kwargs, optargs, *args):\n clean = np.vectorize(lambda x: x[x.rfind('-') + 1:] + '=')\n searchfor = clean(list(optargs.keys()))\n opts, rem = getopt.getopt(args, '', searchfor)\n assert len(rem) == 0, 'Unknown arguments included %s' % (str(rem))\n for option in opts:\n k, v = option\n kwargs[optargs[k]] = v\n\n return kwargs\n\n\ndef display_kwargs(kwargs):\n print (\"Arguments: \")\n for k, v in kwargs.items():\n left = str(k).ljust(30, '.')\n right = str(v).rjust(50, '.')\n print(left + right)\n\n\noutput_columns = ['PheWAS Code',\n 'PheWAS Name',\n 'p-val',\n '\\\"-log(p)\\\"',\n 'beta',\n 'Conf-interval beta',\n 'cpt']\n\nplot_colors = {'-': 'gold',\n 'circulatory system': 'red',\n 'congenital anomalies': 'mediumspringgreen',\n 'dermatologic': 'maroon',\n 'digestive': 'green',\n 'endocrine/metabolic': 'darkred',\n 'genitourinary': 'black',\n 'hematopoietic': 'orange',\n 'infectious diseases': 'blue',\n 'injuries & poisonings': 'slategray',\n 'mental disorders': 'fuchsia',\n 'musculoskeletal': 'darkgreen',\n 'neoplasms': 'teal',\n 'neurological': 'midnightblue',\n 'pregnancy complications': 'gold',\n 'respiratory': 'brown',\n 'sense organs': 'darkviolet',\n 'symptoms': 'darkviolet'}\nimbalance_colors = {\n 0: 'white',\n 1: 'deepskyblue',\n -1: 'red'\n}\nregression_map = {\n 'log': 0,\n 'lin': 1,\n 'lind': 2\n}\nthreshold_map = {\n 'bon': 0,\n 'fdr': 1\n}\nglobal codes, prowas_codes\ncodes = get_codes()\nprowas_codes = codes[['prowas_code','prowas_desc','ccs','CCS Label']].drop_duplicates(subset='prowas_code')\nprowas_codes.reset_index(inplace=True,drop=True)\n\n\n" ]
[ [ "matplotlib.pyplot.gca", "pandas.merge", "pandas.read_csv", "numpy.isfinite", "numpy.isnan", "pandas.DataFrame", "numpy.sort", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "numpy.array", "numpy.zeros", "numpy.where", "matplotlib.pyplot.figure" ] ]
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
[ "93871bed9078d5bf6b4bb37407c9dce87c569b55" ]
[ "models/dual_sformer.py" ]
[ "\"\"\"\nCode from\nhttps://github.com/zengqunzhao/Former-DFER\n\"\"\"\nfrom einops import rearrange, repeat\nfrom torch import nn, einsum\nimport math\nimport torch\nfrom torchvision import models\nfrom .loss import CCCLoss,AULoss,FocalLoss_Ori\nfrom torch.functional import F\nimport numpy as np\nfrom collections import OrderedDict\nfrom torch.nn import SmoothL1Loss\n\nclass Dummy(nn.Module):\n def __init__(self):\n super(Dummy, self).__init__()\n\n def forward(self, input):\n return input\n\nclass GELU(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)\n\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim, dropout=0.):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n GELU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_dim, dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads=8, dim_head=64, dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n project_out = not (heads == 1 and dim_head == dim)\n\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim),\n nn.Dropout(dropout)\n ) if project_out else nn.Identity()\n\n def forward(self, x, mask=None):\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x).chunk(3, dim=-1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv)\n\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n mask_value = -torch.finfo(dots.dtype).max\n\n if mask is not None:\n mask = F.pad(mask.flatten(1), (1, 0), value=True)\n assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'\n mask = rearrange(mask, 'b i -> b () i ()') * rearrange(mask, 'b j -> b () () j')\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n attn = dots.softmax(dim=-1)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n out = self.to_out(out)\n return out\n\n\nclass Transformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Residual(PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout))),\n Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)))\n ]))\n\n def forward(self, x, mask=None):\n for attn, ff in self.layers:\n x = attn(x, mask=mask)\n x = ff(x)\n return x\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, 
groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\ndef load_pretrain(model,weight_path):\n print('Loading former weight')\n pretrained_dict = torch.load(weight_path)['state_dict']\n new_state_dict=OrderedDict()\n for k,v in pretrained_dict.items():\n new_name = k.replace('module.','')\n new_state_dict[new_name]=v\n model.load_state_dict(new_state_dict,strict=False)\n\nclass ResFormer(nn.Module): # S-Former after stage3\n\n def __init__(self, block, layers, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None,\n num_patches=7*7, dim=256, depth=1, heads=8, mlp_dim=512, dim_head=32, dropout=0.0):\n super(ResFormer, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None or\"\n \" a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n\n self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))\n self.spatial_transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n if zero_init_residual:\n for m in self.modules():\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n 
conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion))\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width,\n dilation=self.dilation, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n #torch.Size([2, 16, 3, 112, 112])\n b,t,c,h,w = x.shape\n x = x.contiguous().view(-1, c, h, w)\n #torch.Size([32, 3, 112, 112])\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x) # torch.Size([1, 64, 28, 28])\n x = self.layer1(x) # torch.Size([1, 64, 28, 28])\n x = self.layer2(x) # torch.Size([1, 128, 14, 14])\n x = self.layer3(x) # torch.Size([1, 256, 7, 7])\n b_l, c, h, w = x.shape\n #torch.Size([32, 256, 7, 7])\n x = x.reshape((b_l, c, h*w))\n #torch.Size([32, 256, 49])\n x = x.permute(0, 2, 1)\n b, n, _ = x.shape\n #x shape: torch.Size([32, 49, 256])\n #pos_embedding shape torch.Size([1, 49, 256])\n x = x + self.pos_embedding[:, :n]\n #torch.Size([32, 49, 256])\n x = self.spatial_transformer(x)\n #torch.Size([32, 49, 256])\n x = x.permute(0, 2, 1)\n #torch.Size([32, 256, 49])\n x = x.reshape((b, c, h, w))\n #torch.Size([32, 256, 7, 7])\n x = self.layer4(x) \n #torch.Size([32, 512, 4, 4])\n x = self.avgpool(x)\n #torch.Size([32, 512, 1, 1])\n x = torch.flatten(x, 1)\n #torch.Size([32, 512])\n\n return x\n\nclass DualSpatialFormer(nn.Module):\n def __init__(self, modality='A;V;M', video_pretrained = True, task='EX'):\n super(DualSpatialFormer, self).__init__()\n assert 'V' in modality and 'M' in modality\n self.rgb_model = ResFormer(BasicBlock, [2, 2, 2, 2])\n self.mask_model = ResFormer(BasicBlock, [2, 2, 2, 2])\n self.mask_model.conv1 = nn.Conv2d(in_channels=1,\n out_channels=self.mask_model.conv1.out_channels,\n kernel_size=self.mask_model.conv1.kernel_size,\n stride=self.mask_model.conv1.stride,\n padding=self.mask_model.conv1.padding,\n bias=False)\n\n self.task = task\n self.modes = [\"clip\"]\n self.fc = nn.Sequential(\n nn.BatchNorm1d(512),\n nn.Linear(in_features=512,out_features=256),\n nn.BatchNorm1d(256),\n nn.Linear(in_features=256,out_features=12+7+2)\n )\n self.loss_EX = nn.CrossEntropyLoss(ignore_index=7)\n #weight=torch.tensor([2.62, 26.5, 45, 40, 4.0, 5.87, 1.0])\n #self.loss_EX = FocalLoss_Ori(num_class=7, gamma=2.0, ignore_index=7, reduction='mean')\n self.loss_AU = AULoss()\n self.loss_VA = CCCLoss()\n\n def forward(self, x):\n clip = x['clip']\n assert clip.size(2) == 1\n clip_rgb = clip[:,:-1,:,:,:].permute(0,2,1,3,4)\n clip_mask = clip[:,-1,:,:,:].unsqueeze(1).permute(0,2,1,3,4)\n #clip = clip.squeeze(2)\n\n rgb_features = self.rgb_model(clip_rgb)\n mask_features = self.mask_model(clip_mask)\n features = torch.add(rgb_features, mask_features)\n out = self.fc(features)\n # out = self.fc_video(video_model_features)\n return out\n\n def get_ex_loss(self, y_pred, y_true):\n y_pred = y_pred[:, 12:19]\n y_true = y_true.view(-1)\n loss = self.loss_EX(y_pred, y_true)\n return loss\n\n def get_au_loss(self, y_pred, y_true):\n #y_pred = torch.sigmoid(y_pred[:, :12])\n loss = self.loss_AU(y_pred[:, :12], y_true)\n return loss\n\n def get_va_loss(self, y_pred, y_true):\n y_pred_v = torch.tanh(y_pred[:, 19])\n y_pred_a = torch.tanh(y_pred[:, 20])\n #print(y_pred_v)\n #print(y_true[:, 0])\n loss = self.loss_VA(y_pred_v, y_true[:, 0]) + 
self.loss_VA(y_pred_a, y_true[:, 1])\n return loss\n\n def get_mt_loss(self,y_pred, y_true, normalize = False): #multi-task loss\n loss_ex = self.get_ex_loss(y_pred,y_true['EX'])\n loss_au = self.get_au_loss(y_pred, y_true['AU'])\n loss_va = self.get_va_loss(y_pred, y_true['VA'])\n if normalize:\n valid_ex_label_num = np.sum(y_true['EX'].detach().cpu().numpy() != 7)\n if valid_ex_label_num != 0:\n loss_ex = loss_ex/valid_ex_label_num\n else:\n device = y_true.device\n loss_ex = torch.tensor(0.0, requires_grad=True).to(device)\n \n valid_au_label_num = np.sum((y_true['AU'].detach().cpu().numpy() != -1))\n if valid_au_label_num != 0:\n loss_au = loss_au/valid_au_label_num\n else:\n device = y_true.device\n loss_au = torch.tensor(0.0, requires_grad=True).to(device)\n \n valid_va_label_num = np.sum(y_true['VA'].detach().cpu().numpy() != -5.0)\n if valid_va_label_num != 0:\n loss_va = loss_va/valid_va_label_num\n else:\n device = y_true.device\n loss_va = torch.tensor(0.0, requires_grad=True).to(device)\n\n return [loss_ex,loss_au,loss_va]" ]
[ [ "torch.load", "torch.tanh", "torch.flatten", "torch.finfo", "torch.pow", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.add", "torch.einsum", "torch.randn", "torch.tensor", "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.LayerNorm", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
andrewsalij/pymatgen
[ "7b6809c783ef356d437d65e8d6b733333a16d381" ]
[ "pymatgen/analysis/tests/test_pourbaix_diagram.py" ]
[ "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport logging\nimport multiprocessing\nimport os\nimport unittest\nimport warnings\n\nimport numpy as np\nfrom monty.serialization import loadfn\n\nfrom pymatgen.core import SETTINGS\nfrom pymatgen.analysis.pourbaix_diagram import (\n IonEntry,\n MultiEntry,\n PourbaixDiagram,\n PourbaixEntry,\n PourbaixPlotter,\n)\nfrom pymatgen.core.ion import Ion\nfrom pymatgen.entries.computed_entries import ComputedEntry\nfrom pymatgen.util.testing import PymatgenTest\n\nlogger = logging.getLogger(__name__)\n\n\nclass PourbaixEntryTest(unittest.TestCase):\n _multiprocess_shared_ = True\n \"\"\"\n Test all functions using a fictitious entry\n \"\"\"\n\n def setUp(self):\n # comp = Composition(\"Mn2O3\")\n self.solentry = ComputedEntry(\"Mn2O3\", 49)\n ion = Ion.from_formula(\"MnO4-\")\n self.ionentry = IonEntry(ion, 25)\n self.PxIon = PourbaixEntry(self.ionentry)\n self.PxSol = PourbaixEntry(self.solentry)\n self.PxIon.concentration = 1e-4\n\n def test_pourbaix_entry(self):\n self.assertEqual(self.PxIon.entry.energy, 25, \"Wrong Energy!\")\n self.assertEqual(self.PxIon.entry.name, \"MnO4[-]\", \"Wrong Entry!\")\n self.assertEqual(self.PxSol.entry.energy, 49, \"Wrong Energy!\")\n self.assertEqual(self.PxSol.entry.name, \"Mn2O3\", \"Wrong Entry!\")\n # self.assertEqual(self.PxIon.energy, 25, \"Wrong Energy!\")\n # self.assertEqual(self.PxSol.energy, 49, \"Wrong Energy!\")\n self.assertEqual(self.PxIon.concentration, 1e-4, \"Wrong concentration!\")\n\n def test_calc_coeff_terms(self):\n self.assertEqual(self.PxIon.npH, -8, \"Wrong npH!\")\n self.assertEqual(self.PxIon.nPhi, -7, \"Wrong nPhi!\")\n self.assertEqual(self.PxIon.nH2O, 4, \"Wrong nH2O!\")\n\n self.assertEqual(self.PxSol.npH, -6, \"Wrong npH!\")\n self.assertEqual(self.PxSol.nPhi, -6, \"Wrong nPhi!\")\n self.assertEqual(self.PxSol.nH2O, 3, \"Wrong nH2O!\")\n\n def test_to_from_dict(self):\n d = self.PxIon.as_dict()\n ion_entry = self.PxIon.from_dict(d)\n self.assertEqual(ion_entry.entry.name, \"MnO4[-]\", \"Wrong Entry!\")\n\n d = self.PxSol.as_dict()\n sol_entry = self.PxSol.from_dict(d)\n self.assertEqual(sol_entry.name, \"Mn2O3(s)\", \"Wrong Entry!\")\n self.assertEqual(\n sol_entry.energy,\n self.PxSol.energy,\n \"as_dict and from_dict energies unequal\",\n )\n\n def test_energy_functions(self):\n # TODO: test these for values\n self.PxSol.energy_at_conditions(10, 0)\n self.PxSol.energy_at_conditions(np.array([1, 2, 3]), 0)\n self.PxSol.energy_at_conditions(10, np.array([1, 2, 3]))\n self.PxSol.energy_at_conditions(np.array([1, 2, 3]), np.array([1, 2, 3]))\n\n def test_multi_entry(self):\n # TODO: More robust multientry test\n m_entry = MultiEntry([self.PxSol, self.PxIon])\n for attr in [\"energy\", \"composition\", \"nPhi\"]:\n self.assertEqual(\n getattr(m_entry, attr),\n getattr(self.PxSol, attr) + getattr(self.PxIon, attr),\n )\n\n # As dict, from dict\n m_entry_dict = m_entry.as_dict()\n m_entry_new = MultiEntry.from_dict(m_entry_dict)\n self.assertEqual(m_entry_new.energy, m_entry.energy)\n\n def test_get_elt_fraction(self):\n entry = ComputedEntry(\"Mn2Fe3O3\", 49)\n pbentry = PourbaixEntry(entry)\n self.assertAlmostEqual(pbentry.get_element_fraction(\"Fe\"), 0.6)\n self.assertAlmostEqual(pbentry.get_element_fraction(\"Mn\"), 0.4)\n\n\nclass PourbaixDiagramTest(unittest.TestCase):\n _multiprocess_shared_ = True\n\n @classmethod\n def setUpClass(cls):\n cls.test_data = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, 
\"pourbaix_test_data.json\"))\n cls.pbx = PourbaixDiagram(cls.test_data[\"Zn\"], filter_solids=True)\n cls.pbx_nofilter = PourbaixDiagram(cls.test_data[\"Zn\"], filter_solids=False)\n\n def test_pourbaix_diagram(self):\n self.assertEqual(\n set([e.name for e in self.pbx.stable_entries]),\n {\"ZnO(s)\", \"Zn[2+]\", \"ZnHO2[-]\", \"ZnO2[2-]\", \"Zn(s)\"},\n \"List of stable entries does not match\",\n )\n\n self.assertEqual(\n set([e.name for e in self.pbx_nofilter.stable_entries]),\n {\"ZnO(s)\", \"Zn[2+]\", \"ZnHO2[-]\", \"ZnO2[2-]\", \"Zn(s)\", \"ZnO2(s)\", \"ZnH(s)\"},\n \"List of stable entries for unfiltered pbx does not match\",\n )\n\n pbx_lowconc = PourbaixDiagram(self.test_data[\"Zn\"], conc_dict={\"Zn\": 1e-8}, filter_solids=True)\n self.assertEqual(\n set([e.name for e in pbx_lowconc.stable_entries]),\n {\"Zn(HO)2(aq)\", \"Zn[2+]\", \"ZnHO2[-]\", \"ZnO2[2-]\", \"Zn(s)\"},\n )\n\n def test_properties(self):\n self.assertEqual(len(self.pbx.unstable_entries), 2)\n\n def test_multicomponent(self):\n # Assure no ions get filtered at high concentration\n ag_n = [e for e in self.test_data[\"Ag-Te-N\"] if \"Te\" not in e.composition]\n highconc = PourbaixDiagram(ag_n, filter_solids=True, conc_dict={\"Ag\": 1e-5, \"N\": 1})\n entry_sets = [set(e.entry_id) for e in highconc.stable_entries]\n self.assertIn({\"mp-124\", \"ion-17\"}, entry_sets)\n\n # Binary system\n pd_binary = PourbaixDiagram(\n self.test_data[\"Ag-Te\"],\n filter_solids=True,\n comp_dict={\"Ag\": 0.5, \"Te\": 0.5},\n conc_dict={\"Ag\": 1e-8, \"Te\": 1e-8},\n )\n self.assertEqual(len(pd_binary.stable_entries), 30)\n test_entry = pd_binary.find_stable_entry(8, 2)\n self.assertTrue(\"mp-499\" in test_entry.entry_id)\n\n # Find a specific multientry to test\n self.assertEqual(pd_binary.get_decomposition_energy(test_entry, 8, 2), 0)\n\n pd_ternary = PourbaixDiagram(self.test_data[\"Ag-Te-N\"], filter_solids=True)\n self.assertEqual(len(pd_ternary.stable_entries), 49)\n\n # Fetch a solid entry and a ground state entry mixture\n ag_te_n = self.test_data[\"Ag-Te-N\"][-1]\n ground_state_ag_with_ions = MultiEntry(\n [self.test_data[\"Ag-Te-N\"][i] for i in [4, 18, 30]],\n weights=[1 / 3, 1 / 3, 1 / 3],\n )\n self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765)\n self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625)\n self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0)\n\n # Test invocation of pourbaix diagram from ternary data\n new_ternary = PourbaixDiagram(pd_ternary.all_entries)\n self.assertEqual(len(new_ternary.stable_entries), 49)\n self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765)\n self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625)\n self.assertAlmostEqual(new_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0)\n\n def test_get_pourbaix_domains(self):\n domains = PourbaixDiagram.get_pourbaix_domains(self.test_data[\"Zn\"])\n self.assertEqual(len(domains[0]), 7)\n\n def test_get_decomposition(self):\n # Test a stable entry to ensure that it's zero in the stable region\n entry = self.test_data[\"Zn\"][12] # Should correspond to mp-2133\n self.assertAlmostEqual(\n self.pbx.get_decomposition_energy(entry, 10, 1),\n 0.0,\n 5,\n \"Decomposition energy of ZnO is not 0.\",\n )\n\n # Test an unstable entry to ensure that it's never zero\n entry = self.test_data[\"Zn\"][11]\n ph, v = 
np.meshgrid(np.linspace(0, 14), np.linspace(-2, 4))\n result = self.pbx_nofilter.get_decomposition_energy(entry, ph, v)\n self.assertTrue((result >= 0).all(), \"Unstable energy has hull energy of 0 or less\")\n\n # Test an unstable hydride to ensure HER correction works\n self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, -3, -2), 3.6979147983333)\n # Test a list of pHs\n self.pbx.get_decomposition_energy(entry, np.linspace(0, 2, 5), 2)\n\n # Test a list of Vs\n self.pbx.get_decomposition_energy(entry, 4, np.linspace(-3, 3, 10))\n\n # Test a set of matching arrays\n ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-3, 3))\n self.pbx.get_decomposition_energy(entry, ph, v)\n\n def test_get_stable_entry(self):\n entry = self.pbx.get_stable_entry(0, 0)\n self.assertEqual(entry.entry_id, \"ion-0\")\n\n def test_multielement_parallel(self):\n # Simple test to ensure that multiprocessing is working\n test_entries = self.test_data[\"Ag-Te-N\"]\n nproc = multiprocessing.cpu_count()\n pbx = PourbaixDiagram(test_entries, filter_solids=True, nproc=nproc)\n self.assertEqual(len(pbx.stable_entries), 49)\n\n def test_solid_filter(self):\n entries = self.test_data[\"Zn\"]\n pbx = PourbaixDiagram(entries, filter_solids=False)\n oxidized_phase = pbx.find_stable_entry(10, 2)\n self.assertEqual(oxidized_phase.name, \"ZnO2(s)\")\n\n entries = self.test_data[\"Zn\"]\n pbx = PourbaixDiagram(entries, filter_solids=True)\n oxidized_phase = pbx.find_stable_entry(10, 2)\n self.assertEqual(oxidized_phase.name, \"ZnO(s)\")\n\n def test_serialization(self):\n d = self.pbx.as_dict()\n new = PourbaixDiagram.from_dict(d)\n self.assertEqual(\n set([e.name for e in new.stable_entries]),\n {\"ZnO(s)\", \"Zn[2+]\", \"ZnHO2[-]\", \"ZnO2[2-]\", \"Zn(s)\"},\n \"List of stable entries does not match\",\n )\n\n # Test with unprocessed entries included, this should result in the\n # previously filtered entries being included\n d = self.pbx.as_dict(include_unprocessed_entries=True)\n new = PourbaixDiagram.from_dict(d)\n self.assertEqual(\n set([e.name for e in new.stable_entries]),\n {\"ZnO(s)\", \"Zn[2+]\", \"ZnHO2[-]\", \"ZnO2[2-]\", \"Zn(s)\", \"ZnO2(s)\", \"ZnH(s)\"},\n \"List of stable entries for unfiltered pbx does not match\",\n )\n\n pd_binary = PourbaixDiagram(\n self.test_data[\"Ag-Te\"],\n filter_solids=True,\n comp_dict={\"Ag\": 0.5, \"Te\": 0.5},\n conc_dict={\"Ag\": 1e-8, \"Te\": 1e-8},\n )\n new_binary = PourbaixDiagram.from_dict(pd_binary.as_dict())\n self.assertEqual(len(pd_binary.stable_entries), len(new_binary.stable_entries))\n\n # The two tests below rely on the MP Rest interface.\n @unittest.skipIf(not SETTINGS.get(\"PMG_MAPI_KEY\"), \"PMG_MAPI_KEY environment variable not set.\")\n def test_heavy(self):\n from pymatgen.ext.matproj import MPRester\n\n mpr = MPRester()\n entries = mpr.get_pourbaix_entries([\"Li\", \"Mg\", \"Sn\", \"Pd\"])\n pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)\n entries = mpr.get_pourbaix_entries([\"Ba\", \"Ca\", \"V\", \"Cu\", \"F\"])\n pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)\n entries = mpr.get_pourbaix_entries([\"Ba\", \"Ca\", \"V\", \"Cu\", \"F\", \"Fe\"])\n pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)\n entries = mpr.get_pourbaix_entries([\"Na\", \"Ca\", \"Nd\", \"Y\", \"Ho\", \"F\"])\n pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)\n\n @unittest.skipIf(not SETTINGS.get(\"PMG_MAPI_KEY\"), \"PMG_MAPI_KEY environment variable not set.\")\n def test_mpr_pipeline(self):\n from pymatgen.ext.matproj import 
MPRester\n\n mpr = MPRester()\n data = mpr.get_pourbaix_entries([\"Zn\"])\n pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={\"Zn\": 1e-8})\n pbx.find_stable_entry(10, 0)\n\n data = mpr.get_pourbaix_entries([\"Ag\", \"Te\"])\n pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={\"Ag\": 1e-8, \"Te\": 1e-8})\n self.assertEqual(len(pbx.stable_entries), 30)\n test_entry = pbx.find_stable_entry(8, 2)\n self.assertAlmostEqual(test_entry.energy, 2.3894017960000009, 1)\n\n # Test custom ions\n entries = mpr.get_pourbaix_entries([\"Sn\", \"C\", \"Na\"])\n ion = IonEntry(Ion.from_formula(\"NaO28H80Sn12C24+\"), -161.676)\n custom_ion_entry = PourbaixEntry(ion, entry_id=\"my_ion\")\n pbx = PourbaixDiagram(\n entries + [custom_ion_entry],\n filter_solids=True,\n comp_dict={\"Na\": 1, \"Sn\": 12, \"C\": 24},\n )\n self.assertAlmostEqual(pbx.get_decomposition_energy(custom_ion_entry, 5, 2), 2.1209002582, 1)\n\n # Test against ion sets with multiple equivalent ions (Bi-V regression)\n entries = mpr.get_pourbaix_entries([\"Bi\", \"V\"])\n pbx = PourbaixDiagram(entries, filter_solids=True, conc_dict={\"Bi\": 1e-8, \"V\": 1e-8})\n self.assertTrue(all([\"Bi\" in entry.composition and \"V\" in entry.composition for entry in pbx.all_entries]))\n\n\nclass PourbaixPlotterTest(unittest.TestCase):\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n self.test_data = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, \"pourbaix_test_data.json\"))\n self.pd = PourbaixDiagram(self.test_data[\"Zn\"])\n self.plotter = PourbaixPlotter(self.pd)\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_plot_pourbaix(self):\n plotter = PourbaixPlotter(self.pd)\n # Default limits\n plotter.get_pourbaix_plot()\n # Non-standard limits\n plotter.get_pourbaix_plot(limits=[[-5, 4], [-2, 2]])\n\n def test_plot_entry_stability(self):\n entry = self.pd.all_entries[0]\n self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])\n\n # binary system\n pd_binary = PourbaixDiagram(self.test_data[\"Ag-Te\"], comp_dict={\"Ag\": 0.5, \"Te\": 0.5})\n binary_plotter = PourbaixPlotter(pd_binary)\n plt = binary_plotter.plot_entry_stability(self.test_data[\"Ag-Te\"][53])\n plt.close()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.linspace" ] ]
JohnSpencerTerry/superset
[ "597b020168411892853949f09608884b9afad963" ]
[ "superset/views/core.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=comparison-with-callable, line-too-long, too-many-branches\nimport logging\nimport re\nfrom contextlib import closing\nfrom datetime import datetime, timedelta\nfrom typing import Any, Callable, cast, Dict, List, Optional, Union\nfrom urllib import parse\n\nimport backoff\nimport pandas as pd\nimport simplejson as json\nfrom flask import abort, flash, g, Markup, redirect, render_template, request, Response\nfrom flask_appbuilder import expose\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.security.decorators import (\n has_access,\n has_access_api,\n permission_name,\n)\nfrom flask_appbuilder.security.sqla import models as ab_models\nfrom flask_babel import gettext as __, lazy_gettext as _, ngettext\nfrom jinja2.exceptions import TemplateError\nfrom jinja2.meta import find_undeclared_variables\nfrom sqlalchemy import and_, or_\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.exc import ArgumentError, DBAPIError, NoSuchModuleError, SQLAlchemyError\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql import functions as func\nfrom werkzeug.urls import Href\n\nfrom superset import (\n app,\n appbuilder,\n conf,\n db,\n event_logger,\n get_feature_flags,\n is_feature_enabled,\n results_backend,\n results_backend_use_msgpack,\n security_manager,\n sql_lab,\n viz,\n)\nfrom superset.charts.dao import ChartDAO\nfrom superset.connectors.base.models import BaseDatasource\nfrom superset.connectors.connector_registry import ConnectorRegistry\nfrom superset.connectors.sqla.models import (\n AnnotationDatasource,\n SqlaTable,\n SqlMetric,\n TableColumn,\n)\nfrom superset.dashboards.commands.importers.v0 import ImportDashboardsCommand\nfrom superset.dashboards.dao import DashboardDAO\nfrom superset.databases.dao import DatabaseDAO\nfrom superset.databases.filters import DatabaseFilter\nfrom superset.datasets.commands.exceptions import DatasetNotFoundError\nfrom superset.exceptions import (\n CacheLoadError,\n CertificateException,\n DatabaseNotFound,\n SerializationError,\n SupersetException,\n SupersetGenericDBErrorException,\n SupersetSecurityException,\n SupersetTemplateParamsErrorException,\n SupersetTimeoutException,\n)\nfrom superset.extensions import async_query_manager, cache_manager\nfrom superset.jinja_context import get_template_processor\nfrom superset.models.core import Database, FavStar, Log\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.datasource_access_request import DatasourceAccessRequest\nfrom superset.models.slice import Slice\nfrom superset.models.sql_lab import Query, TabState\nfrom superset.models.user_attributes import UserAttribute\nfrom superset.queries.dao import QueryDAO\nfrom 
superset.security.analytics_db_safety import (\n check_sqlalchemy_uri,\n DBSecurityException,\n)\nfrom superset.sql_parse import CtasMethod, ParsedQuery, Table\nfrom superset.sql_validators import get_validator_by_name\nfrom superset.tasks.async_queries import load_explore_json_into_cache\nfrom superset.typing import FlaskResponse\nfrom superset.utils import core as utils\nfrom superset.utils.async_query_manager import AsyncQueryTokenException\nfrom superset.utils.cache import etag_cache\nfrom superset.utils.dates import now_as_float\nfrom superset.views.base import (\n api,\n BaseSupersetView,\n check_ownership,\n common_bootstrap_payload,\n create_table_permissions,\n CsvResponse,\n data_payload_response,\n generate_download_headers,\n get_error_msg,\n get_user_roles,\n handle_api_exception,\n json_error_response,\n json_errors_response,\n json_success,\n validate_sqlatable,\n)\nfrom superset.views.utils import (\n _deserialize_results_payload,\n apply_display_max_row_limit,\n bootstrap_user_data,\n check_datasource_perms,\n check_explore_cache_perms,\n check_slice_perms,\n get_cta_schema_name,\n get_dashboard_extra_filters,\n get_datasource_info,\n get_form_data,\n get_viz,\n is_owner,\n)\nfrom superset.viz import BaseViz\n\nconfig = app.config\nSQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config[\"SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT\"]\nstats_logger = config[\"STATS_LOGGER\"]\nDAR = DatasourceAccessRequest\nQueryStatus = utils.QueryStatus\nlogger = logging.getLogger(__name__)\n\nDATABASE_KEYS = [\n \"allow_csv_upload\",\n \"allow_ctas\",\n \"allow_cvas\",\n \"allow_dml\",\n \"allow_multi_schema_metadata_fetch\",\n \"allow_run_async\",\n \"allows_subquery\",\n \"backend\",\n \"database_name\",\n \"expose_in_sqllab\",\n \"force_ctas_schema\",\n \"id\",\n]\n\n\nDATASOURCE_MISSING_ERR = __(\"The data source seems to have been deleted\")\nUSER_MISSING_ERR = __(\"The user seems to have been deleted\")\nPARAMETER_MISSING_ERR = (\n \"Please check your template parameters for syntax errors and make sure \"\n \"they match across your SQL query and Set Parameters. Then, try running \"\n \"your query again.\"\n)\n\n\nclass Superset(BaseSupersetView): # pylint: disable=too-many-public-methods\n \"\"\"The base views for Superset!\"\"\"\n\n logger = logging.getLogger(__name__)\n\n @has_access_api\n @event_logger.log_this\n @expose(\"/datasources/\")\n def datasources(self) -> FlaskResponse:\n return self.json_response(\n sorted(\n [\n datasource.short_data\n for datasource in ConnectorRegistry.get_all_datasources(db.session)\n if datasource.short_data.get(\"name\")\n ],\n key=lambda datasource: datasource[\"name\"],\n )\n )\n\n @has_access_api\n @event_logger.log_this\n @expose(\"/override_role_permissions/\", methods=[\"POST\"])\n def override_role_permissions(self) -> FlaskResponse:\n \"\"\"Updates the role with the give datasource permissions.\n\n Permissions not in the request will be revoked. This endpoint should\n be available to admins only. 
Expects JSON in the format:\n {\n 'role_name': '{role_name}',\n 'database': [{\n 'datasource_type': '{table|druid}',\n 'name': '{database_name}',\n 'schema': [{\n 'name': '{schema_name}',\n 'datasources': ['{datasource name}, {datasource name}']\n }]\n }]\n }\n \"\"\"\n data = request.get_json(force=True)\n role_name = data[\"role_name\"]\n databases = data[\"database\"]\n\n db_ds_names = set()\n for dbs in databases:\n for schema in dbs[\"schema\"]:\n for ds_name in schema[\"datasources\"]:\n fullname = utils.get_datasource_full_name(\n dbs[\"name\"], ds_name, schema=schema[\"name\"]\n )\n db_ds_names.add(fullname)\n\n existing_datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [d for d in existing_datasources if d.full_name in db_ds_names]\n role = security_manager.find_role(role_name)\n # remove all permissions\n role.permissions = []\n # grant permissions to the list of datasources\n granted_perms = []\n for datasource in datasources:\n view_menu_perm = security_manager.find_permission_view_menu(\n view_menu_name=datasource.perm, permission_name=\"datasource_access\"\n )\n # prevent creating empty permissions\n if view_menu_perm and view_menu_perm.view_menu:\n role.permissions.append(view_menu_perm)\n granted_perms.append(view_menu_perm.view_menu.name)\n db.session.commit()\n return self.json_response(\n {\"granted\": granted_perms, \"requested\": list(db_ds_names)}, status=201\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/request_access/\")\n def request_access(self) -> FlaskResponse:\n datasources = set()\n dashboard_id = request.args.get(\"dashboard_id\")\n if dashboard_id:\n dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()\n datasources |= dash.datasources\n datasource_id = request.args.get(\"datasource_id\")\n datasource_type = request.args.get(\"datasource_type\")\n if datasource_id and datasource_type:\n ds_class = ConnectorRegistry.sources.get(datasource_type)\n datasource = (\n db.session.query(ds_class).filter_by(id=int(datasource_id)).one()\n )\n datasources.add(datasource)\n\n has_access_ = all(\n (\n datasource and security_manager.can_access_datasource(datasource)\n for datasource in datasources\n )\n )\n if has_access_:\n return redirect(\"/superset/dashboard/{}\".format(dashboard_id))\n\n if request.args.get(\"action\") == \"go\":\n for datasource in datasources:\n access_request = DAR(\n datasource_id=datasource.id, datasource_type=datasource.type\n )\n db.session.add(access_request)\n db.session.commit()\n flash(__(\"Access was requested\"), \"info\")\n return redirect(\"/\")\n\n return self.render_template(\n \"superset/request_access.html\",\n datasources=datasources,\n datasource_names=\", \".join([o.name for o in datasources]),\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/approve\")\n def approve(self) -> FlaskResponse: # pylint: disable=too-many-locals,no-self-use\n def clean_fulfilled_requests(session: Session) -> None:\n for dar in session.query(DAR).all():\n datasource = ConnectorRegistry.get_datasource(\n dar.datasource_type, dar.datasource_id, session,\n )\n if not datasource or security_manager.can_access_datasource(datasource):\n # Dataset does not exist anymore\n session.delete(dar)\n session.commit()\n\n datasource_type = request.args[\"datasource_type\"]\n datasource_id = request.args[\"datasource_id\"]\n created_by_username = request.args.get(\"created_by\")\n role_to_grant = request.args.get(\"role_to_grant\")\n role_to_extend = request.args.get(\"role_to_extend\")\n\n 
session = db.session\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, session\n )\n\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, \"alert\")\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n requested_by = security_manager.find_user(username=created_by_username)\n if not requested_by:\n flash(USER_MISSING_ERR, \"alert\")\n return json_error_response(USER_MISSING_ERR)\n\n requests = (\n session.query(DAR)\n .filter(\n DAR.datasource_id == datasource_id,\n DAR.datasource_type == datasource_type,\n DAR.created_by_fk == requested_by.id,\n )\n .all()\n )\n\n if not requests:\n err = __(\"The access requests seem to have been deleted\")\n flash(err, \"alert\")\n return json_error_response(err)\n\n # check if you can approve\n if security_manager.can_access_all_datasources() or check_ownership(\n datasource, raise_if_false=False\n ):\n # can by done by admin only\n if role_to_grant:\n role = security_manager.find_role(role_to_grant)\n requested_by.roles.append(role)\n msg = __(\n \"%(user)s was granted the role %(role)s that gives access \"\n \"to the %(datasource)s\",\n user=requested_by.username,\n role=role_to_grant,\n datasource=datasource.full_name,\n )\n utils.notify_user_about_perm_udate(\n g.user,\n requested_by,\n role,\n datasource,\n \"email/role_granted.txt\",\n app.config,\n )\n flash(msg, \"info\")\n\n if role_to_extend:\n perm_view = security_manager.find_permission_view_menu(\n \"email/datasource_access\", datasource.perm\n )\n role = security_manager.find_role(role_to_extend)\n security_manager.add_permission_role(role, perm_view)\n msg = __(\n \"Role %(r)s was extended to provide the access to \"\n \"the datasource %(ds)s\",\n r=role_to_extend,\n ds=datasource.full_name,\n )\n utils.notify_user_about_perm_udate(\n g.user,\n requested_by,\n role,\n datasource,\n \"email/role_extended.txt\",\n app.config,\n )\n flash(msg, \"info\")\n clean_fulfilled_requests(session)\n else:\n flash(__(\"You have no permission to approve this request\"), \"danger\")\n return redirect(\"/accessrequestsmodelview/list/\")\n for request_ in requests:\n session.delete(request_)\n session.commit()\n return redirect(\"/accessrequestsmodelview/list/\")\n\n @has_access\n @event_logger.log_this\n @expose(\"/slice/<int:slice_id>/\")\n def slice(self, slice_id: int) -> FlaskResponse: # pylint: disable=no-self-use\n _, slc = get_form_data(slice_id, use_slice_data=True)\n if not slc:\n abort(404)\n endpoint = \"/superset/explore/?form_data={}\".format(\n parse.quote(json.dumps({\"slice_id\": slice_id}))\n )\n param = utils.ReservedUrlParameters.STANDALONE.value\n if request.args.get(param) == \"true\":\n endpoint += f\"&{param}=true\"\n return redirect(endpoint)\n\n def get_query_string_response(self, viz_obj: BaseViz) -> FlaskResponse:\n query = None\n try:\n query_obj = viz_obj.query_obj()\n if query_obj:\n query = viz_obj.datasource.get_query_str(query_obj)\n except Exception as ex: # pylint: disable=broad-except\n err_msg = utils.error_msg_from_exception(ex)\n logger.exception(err_msg)\n return json_error_response(err_msg)\n\n if not query:\n query = \"No query.\"\n\n return self.json_response(\n {\"query\": query, \"language\": viz_obj.datasource.query_language}\n )\n\n def get_raw_results(self, viz_obj: BaseViz) -> FlaskResponse:\n payload = viz_obj.get_df_payload()\n if viz_obj.has_error(payload):\n return json_error_response(payload=payload, status=400)\n return self.json_response({\"data\": payload[\"df\"].to_dict(\"records\")})\n\n def get_samples(self, 
viz_obj: BaseViz) -> FlaskResponse:\n return self.json_response({\"data\": viz_obj.get_samples()})\n\n def generate_json(\n self, viz_obj: BaseViz, response_type: Optional[str] = None\n ) -> FlaskResponse:\n if response_type == utils.ChartDataResultFormat.CSV:\n return CsvResponse(\n viz_obj.get_csv(),\n status=200,\n headers=generate_download_headers(\"csv\"),\n mimetype=\"application/csv\",\n )\n\n if response_type == utils.ChartDataResultType.QUERY:\n return self.get_query_string_response(viz_obj)\n\n if response_type == utils.ChartDataResultType.RESULTS:\n return self.get_raw_results(viz_obj)\n\n if response_type == utils.ChartDataResultType.SAMPLES:\n return self.get_samples(viz_obj)\n\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @event_logger.log_this\n @api\n @has_access_api\n @expose(\"/slice_json/<int:slice_id>\")\n @etag_cache(check_perms=check_slice_perms)\n def slice_json(self, slice_id: int) -> FlaskResponse:\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n if not slc:\n return json_error_response(\"The slice does not exist\")\n try:\n viz_obj = get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=False,\n )\n return self.generate_json(viz_obj)\n except SupersetException as ex:\n return json_error_response(utils.error_msg_from_exception(ex))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/annotation_json/<int:layer_id>\")\n def annotation_json( # pylint: disable=no-self-use\n self, layer_id: int\n ) -> FlaskResponse:\n form_data = get_form_data()[0]\n form_data[\"layer_id\"] = layer_id\n form_data[\"filters\"] = [{\"col\": \"layer_id\", \"op\": \"==\", \"val\": layer_id}]\n # Set all_columns to ensure the TableViz returns the necessary columns to the\n # frontend.\n form_data[\"all_columns\"] = [\n \"created_on\",\n \"changed_on\",\n \"id\",\n \"start_dttm\",\n \"end_dttm\",\n \"layer_id\",\n \"short_descr\",\n \"long_descr\",\n \"json_metadata\",\n \"created_by_fk\",\n \"changed_by_fk\",\n ]\n datasource = AnnotationDatasource()\n viz_obj = viz.viz_types[\"table\"](datasource, form_data=form_data, force=False)\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @event_logger.log_this\n @api\n @has_access_api\n @handle_api_exception\n @permission_name(\"explore_json\")\n @expose(\"/explore_json/data/<cache_key>\", methods=[\"GET\"])\n @etag_cache(check_perms=check_explore_cache_perms)\n def explore_json_data(self, cache_key: str) -> FlaskResponse:\n \"\"\"Serves cached result data for async explore_json calls\n\n `self.generate_json` receives this input and returns different\n payloads based on the request args in the first block\n\n TODO: form_data should not be loaded twice from cache\n (also loaded in `check_explore_cache_perms`)\n \"\"\"\n try:\n cached = cache_manager.cache.get(cache_key)\n if not cached:\n raise CacheLoadError(\"Cached data not found\")\n\n form_data = cached.get(\"form_data\")\n response_type = cached.get(\"response_type\")\n\n datasource_id, datasource_type = get_datasource_info(None, None, form_data)\n\n viz_obj = get_viz(\n datasource_type=cast(str, datasource_type),\n datasource_id=datasource_id,\n form_data=form_data,\n force_cached=True,\n )\n\n return self.generate_json(viz_obj, response_type)\n except SupersetException as ex:\n return json_error_response(utils.error_msg_from_exception(ex), 400)\n\n EXPLORE_JSON_METHODS = 
[\"POST\"]\n if not is_feature_enabled(\"ENABLE_EXPLORE_JSON_CSRF_PROTECTION\"):\n EXPLORE_JSON_METHODS.append(\"GET\")\n\n @api\n @has_access_api\n @handle_api_exception\n @event_logger.log_this\n @expose(\n \"/explore_json/<datasource_type>/<int:datasource_id>/\",\n methods=EXPLORE_JSON_METHODS,\n )\n @expose(\"/explore_json/\", methods=EXPLORE_JSON_METHODS)\n @etag_cache(check_perms=check_datasource_perms)\n def explore_json(\n self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None\n ) -> FlaskResponse:\n \"\"\"Serves all request that GET or POST form_data\n\n This endpoint evolved to be the entry point of many different\n requests that GETs or POSTs a form_data.\n\n `self.generate_json` receives this input and returns different\n payloads based on the request args in the first block\n\n TODO: break into one endpoint for each return shape\"\"\"\n\n response_type = utils.ChartDataResultFormat.JSON.value\n responses: List[\n Union[utils.ChartDataResultFormat, utils.ChartDataResultType]\n ] = list(utils.ChartDataResultFormat)\n responses.extend(list(utils.ChartDataResultType))\n for response_option in responses:\n if request.args.get(response_option) == \"true\":\n response_type = response_option\n break\n\n form_data = get_form_data()[0]\n\n try:\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data\n )\n\n force = request.args.get(\"force\") == \"true\"\n\n # TODO: support CSV, SQL query and other non-JSON types\n if (\n is_feature_enabled(\"GLOBAL_ASYNC_QUERIES\")\n and response_type == utils.ChartDataResultFormat.JSON\n ):\n try:\n async_channel_id = async_query_manager.parse_jwt_from_request(\n request\n )[\"channel\"]\n job_metadata = async_query_manager.init_job(async_channel_id)\n load_explore_json_into_cache.delay(\n job_metadata, form_data, response_type, force\n )\n except AsyncQueryTokenException:\n return json_error_response(\"Not authorized\", 401)\n\n return json_success(json.dumps(job_metadata), status=202)\n\n viz_obj = get_viz(\n datasource_type=cast(str, datasource_type),\n datasource_id=datasource_id,\n form_data=form_data,\n force=force,\n )\n\n return self.generate_json(viz_obj, response_type)\n except SupersetException as ex:\n return json_error_response(utils.error_msg_from_exception(ex), 400)\n\n @has_access\n @event_logger.log_this\n @expose(\"/import_dashboards\", methods=[\"GET\", \"POST\"])\n def import_dashboards(self) -> FlaskResponse:\n \"\"\"Overrides the dashboards using json instances from the file.\"\"\"\n import_file = request.files.get(\"file\")\n if request.method == \"POST\" and import_file:\n success = False\n database_id = request.form.get(\"db_id\")\n try:\n ImportDashboardsCommand(\n {import_file.filename: import_file.read()}, database_id\n ).run()\n success = True\n except DatabaseNotFound as ex:\n logger.exception(ex)\n flash(\n _(\n \"Cannot import dashboard: %(db_error)s.\\n\"\n \"Make sure to create the database before \"\n \"importing the dashboard.\",\n db_error=ex,\n ),\n \"danger\",\n )\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(ex)\n flash(\n _(\n \"An unknown error occurred. 
\"\n \"Please contact your Superset administrator\"\n ),\n \"danger\",\n )\n if success:\n flash(\"Dashboard(s) have been imported\", \"success\")\n return redirect(\"/dashboard/list/\")\n\n databases = db.session.query(Database).all()\n return self.render_template(\n \"superset/import_dashboards.html\", databases=databases\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/explore/<datasource_type>/<int:datasource_id>/\", methods=[\"GET\", \"POST\"])\n @expose(\"/explore/\", methods=[\"GET\", \"POST\"])\n def explore( # pylint: disable=too-many-locals,too-many-return-statements\n self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None\n ) -> FlaskResponse:\n user_id = g.user.get_id() if g.user else None\n form_data, slc = get_form_data(use_slice_data=True)\n\n # Flash the SIP-15 message if the slice is owned by the current user and has not\n # been updated, i.e., is not using the [start, end) interval.\n if (\n config[\"SIP_15_ENABLED\"]\n and slc\n and g.user in slc.owners\n and (\n not form_data.get(\"time_range_endpoints\")\n or form_data[\"time_range_endpoints\"]\n != (\n utils.TimeRangeEndpoint.INCLUSIVE,\n utils.TimeRangeEndpoint.EXCLUSIVE,\n )\n )\n ):\n url = Href(\"/superset/explore/\")(\n {\n \"form_data\": json.dumps(\n {\n \"slice_id\": slc.id,\n \"time_range_endpoints\": (\n utils.TimeRangeEndpoint.INCLUSIVE.value,\n utils.TimeRangeEndpoint.EXCLUSIVE.value,\n ),\n }\n )\n }\n )\n flash(Markup(config[\"SIP_15_TOAST_MESSAGE\"].format(url=url)))\n\n try:\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data\n )\n except SupersetException:\n datasource_id = None\n # fallback unkonw datasource to table type\n datasource_type = SqlaTable.type\n\n datasource: Optional[BaseDatasource] = None\n if datasource_id is not None:\n try:\n datasource = ConnectorRegistry.get_datasource(\n cast(str, datasource_type), datasource_id, db.session\n )\n except DatasetNotFoundError:\n pass\n datasource_name = datasource.name if datasource else _(\"[Missing Dataset]\")\n\n if datasource:\n if config[\"ENABLE_ACCESS_REQUEST\"] and (\n not security_manager.can_access_datasource(datasource)\n ):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n \"danger\",\n )\n return redirect(\n \"superset/request_access/?\"\n f\"datasource_type={datasource_type}&\"\n f\"datasource_id={datasource_id}&\"\n )\n\n # if feature enabled, run some health check rules for sqla datasource\n if hasattr(datasource, \"health_check\"):\n datasource.health_check()\n\n viz_type = form_data.get(\"viz_type\")\n if not viz_type and datasource and datasource.default_endpoint:\n return redirect(datasource.default_endpoint)\n\n # slc perms\n slice_add_perm = security_manager.can_access(\"can_write\", \"Chart\")\n slice_overwrite_perm = is_owner(slc, g.user) if slc else False\n slice_download_perm = security_manager.can_access(\"can_read\", \"Chart\")\n\n form_data[\"datasource\"] = str(datasource_id) + \"__\" + cast(str, datasource_type)\n\n # On explore, merge legacy and extra filters into the form data\n utils.convert_legacy_filters_into_adhoc(form_data)\n utils.merge_extra_filters(form_data)\n\n # merge request url params\n if request.method == \"GET\":\n utils.merge_request_params(form_data, request.args)\n\n # handle save or overwrite\n action = request.args.get(\"action\")\n\n if action == \"overwrite\" and not slice_overwrite_perm:\n return json_error_response(\n _(\"You don't have the rights to \") + _(\"alter this \") + 
_(\"chart\"),\n status=400,\n )\n\n if action == \"saveas\" and not slice_add_perm:\n return json_error_response(\n _(\"You don't have the rights to \") + _(\"create a \") + _(\"chart\"),\n status=400,\n )\n\n if action in (\"saveas\", \"overwrite\") and datasource:\n return self.save_or_overwrite_slice(\n slc,\n slice_add_perm,\n slice_overwrite_perm,\n slice_download_perm,\n datasource.id,\n datasource.type,\n datasource.name,\n )\n\n standalone = (\n request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == \"true\"\n )\n dummy_datasource_data: Dict[str, Any] = {\n \"type\": datasource_type,\n \"name\": datasource_name,\n \"columns\": [],\n \"metrics\": [],\n }\n bootstrap_data = {\n \"can_add\": slice_add_perm,\n \"can_download\": slice_download_perm,\n \"can_overwrite\": slice_overwrite_perm,\n \"datasource\": datasource.data if datasource else dummy_datasource_data,\n \"form_data\": form_data,\n \"datasource_id\": datasource_id,\n \"datasource_type\": datasource_type,\n \"slice\": slc.data if slc else None,\n \"standalone\": standalone,\n \"user_id\": user_id,\n \"forced_height\": request.args.get(\"height\"),\n \"common\": common_bootstrap_payload(),\n }\n if slc:\n title = slc.slice_name\n elif datasource:\n table_name = (\n datasource.table_name\n if datasource_type == \"table\"\n else datasource.datasource_name\n )\n title = _(\"Explore - %(table)s\", table=table_name)\n else:\n title = _(\"Explore\")\n\n return self.render_template(\n \"superset/basic.html\",\n bootstrap_data=json.dumps(\n bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser\n ),\n entry=\"explore\",\n title=title,\n standalone_mode=standalone,\n )\n\n @api\n @handle_api_exception\n @has_access_api\n @event_logger.log_this\n @expose(\"/filter/<datasource_type>/<int:datasource_id>/<column>/\")\n def filter( # pylint: disable=no-self-use\n self, datasource_type: str, datasource_id: int, column: str\n ) -> FlaskResponse:\n \"\"\"\n Endpoint to retrieve values for specified column.\n\n :param datasource_type: Type of datasource e.g. 
table\n :param datasource_id: Datasource id\n :param column: Column name to retrieve values for\n :returns: The Flask response\n :raises SupersetSecurityException: If the user cannot access the resource\n \"\"\"\n # TODO: Cache endpoint by user, datasource and column\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session,\n )\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n datasource.raise_for_access()\n payload = json.dumps(\n datasource.values_for_column(column, config[\"FILTER_SELECT_ROW_LIMIT\"]),\n default=utils.json_int_dttm_ser,\n ignore_nan=True,\n )\n return json_success(payload)\n\n @staticmethod\n def remove_extra_filters(filters: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Extra filters are ones inherited from the dashboard's temporary context\n Those should not be saved when saving the chart\"\"\"\n return [f for f in filters if not f.get(\"isExtra\")]\n\n def save_or_overwrite_slice( # pylint: disable=too-many-arguments,too-many-locals,no-self-use\n self,\n slc: Optional[Slice],\n slice_add_perm: bool,\n slice_overwrite_perm: bool,\n slice_download_perm: bool,\n datasource_id: int,\n datasource_type: str,\n datasource_name: str,\n ) -> FlaskResponse:\n \"\"\"Save or overwrite a slice\"\"\"\n slice_name = request.args.get(\"slice_name\")\n action = request.args.get(\"action\")\n form_data = get_form_data()[0]\n\n if action == \"saveas\":\n if \"slice_id\" in form_data:\n form_data.pop(\"slice_id\") # don't save old slice_id\n slc = Slice(owners=[g.user] if g.user else [])\n\n form_data[\"adhoc_filters\"] = self.remove_extra_filters(\n form_data.get(\"adhoc_filters\", [])\n )\n\n assert slc\n slc.params = json.dumps(form_data, indent=2, sort_keys=True)\n slc.datasource_name = datasource_name\n slc.viz_type = form_data[\"viz_type\"]\n slc.datasource_type = datasource_type\n slc.datasource_id = datasource_id\n slc.slice_name = slice_name\n\n if action == \"saveas\" and slice_add_perm:\n ChartDAO.save(slc)\n msg = _(\"Chart [{}] has been saved\").format(slc.slice_name)\n flash(msg, \"info\")\n elif action == \"overwrite\" and slice_overwrite_perm:\n ChartDAO.overwrite(slc)\n msg = _(\"Chart [{}] has been overwritten\").format(slc.slice_name)\n flash(msg, \"info\")\n\n # Adding slice to a dashboard if requested\n dash: Optional[Dashboard] = None\n\n save_to_dashboard_id = request.args.get(\"save_to_dashboard_id\")\n new_dashboard_name = request.args.get(\"new_dashboard_name\")\n if save_to_dashboard_id:\n # Adding the chart to an existing dashboard\n dash = cast(\n Dashboard,\n db.session.query(Dashboard)\n .filter_by(id=int(save_to_dashboard_id))\n .one(),\n )\n # check edit dashboard permissions\n dash_overwrite_perm = check_ownership(dash, raise_if_false=False)\n if not dash_overwrite_perm:\n return json_error_response(\n _(\"You don't have the rights to \")\n + _(\"alter this \")\n + _(\"dashboard\"),\n status=400,\n )\n\n flash(\n _(\"Chart [{}] was added to dashboard [{}]\").format(\n slc.slice_name, dash.dashboard_title\n ),\n \"info\",\n )\n elif new_dashboard_name:\n # Creating and adding to a new dashboard\n # check create dashboard permissions\n dash_add_perm = security_manager.can_access(\"can_write\", \"Dashboard\")\n if not dash_add_perm:\n return json_error_response(\n _(\"You don't have the rights to \")\n + _(\"create a \")\n + _(\"dashboard\"),\n status=400,\n )\n\n dash = Dashboard(\n dashboard_title=request.args.get(\"new_dashboard_name\"),\n owners=[g.user] if g.user else [],\n )\n 
flash(\n _(\n \"Dashboard [{}] just got created and chart [{}] was added \" \"to it\"\n ).format(dash.dashboard_title, slc.slice_name),\n \"info\",\n )\n\n if dash and slc not in dash.slices:\n dash.slices.append(slc)\n db.session.commit()\n\n response = {\n \"can_add\": slice_add_perm,\n \"can_download\": slice_download_perm,\n \"can_overwrite\": is_owner(slc, g.user),\n \"form_data\": slc.form_data,\n \"slice\": slc.data,\n \"dashboard_url\": dash.url if dash else None,\n \"dashboard_id\": dash.id if dash else None,\n }\n\n if dash and request.args.get(\"goto_dash\") == \"true\":\n response.update({\"dashboard\": dash.url})\n\n return json_success(json.dumps(response))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/schemas/<int:db_id>/\")\n @expose(\"/schemas/<int:db_id>/<force_refresh>/\")\n def schemas( # pylint: disable=no-self-use\n self, db_id: int, force_refresh: str = \"false\"\n ) -> FlaskResponse:\n logger.warning(\n \"This API endpoint is deprecated and will be removed in version 1.0.0\"\n )\n db_id = int(db_id)\n database = db.session.query(Database).get(db_id)\n if database:\n schemas = database.get_all_schema_names(\n cache=database.schema_cache_enabled,\n cache_timeout=database.schema_cache_timeout,\n force=force_refresh.lower() == \"true\",\n )\n schemas = security_manager.get_schemas_accessible_by_user(database, schemas)\n else:\n schemas = []\n\n return Response(json.dumps({\"schemas\": schemas}), mimetype=\"application/json\")\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/tables/<int:db_id>/<schema>/<substr>/\")\n @expose(\"/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/\")\n def tables( # pylint: disable=too-many-locals,no-self-use\n self, db_id: int, schema: str, substr: str, force_refresh: str = \"false\"\n ) -> FlaskResponse:\n \"\"\"Endpoint to fetch the list of tables for given database\"\"\"\n # Guarantees database filtering by security access\n query = db.session.query(Database)\n query = DatabaseFilter(\"id\", SQLAInterface(Database, db.session)).apply(\n query, None\n )\n database = query.filter_by(id=db_id).one_or_none()\n if not database:\n return json_error_response(\"Not found\", 404)\n\n force_refresh_parsed = force_refresh.lower() == \"true\"\n schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)\n\n if schema_parsed:\n tables = (\n database.get_all_table_names_in_schema(\n schema=schema_parsed,\n force=force_refresh_parsed,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout,\n )\n or []\n )\n views = (\n database.get_all_view_names_in_schema(\n schema=schema_parsed,\n force=force_refresh_parsed,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout,\n )\n or []\n )\n else:\n tables = database.get_all_table_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60\n )\n views = database.get_all_view_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60\n )\n tables = security_manager.get_datasources_accessible_by_user(\n database, tables, schema_parsed\n )\n views = security_manager.get_datasources_accessible_by_user(\n database, views, schema_parsed\n )\n\n def get_datasource_label(ds_name: utils.DatasourceName) -> str:\n return (\n ds_name.table if schema_parsed else f\"{ds_name.schema}.{ds_name.table}\"\n )\n\n if substr_parsed:\n tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]\n views = [vn 
for vn in views if substr_parsed in get_datasource_label(vn)]\n\n if not schema_parsed and database.default_schemas:\n user_schema = g.user.email.split(\"@\")[0]\n valid_schemas = set(database.default_schemas + [user_schema])\n\n tables = [tn for tn in tables if tn.schema in valid_schemas]\n views = [vn for vn in views if vn.schema in valid_schemas]\n\n max_items = config[\"MAX_TABLE_NAMES\"] or len(tables)\n total_items = len(tables) + len(views)\n max_tables = len(tables)\n max_views = len(views)\n if total_items and substr_parsed:\n max_tables = max_items * len(tables) // total_items\n max_views = max_items * len(views) // total_items\n\n dataset_tables = {table.name: table for table in database.tables}\n\n table_options = [\n {\n \"value\": tn.table,\n \"schema\": tn.schema,\n \"label\": get_datasource_label(tn),\n \"title\": get_datasource_label(tn),\n \"type\": \"table\",\n \"extra\": dataset_tables[f\"{tn.schema}.{tn.table}\"].extra_dict\n if (f\"{tn.schema}.{tn.table}\" in dataset_tables)\n else None,\n }\n for tn in tables[:max_tables]\n ]\n table_options.extend(\n [\n {\n \"value\": vn.table,\n \"schema\": vn.schema,\n \"label\": get_datasource_label(vn),\n \"title\": get_datasource_label(vn),\n \"type\": \"view\",\n }\n for vn in views[:max_views]\n ]\n )\n table_options.sort(key=lambda value: value[\"label\"])\n payload = {\"tableLength\": len(tables) + len(views), \"options\": table_options}\n return json_success(json.dumps(payload))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/copy_dash/<int:dashboard_id>/\", methods=[\"GET\", \"POST\"])\n def copy_dash( # pylint: disable=no-self-use\n self, dashboard_id: int\n ) -> FlaskResponse:\n \"\"\"Copy dashboard\"\"\"\n session = db.session()\n data = json.loads(request.form[\"data\"])\n # client-side send back last_modified_time which was set when\n # the dashboard was open. 
it was use to avoid mid-air collision.\n # remove it to avoid confusion.\n data.pop(\"last_modified_time\", None)\n\n dash = Dashboard()\n original_dash = session.query(Dashboard).get(dashboard_id)\n\n dash.owners = [g.user] if g.user else []\n dash.dashboard_title = data[\"dashboard_title\"]\n\n old_to_new_slice_ids: Dict[int, int] = {}\n if data[\"duplicate_slices\"]:\n # Duplicating slices as well, mapping old ids to new ones\n for slc in original_dash.slices:\n new_slice = slc.clone()\n new_slice.owners = [g.user] if g.user else []\n session.add(new_slice)\n session.flush()\n new_slice.dashboards.append(dash)\n old_to_new_slice_ids[slc.id] = new_slice.id\n\n # update chartId of layout entities\n for value in data[\"positions\"].values():\n if isinstance(value, dict) and value.get(\"meta\", {}).get(\"chartId\"):\n old_id = value[\"meta\"][\"chartId\"]\n new_id = old_to_new_slice_ids.get(old_id)\n value[\"meta\"][\"chartId\"] = new_id\n else:\n dash.slices = original_dash.slices\n\n dash.params = original_dash.params\n\n DashboardDAO.set_dash_metadata(dash, data, old_to_new_slice_ids)\n session.add(dash)\n session.commit()\n dash_json = json.dumps(dash.data)\n session.close()\n return json_success(dash_json)\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/save_dash/<int:dashboard_id>/\", methods=[\"GET\", \"POST\"])\n def save_dash( # pylint: disable=no-self-use\n self, dashboard_id: int\n ) -> FlaskResponse:\n \"\"\"Save a dashboard's metadata\"\"\"\n session = db.session()\n dash = session.query(Dashboard).get(dashboard_id)\n check_ownership(dash, raise_if_false=True)\n data = json.loads(request.form[\"data\"])\n # client-side send back last_modified_time which was set when\n # the dashboard was open. it was use to avoid mid-air collision.\n remote_last_modified_time = data.get(\"last_modified_time\")\n current_last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()\n if remote_last_modified_time < current_last_modified_time:\n return json_error_response(\n __(\n \"This dashboard was changed recently. 
\"\n \"Please reload dashboard to get latest version.\"\n ),\n 412,\n )\n # remove to avoid confusion.\n data.pop(\"last_modified_time\", None)\n\n DashboardDAO.set_dash_metadata(dash, data)\n session.merge(dash)\n session.commit()\n\n # get updated changed_on\n dash = session.query(Dashboard).get(dashboard_id)\n last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()\n session.close()\n return json_success(\n json.dumps({\"status\": \"SUCCESS\", \"last_modified_time\": last_modified_time,})\n )\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/add_slices/<int:dashboard_id>/\", methods=[\"POST\"])\n def add_slices( # pylint: disable=no-self-use\n self, dashboard_id: int\n ) -> FlaskResponse:\n \"\"\"Add and save slices to a dashboard\"\"\"\n data = json.loads(request.form[\"data\"])\n session = db.session()\n dash = session.query(Dashboard).get(dashboard_id)\n check_ownership(dash, raise_if_false=True)\n new_slices = session.query(Slice).filter(Slice.id.in_(data[\"slice_ids\"]))\n dash.slices += new_slices\n session.merge(dash)\n session.commit()\n session.close()\n return \"SLICES ADDED\"\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/testconn\", methods=[\"POST\", \"GET\"])\n def testconn( # pylint: disable=too-many-return-statements,no-self-use\n self,\n ) -> FlaskResponse:\n \"\"\"Tests a sqla connection\"\"\"\n db_name = request.json.get(\"name\")\n uri = request.json.get(\"uri\")\n try:\n if app.config[\"PREVENT_UNSAFE_DB_CONNECTIONS\"]:\n check_sqlalchemy_uri(uri)\n # if the database already exists in the database, only its safe\n # (password-masked) URI would be shown in the UI and would be passed in the\n # form data so if the database already exists and the form was submitted\n # with the safe URI, we assume we should retrieve the decrypted URI to test\n # the connection.\n if db_name:\n existing_database = (\n db.session.query(Database)\n .filter_by(database_name=db_name)\n .one_or_none()\n )\n if existing_database and uri == existing_database.safe_sqlalchemy_uri():\n uri = existing_database.sqlalchemy_uri_decrypted\n\n # This is the database instance that will be tested. 
Note the extra fields\n # are represented as JSON encoded strings in the model.\n database = Database(\n server_cert=request.json.get(\"server_cert\"),\n extra=json.dumps(request.json.get(\"extra\", {})),\n impersonate_user=request.json.get(\"impersonate_user\"),\n encrypted_extra=json.dumps(request.json.get(\"encrypted_extra\", {})),\n )\n database.set_sqlalchemy_uri(uri)\n database.db_engine_spec.mutate_db_for_connection_test(database)\n\n username = g.user.username if g.user is not None else None\n engine = database.get_sqla_engine(user_name=username)\n\n with closing(engine.raw_connection()) as conn:\n if engine.dialect.do_ping(conn):\n return json_success('\"OK\"')\n\n raise DBAPIError(None, None, None)\n except CertificateException as ex:\n logger.info(\"Certificate exception\")\n return json_error_response(ex.message)\n except (NoSuchModuleError, ModuleNotFoundError):\n logger.info(\"Invalid driver\")\n driver_name = make_url(uri).drivername\n return json_error_response(\n _(\n \"Could not load database driver: %(driver_name)s\",\n driver_name=driver_name,\n ),\n 400,\n )\n except ArgumentError:\n logger.info(\"Invalid URI\")\n return json_error_response(\n _(\n \"Invalid connection string, a valid string usually follows:\\n\"\n \"'DRIVER://USER:PASSWORD@DB-HOST/DATABASE-NAME'\"\n )\n )\n except DBAPIError:\n logger.warning(\"Connection failed\")\n return json_error_response(\n _(\"Connection failed, please check your connection settings\"), 400\n )\n except DBSecurityException as ex:\n logger.warning(\"Stopped an unsafe database connection\")\n return json_error_response(_(str(ex)), 400)\n except Exception as ex: # pylint: disable=broad-except\n logger.warning(\"Unexpected error %s\", type(ex).__name__)\n return json_error_response(\n _(\"Unexpected error occurred, please check your logs for details\"), 400\n )\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/recent_activity/<int:user_id>/\", methods=[\"GET\"])\n def recent_activity( # pylint: disable=no-self-use\n self, user_id: int\n ) -> FlaskResponse:\n \"\"\"Recent activity (actions) for a given user\"\"\"\n limit = request.args.get(\"limit\")\n limit = int(limit) if limit and limit.isdigit() else 100\n actions = request.args.get(\"actions\", \"explore,dashboard\").split(\",\")\n # whether to get distinct subjects\n distinct = request.args.get(\"distinct\") != \"false\"\n\n has_subject_title = or_(\n and_(\n Dashboard.dashboard_title is not None, Dashboard.dashboard_title != \"\",\n ),\n and_(Slice.slice_name is not None, Slice.slice_name != \"\"),\n )\n\n if distinct:\n one_year_ago = datetime.today() - timedelta(days=365)\n subqry = (\n db.session.query(\n Log.dashboard_id,\n Log.slice_id,\n Log.action,\n func.max(Log.dttm).label(\"dttm\"),\n )\n .group_by(Log.dashboard_id, Log.slice_id, Log.action)\n .filter(\n and_(\n Log.action.in_(actions),\n Log.user_id == user_id,\n # limit to one year of data to improve performance\n Log.dttm > one_year_ago,\n or_(Log.dashboard_id.isnot(None), Log.slice_id.isnot(None)),\n )\n )\n .subquery()\n )\n qry = (\n db.session.query(\n subqry,\n Dashboard.slug.label(\"dashboard_slug\"),\n Dashboard.dashboard_title,\n Slice.slice_name,\n )\n .outerjoin(Dashboard, Dashboard.id == subqry.c.dashboard_id)\n .outerjoin(Slice, Slice.id == subqry.c.slice_id,)\n .filter(has_subject_title)\n .order_by(subqry.c.dttm.desc())\n .limit(limit)\n )\n else:\n qry = (\n db.session.query(\n Log.dttm,\n Log.action,\n Log.dashboard_id,\n Log.slice_id,\n Dashboard.slug.label(\"dashboard_slug\"),\n 
Dashboard.dashboard_title,\n Slice.slice_name,\n )\n .outerjoin(Dashboard, Dashboard.id == Log.dashboard_id)\n .outerjoin(Slice, Slice.id == Log.slice_id)\n .filter(has_subject_title)\n .order_by(Log.dttm.desc())\n .limit(limit)\n )\n\n payload = []\n for log in qry.all():\n item_url = None\n item_title = None\n item_type = None\n if log.dashboard_id:\n item_type = \"dashboard\"\n item_url = Dashboard(id=log.dashboard_id, slug=log.dashboard_slug).url\n item_title = log.dashboard_title\n elif log.slice_id:\n slc = Slice(id=log.slice_id, slice_name=log.slice_name)\n item_type = \"slice\"\n item_url = slc.slice_url\n item_title = slc.chart\n\n payload.append(\n {\n \"action\": log.action,\n \"item_type\": item_type,\n \"item_url\": item_url,\n \"item_title\": item_title,\n \"time\": log.dttm,\n }\n )\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/csrf_token/\", methods=[\"GET\"])\n def csrf_token(self) -> FlaskResponse:\n return Response(\n self.render_template(\"superset/csrf_token.json\"), mimetype=\"text/json\"\n )\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/available_domains/\", methods=[\"GET\"])\n def available_domains(self) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"\n Returns the list of available Superset Webserver domains (if any)\n defined in config. This enables charts embedded in other apps to\n leverage domain sharding if appropriately configured.\n \"\"\"\n return Response(\n json.dumps(conf.get(\"SUPERSET_WEBSERVER_DOMAINS\")), mimetype=\"text/json\"\n )\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/fave_dashboards_by_username/<username>/\", methods=[\"GET\"])\n def fave_dashboards_by_username(self, username: str) -> FlaskResponse:\n \"\"\"This lets us use a user's username to pull favourite dashboards\"\"\"\n user = security_manager.find_user(username=username)\n return self.fave_dashboards(user.get_id())\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/fave_dashboards/<int:user_id>/\", methods=[\"GET\"])\n def fave_dashboards( # pylint: disable=no-self-use\n self, user_id: int\n ) -> FlaskResponse:\n qry = (\n db.session.query(Dashboard, FavStar.dttm)\n .join(\n FavStar,\n and_(\n FavStar.user_id == int(user_id),\n FavStar.class_name == \"Dashboard\",\n Dashboard.id == FavStar.obj_id,\n ),\n )\n .order_by(FavStar.dttm.desc())\n )\n payload = []\n for o in qry.all():\n dash = {\n \"id\": o.Dashboard.id,\n \"dashboard\": o.Dashboard.dashboard_link(),\n \"title\": o.Dashboard.dashboard_title,\n \"url\": o.Dashboard.url,\n \"dttm\": o.dttm,\n }\n if o.Dashboard.created_by:\n user = o.Dashboard.created_by\n dash[\"creator\"] = str(user)\n dash[\"creator_url\"] = \"/superset/profile/{}/\".format(user.username)\n payload.append(dash)\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/created_dashboards/<int:user_id>/\", methods=[\"GET\"])\n def created_dashboards( # pylint: disable=no-self-use\n self, user_id: int\n ) -> FlaskResponse:\n Dash = Dashboard\n qry = (\n db.session.query(Dash)\n .filter(\n or_( # pylint: disable=comparison-with-callable\n Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id\n )\n )\n .order_by(Dash.changed_on.desc())\n )\n payload = [\n {\n \"id\": o.id,\n \"dashboard\": o.dashboard_link(),\n \"title\": o.dashboard_title,\n \"url\": o.url,\n \"dttm\": o.changed_on,\n }\n for o in qry.all()\n ]\n 
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/user_slices\", methods=[\"GET\"])\n @expose(\"/user_slices/<int:user_id>/\", methods=[\"GET\"])\n def user_slices( # pylint: disable=no-self-use\n self, user_id: Optional[int] = None\n ) -> FlaskResponse:\n \"\"\"List of slices a user owns, created, modified or faved\"\"\"\n if not user_id:\n user_id = g.user.id\n\n owner_ids_query = (\n db.session.query(Slice.id)\n .join(Slice.owners)\n .filter(security_manager.user_model.id == user_id)\n )\n\n qry = (\n db.session.query(Slice, FavStar.dttm)\n .join(\n FavStar,\n and_(\n FavStar.user_id == user_id,\n FavStar.class_name == \"slice\",\n Slice.id == FavStar.obj_id,\n ),\n isouter=True,\n )\n .filter(\n or_(\n Slice.id.in_(owner_ids_query),\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n FavStar.user_id == user_id,\n )\n )\n .order_by(Slice.slice_name.asc())\n )\n payload = [\n {\n \"id\": o.Slice.id,\n \"title\": o.Slice.slice_name,\n \"url\": o.Slice.slice_url,\n \"data\": o.Slice.form_data,\n \"dttm\": o.dttm if o.dttm else o.Slice.changed_on,\n \"viz_type\": o.Slice.viz_type,\n }\n for o in qry.all()\n ]\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/created_slices\", methods=[\"GET\"])\n @expose(\"/created_slices/<int:user_id>/\", methods=[\"GET\"])\n def created_slices( # pylint: disable=no-self-use\n self, user_id: Optional[int] = None\n ) -> FlaskResponse:\n \"\"\"List of slices created by this user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(Slice)\n .filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))\n .order_by(Slice.changed_on.desc())\n )\n payload = [\n {\n \"id\": o.id,\n \"title\": o.slice_name,\n \"url\": o.slice_url,\n \"dttm\": o.changed_on,\n \"viz_type\": o.viz_type,\n }\n for o in qry.all()\n ]\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/fave_slices\", methods=[\"GET\"])\n @expose(\"/fave_slices/<int:user_id>/\", methods=[\"GET\"])\n def fave_slices( # pylint: disable=no-self-use\n self, user_id: Optional[int] = None\n ) -> FlaskResponse:\n \"\"\"Favorite slices for a user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(Slice, FavStar.dttm)\n .join(\n FavStar,\n and_(\n FavStar.user_id == user_id,\n FavStar.class_name == \"slice\",\n Slice.id == FavStar.obj_id,\n ),\n )\n .order_by(FavStar.dttm.desc())\n )\n payload = []\n for o in qry.all():\n dash = {\n \"id\": o.Slice.id,\n \"title\": o.Slice.slice_name,\n \"url\": o.Slice.slice_url,\n \"dttm\": o.dttm,\n \"viz_type\": o.Slice.viz_type,\n }\n if o.Slice.created_by:\n user = o.Slice.created_by\n dash[\"creator\"] = str(user)\n dash[\"creator_url\"] = \"/superset/profile/{}/\".format(user.username)\n payload.append(dash)\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @event_logger.log_this\n @api\n @has_access_api\n @expose(\"/warm_up_cache/\", methods=[\"GET\"])\n def warm_up_cache( # pylint: disable=too-many-locals,no-self-use\n self,\n ) -> FlaskResponse:\n \"\"\"Warms up the cache for the slice or table.\n\n Note for slices a force refresh occurs.\n\n In terms of the `extra_filters` these can be obtained from records in the JSON\n encoded `logs.json` column associated with the `explore_json` action.\n 
\"\"\"\n session = db.session()\n slice_id = request.args.get(\"slice_id\")\n dashboard_id = request.args.get(\"dashboard_id\")\n table_name = request.args.get(\"table_name\")\n db_name = request.args.get(\"db_name\")\n extra_filters = request.args.get(\"extra_filters\")\n slices: List[Slice] = []\n\n if not slice_id and not (table_name and db_name):\n return json_error_response(\n __(\n \"Malformed request. slice_id or table_name and db_name \"\n \"arguments are expected\"\n ),\n status=400,\n )\n if slice_id:\n slices = session.query(Slice).filter_by(id=slice_id).all()\n if not slices:\n return json_error_response(\n __(\"Chart %(id)s not found\", id=slice_id), status=404\n )\n elif table_name and db_name:\n table = (\n session.query(SqlaTable)\n .join(Database)\n .filter(\n Database.database_name == db_name\n or SqlaTable.table_name == table_name\n )\n ).one_or_none()\n if not table:\n return json_error_response(\n __(\n \"Table %(table)s wasn't found in the database %(db)s\",\n table=table_name,\n db=db_name,\n ),\n status=404,\n )\n slices = (\n session.query(Slice)\n .filter_by(datasource_id=table.id, datasource_type=table.type)\n .all()\n )\n\n result = []\n\n for slc in slices:\n try:\n form_data = get_form_data(slc.id, use_slice_data=True)[0]\n if dashboard_id:\n form_data[\"extra_filters\"] = (\n json.loads(extra_filters)\n if extra_filters\n else get_dashboard_extra_filters(slc.id, dashboard_id)\n )\n\n obj = get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=True,\n )\n\n g.form_data = form_data\n payload = obj.get_payload()\n delattr(g, \"form_data\")\n error = payload[\"errors\"] or None\n status = payload[\"status\"]\n except Exception as ex: # pylint: disable=broad-except\n error = utils.error_msg_from_exception(ex)\n status = None\n\n result.append(\n {\"slice_id\": slc.id, \"viz_error\": error, \"viz_status\": status}\n )\n\n return json_success(json.dumps(result))\n\n @has_access_api\n @event_logger.log_this\n @expose(\"/favstar/<class_name>/<int:obj_id>/<action>/\")\n def favstar( # pylint: disable=no-self-use\n self, class_name: str, obj_id: int, action: str\n ) -> FlaskResponse:\n \"\"\"Toggle favorite stars on Slices and Dashboard\"\"\"\n session = db.session()\n count = 0\n favs = (\n session.query(FavStar)\n .filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())\n .all()\n )\n if action == \"select\":\n if not favs:\n session.add(\n FavStar(\n class_name=class_name,\n obj_id=obj_id,\n user_id=g.user.get_id(),\n dttm=datetime.now(),\n )\n )\n count = 1\n elif action == \"unselect\":\n for fav in favs:\n session.delete(fav)\n else:\n count = len(favs)\n session.commit()\n return json_success(json.dumps({\"count\": count}))\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/dashboard/<int:dashboard_id>/published/\", methods=(\"GET\", \"POST\"))\n def publish( # pylint: disable=no-self-use\n self, dashboard_id: int\n ) -> FlaskResponse:\n \"\"\"Gets and toggles published status on dashboards\"\"\"\n logger.warning(\n \"This API endpoint is deprecated and will be removed in version 1.0.0\"\n )\n session = db.session()\n Role = ab_models.Role\n dash = (\n session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()\n )\n admin_role = session.query(Role).filter(Role.name == \"Admin\").one_or_none()\n\n if request.method == \"GET\":\n if dash:\n return json_success(json.dumps({\"published\": dash.published}))\n\n return json_error_response(\n f\"ERROR: cannot find 
dashboard {dashboard_id}\", status=404\n )\n\n edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()\n if not edit_perm:\n return json_error_response(\n f'ERROR: \"{g.user.username}\" cannot alter '\n f'dashboard \"{dash.dashboard_title}\"',\n status=403,\n )\n\n dash.published = str(request.form[\"published\"]).lower() == \"true\"\n session.commit()\n return json_success(json.dumps({\"published\": dash.published}))\n\n @has_access\n @expose(\"/dashboard/<dashboard_id_or_slug>/\")\n @event_logger.log_this_with_extra_payload\n def dashboard( # pylint: disable=too-many-locals\n self,\n dashboard_id_or_slug: str,\n # this parameter is added by `log_this_with_manual_updates`,\n # set a default value to appease pylint\n add_extra_log_payload: Callable[..., None] = lambda **kwargs: None,\n ) -> FlaskResponse:\n \"\"\"Server side rendering for a dashboard\"\"\"\n session = db.session()\n qry = session.query(Dashboard)\n if dashboard_id_or_slug.isdigit():\n qry = qry.filter_by(id=int(dashboard_id_or_slug))\n else:\n qry = qry.filter_by(slug=dashboard_id_or_slug)\n\n dash = qry.one_or_none()\n if not dash:\n abort(404)\n\n data = dash.full_data()\n\n if config[\"ENABLE_ACCESS_REQUEST\"]:\n for datasource in data[\"datasources\"].values():\n datasource = ConnectorRegistry.get_datasource(\n datasource_type=datasource[\"type\"],\n datasource_id=datasource[\"id\"],\n session=session,\n )\n if datasource and not security_manager.can_access_datasource(\n datasource=datasource\n ):\n flash(\n __(\n security_manager.get_datasource_access_error_msg(datasource)\n ),\n \"danger\",\n )\n return redirect(f\"/superset/request_access/?dashboard_id={dash.id}\")\n\n dash_edit_perm = check_ownership(\n dash, raise_if_false=False\n ) and security_manager.can_access(\"can_save_dash\", \"Superset\")\n dash_save_perm = security_manager.can_access(\"can_save_dash\", \"Superset\")\n superset_can_explore = security_manager.can_access(\"can_explore\", \"Superset\")\n superset_can_csv = security_manager.can_access(\"can_csv\", \"Superset\")\n slice_can_edit = security_manager.can_access(\"can_edit\", \"SliceModelView\")\n\n standalone_mode = (\n request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == \"true\"\n )\n edit_mode = (\n request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == \"true\"\n )\n\n add_extra_log_payload(\n dashboard_id=dash.id,\n dashboard_version=\"v2\",\n dash_edit_perm=dash_edit_perm,\n edit_mode=edit_mode,\n )\n\n if is_feature_enabled(\"REMOVE_SLICE_LEVEL_LABEL_COLORS\"):\n # dashboard metadata has dashboard-level label_colors,\n # so remove slice-level label_colors from its form_data\n for slc in data[\"slices\"]:\n form_data = slc.get(\"form_data\")\n form_data.pop(\"label_colors\", None)\n\n url_params = {\n key: value\n for key, value in request.args.items()\n if key not in [param.value for param in utils.ReservedUrlParameters]\n }\n\n bootstrap_data = {\n \"user_id\": g.user.get_id(),\n \"common\": common_bootstrap_payload(),\n \"editMode\": edit_mode,\n \"urlParams\": url_params,\n \"dashboard_data\": {\n **data[\"dashboard\"],\n \"standalone_mode\": standalone_mode,\n \"dash_save_perm\": dash_save_perm,\n \"dash_edit_perm\": dash_edit_perm,\n \"superset_can_explore\": superset_can_explore,\n \"superset_can_csv\": superset_can_csv,\n \"slice_can_edit\": slice_can_edit,\n },\n \"datasources\": data[\"datasources\"],\n }\n\n if request.args.get(\"json\") == \"true\":\n return json_success(\n json.dumps(bootstrap_data, 
default=utils.pessimistic_json_iso_dttm_ser)\n )\n\n return self.render_template(\n \"superset/dashboard.html\",\n entry=\"dashboard\",\n standalone_mode=standalone_mode,\n title=dash.dashboard_title,\n custom_css=dash.css,\n bootstrap_data=json.dumps(\n bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @api\n @has_access\n @event_logger.log_this\n @expose(\"/log/\", methods=[\"POST\"])\n def log(self) -> FlaskResponse: # pylint: disable=no-self-use\n return Response(status=200)\n\n @has_access\n @expose(\"/sync_druid/\", methods=[\"POST\"])\n @event_logger.log_this\n def sync_druid_source(self) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"Syncs the druid datasource in main db with the provided config.\n\n The endpoint takes 3 arguments:\n user - user name to perform the operation as\n cluster - name of the druid cluster\n config - configuration stored in json that contains:\n name: druid datasource name\n dimensions: list of the dimensions, they become druid columns\n with the type STRING\n metrics_spec: list of metrics (dictionary). Metric consists of\n 2 attributes: type and name. Type can be count,\n etc. `count` type is stored internally as longSum\n other fields will be ignored.\n\n Example: {\n 'name': 'test_click',\n 'metrics_spec': [{'type': 'count', 'name': 'count'}],\n 'dimensions': ['affiliate_id', 'campaign', 'first_seen']\n }\n \"\"\"\n payload = request.get_json(force=True)\n druid_config = payload[\"config\"]\n user_name = payload[\"user\"]\n cluster_name = payload[\"cluster\"]\n\n user = security_manager.find_user(username=user_name)\n DruidDatasource = ConnectorRegistry.sources[ # pylint: disable=invalid-name\n \"druid\"\n ]\n DruidCluster = DruidDatasource.cluster_class # pylint: disable=invalid-name\n if not user:\n err_msg = __(\n \"Can't find User '%(name)s', please ask your admin \" \"to create one.\",\n name=user_name,\n )\n logger.error(err_msg)\n return json_error_response(err_msg)\n cluster = (\n db.session.query(DruidCluster)\n .filter_by(cluster_name=cluster_name)\n .one_or_none()\n )\n if not cluster:\n err_msg = __(\n \"Can't find DruidCluster with cluster_name = \" \"'%(name)s'\",\n name=cluster_name,\n )\n logger.error(err_msg)\n return json_error_response(err_msg)\n try:\n DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)\n except Exception as ex: # pylint: disable=broad-except\n err_msg = utils.error_msg_from_exception(ex)\n logger.exception(err_msg)\n return json_error_response(err_msg)\n return Response(status=201)\n\n @has_access\n @expose(\"/get_or_create_table/\", methods=[\"POST\"])\n @event_logger.log_this\n def sqllab_table_viz(self) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"Gets or creates a table object with attributes passed to the API.\n\n It expects the json with params:\n * datasourceName - e.g. 
table name, required\n * dbId - database id, required\n * schema - table schema, optional\n * templateParams - params for the Jinja templating syntax, optional\n :return: Response\n \"\"\"\n data = json.loads(request.form[\"data\"])\n table_name = data[\"datasourceName\"]\n database_id = data[\"dbId\"]\n table = (\n db.session.query(SqlaTable)\n .filter_by(database_id=database_id, table_name=table_name)\n .one_or_none()\n )\n if not table:\n # Create table if doesn't exist.\n with db.session.no_autoflush:\n table = SqlaTable(table_name=table_name, owners=[g.user])\n table.database_id = database_id\n table.database = (\n db.session.query(Database).filter_by(id=database_id).one()\n )\n table.schema = data.get(\"schema\")\n table.template_params = data.get(\"templateParams\")\n # needed for the table validation.\n validate_sqlatable(table)\n\n db.session.add(table)\n table.fetch_metadata()\n create_table_permissions(table)\n db.session.commit()\n\n return json_success(json.dumps({\"table_id\": table.id}))\n\n @has_access\n @expose(\"/sqllab_viz/\", methods=[\"POST\"])\n @event_logger.log_this\n def sqllab_viz(self) -> FlaskResponse: # pylint: disable=no-self-use\n data = json.loads(request.form[\"data\"])\n try:\n table_name = data[\"datasourceName\"]\n database_id = data[\"dbId\"]\n except KeyError:\n return json_error_response(\"Missing required fields\", status=400)\n database = db.session.query(Database).get(database_id)\n if not database:\n return json_error_response(\"Database not found\", status=400)\n table = (\n db.session.query(SqlaTable)\n .filter_by(database_id=database_id, table_name=table_name)\n .one_or_none()\n )\n if not table:\n table = SqlaTable(table_name=table_name, owners=[g.user])\n table.database = database\n table.schema = data.get(\"schema\")\n table.template_params = data.get(\"templateParams\")\n table.is_sqllab_view = True\n table.sql = ParsedQuery(data.get(\"sql\")).stripped()\n db.session.add(table)\n cols = []\n for config_ in data.get(\"columns\"):\n column_name = config_.get(\"name\")\n col = TableColumn(\n column_name=column_name,\n filterable=True,\n groupby=True,\n is_dttm=config_.get(\"is_date\", False),\n type=config_.get(\"type\", False),\n )\n cols.append(col)\n\n table.columns = cols\n table.metrics = [SqlMetric(metric_name=\"count\", expression=\"count(*)\")]\n db.session.commit()\n return json_success(json.dumps({\"table_id\": table.id}))\n\n @has_access\n @expose(\"/extra_table_metadata/<int:database_id>/<table_name>/<schema>/\")\n @event_logger.log_this\n def extra_table_metadata( # pylint: disable=no-self-use\n self, database_id: int, table_name: str, schema: str\n ) -> FlaskResponse:\n parsed_schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name) # type: ignore\n mydb = db.session.query(Database).filter_by(id=database_id).one()\n payload = mydb.db_engine_spec.extra_table_metadata(\n mydb, table_name, parsed_schema\n )\n return json_success(json.dumps(payload))\n\n @has_access\n @expose(\"/select_star/<int:database_id>/<table_name>\")\n @expose(\"/select_star/<int:database_id>/<table_name>/<schema>\")\n @event_logger.log_this\n def select_star(\n self, database_id: int, table_name: str, schema: Optional[str] = None\n ) -> FlaskResponse:\n logging.warning(\n \"%s.select_star \"\n \"This API endpoint is deprecated and will be removed in version 1.0.0\",\n self.__class__.__name__,\n )\n stats_logger.incr(f\"{self.__class__.__name__}.select_star.init\")\n database = 
db.session.query(Database).get(database_id)\n if not database:\n stats_logger.incr(\n f\"deprecated.{self.__class__.__name__}.select_star.database_not_found\"\n )\n return json_error_response(\"Not found\", 404)\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name) # type: ignore\n if not self.appbuilder.sm.can_access_table(database, Table(table_name, schema)):\n stats_logger.incr(\n f\"deprecated.{self.__class__.__name__}.select_star.permission_denied\"\n )\n logging.warning(\n \"Permission denied for user %s on table: %s schema: %s\",\n str(g.user),\n table_name,\n schema,\n )\n return json_error_response(\"Not found\", 404)\n stats_logger.incr(f\"deprecated.{self.__class__.__name__}.select_star.success\")\n return json_success(\n database.select_star(\n table_name, schema, latest_partition=True, show_cols=True\n )\n )\n\n @has_access_api\n @expose(\"/estimate_query_cost/<int:database_id>/\", methods=[\"POST\"])\n @expose(\"/estimate_query_cost/<int:database_id>/<schema>/\", methods=[\"POST\"])\n @event_logger.log_this\n def estimate_query_cost( # pylint: disable=no-self-use\n self, database_id: int, schema: Optional[str] = None\n ) -> FlaskResponse:\n mydb = db.session.query(Database).get(database_id)\n\n sql = json.loads(request.form.get(\"sql\", '\"\"'))\n template_params = json.loads(request.form.get(\"templateParams\") or \"{}\")\n if template_params:\n template_processor = get_template_processor(mydb)\n sql = template_processor.process_template(sql, **template_params)\n\n timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT\n timeout_msg = f\"The estimation exceeded the {timeout} seconds timeout.\"\n try:\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n cost = mydb.db_engine_spec.estimate_query_cost(\n mydb, schema, sql, utils.QuerySource.SQL_LAB\n )\n except SupersetTimeoutException as ex:\n logger.exception(ex)\n return json_errors_response([ex.error])\n except Exception as ex: # pylint: disable=broad-except\n return json_error_response(utils.error_msg_from_exception(ex))\n\n spec = mydb.db_engine_spec\n query_cost_formatters: Dict[str, Any] = get_feature_flags().get(\n \"QUERY_COST_FORMATTERS_BY_ENGINE\", {}\n )\n query_cost_formatter = query_cost_formatters.get(\n spec.engine, spec.query_cost_formatter\n )\n cost = query_cost_formatter(cost)\n\n return json_success(json.dumps(cost))\n\n @expose(\"/theme/\")\n def theme(self) -> FlaskResponse:\n return self.render_template(\"superset/theme.html\")\n\n @has_access_api\n @expose(\"/results/<key>/\")\n @event_logger.log_this\n def results(self, key: str) -> FlaskResponse:\n return self.results_exec(key)\n\n @staticmethod\n def results_exec( # pylint: disable=too-many-return-statements\n key: str,\n ) -> FlaskResponse:\n \"\"\"Serves a key off of the results backend\n\n It is possible to pass the `rows` query argument to limit the number\n of rows returned.\n \"\"\"\n if not results_backend:\n return json_error_response(\"Results backend isn't configured\")\n\n read_from_results_backend_start = now_as_float()\n blob = results_backend.get(key)\n stats_logger.timing(\n \"sqllab.query.results_backend_read\",\n now_as_float() - read_from_results_backend_start,\n )\n if not blob:\n return json_error_response(\n \"Data could not be retrieved. \" \"You may want to re-run the query.\",\n status=410,\n )\n\n query = db.session.query(Query).filter_by(results_key=key).one_or_none()\n if query is None:\n return json_error_response(\n \"Data could not be retrieved. 
You may want to re-run the query.\",\n status=404,\n )\n\n try:\n query.raise_for_access()\n except SupersetSecurityException as ex:\n return json_errors_response([ex.error], status=403)\n\n payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)\n try:\n obj = _deserialize_results_payload(\n payload, query, cast(bool, results_backend_use_msgpack)\n )\n except SerializationError:\n return json_error_response(\n __(\"Data could not be deserialized. You may want to re-run the query.\"),\n status=404,\n )\n\n if \"rows\" in request.args:\n try:\n rows = int(request.args[\"rows\"])\n except ValueError:\n return json_error_response(\"Invalid `rows` argument\", status=400)\n obj = apply_display_max_row_limit(obj, rows)\n\n return json_success(\n json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)\n )\n\n @has_access_api\n @expose(\"/stop_query/\", methods=[\"POST\"])\n @event_logger.log_this\n @backoff.on_exception(\n backoff.constant,\n Exception,\n interval=1,\n on_backoff=lambda details: db.session.rollback(),\n on_giveup=lambda details: db.session.rollback(),\n max_tries=5,\n )\n def stop_query(self) -> FlaskResponse:\n client_id = request.form.get(\"client_id\")\n\n query = db.session.query(Query).filter_by(client_id=client_id).one()\n if query.status in [\n QueryStatus.FAILED,\n QueryStatus.SUCCESS,\n QueryStatus.TIMED_OUT,\n ]:\n logger.error(\n \"Query with client_id %s could not be stopped: \"\n \"query already complete\",\n str(client_id),\n )\n return self.json_response(\"OK\")\n query.status = QueryStatus.STOPPED\n db.session.commit()\n\n return self.json_response(\"OK\")\n\n @has_access_api\n @event_logger.log_this\n @expose(\"/validate_sql_json/\", methods=[\"POST\", \"GET\"])\n def validate_sql_json( # pylint: disable=too-many-locals,too-many-return-statements,no-self-use\n self,\n ) -> FlaskResponse:\n \"\"\"Validates that arbitrary sql is acceptable for the given database.\n Returns a list of error/warning annotations as json.\n \"\"\"\n sql = request.form[\"sql\"]\n database_id = request.form[\"database_id\"]\n schema = request.form.get(\"schema\") or None\n template_params = json.loads(request.form.get(\"templateParams\") or \"{}\")\n\n if len(template_params) > 0:\n # TODO: factor the Database object out of template rendering\n # or provide it as mydb so we can render template params\n # without having to also persist a Query ORM object.\n return json_error_response(\n \"SQL validation does not support template parameters\", status=400\n )\n\n session = db.session()\n mydb = session.query(Database).filter_by(id=database_id).one_or_none()\n if not mydb:\n return json_error_response(\n \"Database with id {} is missing.\".format(database_id), status=400\n )\n\n spec = mydb.db_engine_spec\n validators_by_engine = get_feature_flags().get(\"SQL_VALIDATORS_BY_ENGINE\")\n if not validators_by_engine or spec.engine not in validators_by_engine:\n return json_error_response(\n \"no SQL validator is configured for {}\".format(spec.engine), status=400\n )\n validator_name = validators_by_engine[spec.engine]\n validator = get_validator_by_name(validator_name)\n if not validator:\n return json_error_response(\n \"No validator named {} found (configured for the {} engine)\".format(\n validator_name, spec.engine\n )\n )\n\n try:\n timeout = config[\"SQLLAB_VALIDATION_TIMEOUT\"]\n timeout_msg = f\"The query exceeded the {timeout} seconds timeout.\"\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n errors = validator.validate(sql, schema, 
mydb)\n payload = json.dumps(\n [err.to_dict() for err in errors],\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n return json_success(payload)\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(ex)\n msg = _(\n \"%(validator)s was unable to check your query.\\n\"\n \"Please recheck your query.\\n\"\n \"Exception: %(ex)s\",\n validator=validator.name,\n ex=ex,\n )\n # Return as a 400 if the database error message says we got a 4xx error\n if re.search(r\"([\\W]|^)4\\d{2}([\\W]|$)\", str(ex)):\n return json_error_response(f\"{msg}\", status=400)\n return json_error_response(f\"{msg}\")\n\n @staticmethod\n def _sql_json_async( # pylint: disable=too-many-arguments\n session: Session,\n rendered_query: str,\n query: Query,\n expand_data: bool,\n log_params: Optional[Dict[str, Any]] = None,\n ) -> FlaskResponse:\n \"\"\"\n Send SQL JSON query to celery workers.\n\n :param session: SQLAlchemy session object\n :param rendered_query: the rendered query to perform by workers\n :param query: The query (SQLAlchemy) object\n :return: A Flask Response\n \"\"\"\n logger.info(\"Query %i: Running query on a Celery worker\", query.id)\n # Ignore the celery future object and the request may time out.\n query_id = query.id\n try:\n task = sql_lab.get_sql_results.delay(\n query.id,\n rendered_query,\n return_results=False,\n store_results=not query.select_as_cta,\n user_name=g.user.username if g.user else None,\n start_time=now_as_float(),\n expand_data=expand_data,\n log_params=log_params,\n )\n\n # Explicitly forget the task to ensure the task metadata is removed from the\n # Celery results backend in a timely manner.\n try:\n task.forget()\n except NotImplementedError:\n logger.warning(\n \"Unable to forget Celery task as backend\"\n \"does not support this operation\"\n )\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(\"Query %i: %s\", query.id, str(ex))\n msg = _(\n \"Failed to start remote query on a worker. 
\"\n \"Tell your administrator to verify the availability of \"\n \"the message queue.\"\n )\n query.status = QueryStatus.FAILED\n query.error_message = msg\n session.commit()\n return json_error_response(\"{}\".format(msg))\n\n # Update saved query with execution info from the query execution\n QueryDAO.update_saved_query_exec_info(query_id)\n\n resp = json_success(\n json.dumps(\n {\"query\": query.to_dict()},\n default=utils.json_int_dttm_ser,\n ignore_nan=True,\n ),\n status=202,\n )\n session.commit()\n return resp\n\n @staticmethod\n def _sql_json_sync(\n _session: Session,\n rendered_query: str,\n query: Query,\n expand_data: bool,\n log_params: Optional[Dict[str, Any]] = None,\n ) -> FlaskResponse:\n \"\"\"\n Execute SQL query (sql json).\n\n :param rendered_query: The rendered query (included templates)\n :param query: The query SQL (SQLAlchemy) object\n :return: A Flask Response\n :raises: SupersetTimeoutException\n \"\"\"\n try:\n timeout = config[\"SQLLAB_TIMEOUT\"]\n timeout_msg = f\"The query exceeded the {timeout} seconds timeout.\"\n store_results = (\n is_feature_enabled(\"SQLLAB_BACKEND_PERSISTENCE\")\n and not query.select_as_cta\n )\n query_id = query.id\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n # pylint: disable=no-value-for-parameter\n data = sql_lab.get_sql_results(\n query.id,\n rendered_query,\n return_results=True,\n store_results=store_results,\n user_name=g.user.username if g.user else None,\n expand_data=expand_data,\n log_params=log_params,\n )\n\n # Update saved query if needed\n QueryDAO.update_saved_query_exec_info(query_id)\n\n payload = json.dumps(\n apply_display_max_row_limit(data),\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n except SupersetTimeoutException as ex:\n # re-raise exception for api exception handler\n raise ex\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(\"Query %i failed unexpectedly\", query.id)\n raise SupersetGenericDBErrorException(utils.error_msg_from_exception(ex))\n\n if data.get(\"status\") == QueryStatus.FAILED:\n raise SupersetGenericDBErrorException(data[\"error\"])\n return json_success(payload)\n\n @has_access_api\n @handle_api_exception\n @event_logger.log_this\n @expose(\"/sql_json/\", methods=[\"POST\"])\n def sql_json(self) -> FlaskResponse:\n log_params = {\n \"user_agent\": cast(Optional[str], request.headers.get(\"USER_AGENT\"))\n }\n return self.sql_json_exec(request.json, log_params)\n\n def sql_json_exec( # pylint: disable=too-many-statements,too-many-locals\n self, query_params: Dict[str, Any], log_params: Optional[Dict[str, Any]] = None\n ) -> FlaskResponse:\n \"\"\"Runs arbitrary sql and returns data as json\"\"\"\n # Collect Values\n database_id: int = cast(int, query_params.get(\"database_id\"))\n schema: str = cast(str, query_params.get(\"schema\"))\n sql: str = cast(str, query_params.get(\"sql\"))\n try:\n template_params = json.loads(query_params.get(\"templateParams\") or \"{}\")\n except json.JSONDecodeError:\n logger.warning(\n \"Invalid template parameter %s\" \" specified. Defaulting to empty dict\",\n str(query_params.get(\"templateParams\")),\n )\n template_params = {}\n limit: int = query_params.get(\"queryLimit\") or app.config[\"SQL_MAX_ROW\"]\n async_flag: bool = cast(bool, query_params.get(\"runAsync\"))\n if limit < 0:\n logger.warning(\n \"Invalid limit of %i specified. 
Defaulting to max limit.\", limit\n )\n limit = 0\n select_as_cta: bool = cast(bool, query_params.get(\"select_as_cta\"))\n ctas_method: CtasMethod = cast(\n CtasMethod, query_params.get(\"ctas_method\", CtasMethod.TABLE)\n )\n tmp_table_name: str = cast(str, query_params.get(\"tmp_table_name\"))\n client_id: str = cast(\n str, query_params.get(\"client_id\") or utils.shortid()[:10]\n )\n sql_editor_id: str = cast(str, query_params.get(\"sql_editor_id\"))\n tab_name: str = cast(str, query_params.get(\"tab\"))\n status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING\n\n session = db.session()\n mydb = session.query(Database).get(database_id)\n if not mydb:\n return json_error_response(\"Database with id %i is missing.\", database_id)\n\n # Set tmp_schema_name for CTA\n # TODO(bkyryliuk): consider parsing, splitting tmp_schema_name from\n # tmp_table_name if user enters\n # <schema_name>.<table_name>\n tmp_schema_name: Optional[str] = schema\n if select_as_cta and mydb.force_ctas_schema:\n tmp_schema_name = mydb.force_ctas_schema\n elif select_as_cta:\n tmp_schema_name = get_cta_schema_name(mydb, g.user, schema, sql)\n\n # Save current query\n query = Query(\n database_id=database_id,\n sql=sql,\n schema=schema,\n select_as_cta=select_as_cta,\n ctas_method=ctas_method,\n start_time=now_as_float(),\n tab_name=tab_name,\n status=status,\n sql_editor_id=sql_editor_id,\n tmp_table_name=tmp_table_name,\n tmp_schema_name=tmp_schema_name,\n user_id=g.user.get_id() if g.user else None,\n client_id=client_id,\n )\n try:\n session.add(query)\n session.flush()\n query_id = query.id\n session.commit() # shouldn't be necessary\n except SQLAlchemyError as ex:\n logger.error(\"Errors saving query details %s\", str(ex))\n session.rollback()\n raise Exception(_(\"Query record was not created as expected.\"))\n if not query_id:\n raise Exception(_(\"Query record was not created as expected.\"))\n\n logger.info(\"Triggering query_id: %i\", query_id)\n\n try:\n query.raise_for_access()\n except SupersetSecurityException as ex:\n query.status = QueryStatus.FAILED\n session.commit()\n return json_errors_response([ex.error], status=403)\n\n try:\n template_processor = get_template_processor(\n database=query.database, query=query\n )\n rendered_query = template_processor.process_template(\n query.sql, **template_params\n )\n except TemplateError as ex:\n query.status = QueryStatus.FAILED\n session.commit()\n raise SupersetTemplateParamsErrorException(\n utils.error_msg_from_exception(ex)\n )\n\n if is_feature_enabled(\"ENABLE_TEMPLATE_PROCESSING\"):\n # pylint: disable=protected-access\n ast = template_processor._env.parse(rendered_query)\n undefined_parameters = find_undeclared_variables(ast) # type: ignore\n if undefined_parameters:\n query.status = QueryStatus.FAILED\n session.commit()\n raise SupersetTemplateParamsErrorException(\n message=ngettext(\n \"The parameter %(parameters)s in your query is undefined.\",\n \"The following parameters in your query are undefined: %(parameters)s.\",\n len(undefined_parameters),\n parameters=utils.format_list(undefined_parameters),\n )\n + \" \"\n + PARAMETER_MISSING_ERR,\n extra={\n \"undefined_parameters\": list(undefined_parameters),\n \"template_parameters\": template_params,\n },\n )\n\n # Limit is not applied to the CTA queries if SQLLAB_CTAS_NO_LIMIT flag is set\n # to True.\n if not (config.get(\"SQLLAB_CTAS_NO_LIMIT\") and select_as_cta):\n # set LIMIT after template processing\n limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), 
limit]\n query.limit = min(lim for lim in limits if lim is not None)\n\n # Flag for whether or not to expand data\n # (feature that will expand Presto row objects and arrays)\n expand_data: bool = cast(\n bool,\n is_feature_enabled(\"PRESTO_EXPAND_DATA\")\n and query_params.get(\"expand_data\"),\n )\n\n # Async request.\n if async_flag:\n return self._sql_json_async(\n session, rendered_query, query, expand_data, log_params\n )\n # Sync request.\n return self._sql_json_sync(\n session, rendered_query, query, expand_data, log_params\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/csv/<client_id>\")\n def csv(self, client_id: str) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"Download the query results as csv.\"\"\"\n logger.info(\"Exporting CSV file [%s]\", client_id)\n query = db.session.query(Query).filter_by(client_id=client_id).one()\n\n try:\n query.raise_for_access()\n except SupersetSecurityException as ex:\n flash(ex.error.message)\n return redirect(\"/\")\n\n blob = None\n if results_backend and query.results_key:\n logger.info(\"Fetching CSV from results backend [%s]\", query.results_key)\n blob = results_backend.get(query.results_key)\n if blob:\n logger.info(\"Decompressing\")\n payload = utils.zlib_decompress(\n blob, decode=not results_backend_use_msgpack\n )\n obj = _deserialize_results_payload(\n payload, query, cast(bool, results_backend_use_msgpack)\n )\n columns = [c[\"name\"] for c in obj[\"columns\"]]\n df = pd.DataFrame.from_records(obj[\"data\"], columns=columns)\n logger.info(\"Using pandas to convert to CSV\")\n csv = df.to_csv(index=False, **config[\"CSV_EXPORT\"])\n else:\n logger.info(\"Running a query to turn into CSV\")\n sql = query.select_sql or query.executed_sql\n df = query.database.get_df(sql, query.schema)\n # TODO(bkyryliuk): add compression=gzip for big files.\n csv = df.to_csv(index=False, **config[\"CSV_EXPORT\"])\n response = Response(csv, mimetype=\"text/csv\")\n quoted_csv_name = parse.quote(query.name)\n response.headers[\"Content-Disposition\"] = (\n f'attachment; filename=\"{quoted_csv_name}.csv\"; '\n f\"filename*=UTF-8''{quoted_csv_name}.csv\"\n )\n event_info = {\n \"event_type\": \"data_export\",\n \"client_id\": client_id,\n \"row_count\": len(df.index),\n \"database\": query.database.name,\n \"schema\": query.schema,\n \"sql\": query.sql,\n \"exported_format\": \"csv\",\n }\n event_rep = repr(event_info)\n logger.info(\"CSV exported: %s\", event_rep, extra={\"superset_event\": event_info})\n return response\n\n @api\n @handle_api_exception\n @has_access\n @event_logger.log_this\n @expose(\"/fetch_datasource_metadata\")\n def fetch_datasource_metadata(self) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"\n Fetch the datasource metadata.\n\n :returns: The Flask response\n :raises SupersetSecurityException: If the user cannot access the resource\n \"\"\"\n\n datasource_id, datasource_type = request.args[\"datasourceKey\"].split(\"__\")\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session,\n )\n # Check if datasource exists\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n datasource.raise_for_access()\n return json_success(json.dumps(datasource.data))\n\n @has_access_api\n @event_logger.log_this\n @expose(\"/queries/<float:last_updated_ms>\")\n @expose(\"/queries/<int:last_updated_ms>\")\n def queries(self, last_updated_ms: Union[float, int]) -> FlaskResponse:\n \"\"\"\n Get the updated queries.\n\n :param last_updated_ms: Unix time 
(milliseconds)\n \"\"\"\n\n return self.queries_exec(last_updated_ms)\n\n @staticmethod\n def queries_exec(last_updated_ms: Union[float, int]) -> FlaskResponse:\n stats_logger.incr(\"queries\")\n if not g.user.get_id():\n return json_error_response(\n \"Please login to access the queries.\", status=403\n )\n\n # UTC date time, same that is stored in the DB.\n last_updated_dt = datetime.utcfromtimestamp(last_updated_ms / 1000)\n\n sql_queries = (\n db.session.query(Query)\n .filter(\n Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt\n )\n .all()\n )\n dict_queries = {q.client_id: q.to_dict() for q in sql_queries}\n return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))\n\n @has_access\n @event_logger.log_this\n @expose(\"/search_queries\")\n def search_queries(self) -> FlaskResponse: # pylint: disable=no-self-use\n \"\"\"\n Search for previously run sqllab queries. Used for Sqllab Query Search\n page /superset/sqllab#search.\n\n Custom permission can_only_search_queries_owned restricts queries\n to only queries run by current user.\n\n :returns: Response with list of sql query dicts\n \"\"\"\n if security_manager.can_access_all_queries():\n search_user_id = request.args.get(\"user_id\")\n elif request.args.get(\"user_id\") is not None:\n try:\n search_user_id = int(cast(int, request.args.get(\"user_id\")))\n except ValueError:\n return Response(status=400, mimetype=\"application/json\")\n if search_user_id != g.user.get_user_id():\n return Response(status=403, mimetype=\"application/json\")\n else:\n search_user_id = g.user.get_user_id()\n database_id = request.args.get(\"database_id\")\n search_text = request.args.get(\"search_text\")\n status = request.args.get(\"status\")\n # From and To time stamp should be Epoch timestamp in seconds\n from_time = request.args.get(\"from\")\n to_time = request.args.get(\"to\")\n\n query = db.session.query(Query)\n if search_user_id:\n # Filter on user_id\n query = query.filter(Query.user_id == search_user_id)\n\n if database_id:\n # Filter on db Id\n query = query.filter(Query.database_id == database_id)\n\n if status:\n # Filter on status\n query = query.filter(Query.status == status)\n\n if search_text:\n # Filter on search text\n query = query.filter(Query.sql.like(f\"%{search_text}%\"))\n\n if from_time:\n query = query.filter(Query.start_time > int(from_time))\n\n if to_time:\n query = query.filter(Query.start_time < int(to_time))\n\n query_limit = config[\"QUERY_SEARCH_LIMIT\"]\n sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()\n\n dict_queries = [q.to_dict() for q in sql_queries]\n\n return Response(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser),\n status=200,\n mimetype=\"application/json\",\n )\n\n @app.errorhandler(500)\n def show_traceback(self) -> FlaskResponse: # pylint: disable=no-self-use\n return (\n render_template(\"superset/traceback.html\", error_msg=get_error_msg()),\n 500,\n )\n\n @event_logger.log_this\n @expose(\"/welcome\")\n def welcome(self) -> FlaskResponse:\n \"\"\"Personalized welcome page\"\"\"\n if not g.user or not g.user.get_id():\n if conf.get(\"PUBLIC_ROLE_LIKE_GAMMA\", False) or conf[\"PUBLIC_ROLE_LIKE\"]:\n return self.render_template(\"superset/public_welcome.html\")\n return redirect(appbuilder.get_url_for_login)\n\n welcome_dashboard_id = (\n db.session.query(UserAttribute.welcome_dashboard_id)\n .filter_by(user_id=g.user.get_id())\n .scalar()\n )\n if welcome_dashboard_id:\n return 
self.dashboard(str(welcome_dashboard_id))\n\n payload = {\n \"user\": bootstrap_user_data(g.user),\n \"common\": common_bootstrap_payload(),\n }\n\n return self.render_template(\n \"superset/crud_views.html\",\n entry=\"crudViews\",\n bootstrap_data=json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/profile/<username>/\")\n def profile(self, username: str) -> FlaskResponse:\n \"\"\"User profile page\"\"\"\n user = (\n db.session.query(ab_models.User).filter_by(username=username).one_or_none()\n )\n if not user:\n abort(404, description=f\"User: {username} does not exist.\")\n\n payload = {\n \"user\": bootstrap_user_data(user, include_perms=True),\n \"common\": common_bootstrap_payload(),\n }\n\n return self.render_template(\n \"superset/basic.html\",\n title=_(\"%(user)s's profile\", user=username),\n entry=\"profile\",\n bootstrap_data=json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @staticmethod\n def _get_sqllab_tabs(user_id: int) -> Dict[str, Any]:\n # send list of tab state ids\n tabs_state = (\n db.session.query(TabState.id, TabState.label)\n .filter_by(user_id=user_id)\n .all()\n )\n tab_state_ids = [str(tab_state[0]) for tab_state in tabs_state]\n # return first active tab, or fallback to another one if no tab is active\n active_tab = (\n db.session.query(TabState)\n .filter_by(user_id=user_id)\n .order_by(TabState.active.desc())\n .first()\n )\n\n databases: Dict[int, Any] = {\n database.id: {\n k: v for k, v in database.to_json().items() if k in DATABASE_KEYS\n }\n for database in DatabaseDAO.find_all()\n }\n queries: Dict[str, Any] = {}\n\n # These are unnecessary if sqllab backend persistence is disabled\n if is_feature_enabled(\"SQLLAB_BACKEND_PERSISTENCE\"):\n # return all user queries associated with existing SQL editors\n user_queries = (\n db.session.query(Query)\n .filter_by(user_id=user_id)\n .filter(Query.sql_editor_id.in_(tab_state_ids))\n .all()\n )\n queries = {\n query.client_id: dict(query.to_dict().items()) for query in user_queries\n }\n\n return {\n \"tab_state_ids\": tabs_state,\n \"active_tab\": active_tab.to_dict() if active_tab else None,\n \"databases\": databases,\n \"queries\": queries,\n }\n\n @has_access\n @event_logger.log_this\n @expose(\"/sqllab\", methods=[\"GET\", \"POST\"])\n def sqllab(self) -> FlaskResponse:\n \"\"\"SQL Editor\"\"\"\n payload = {\n \"defaultDbId\": config[\"SQLLAB_DEFAULT_DBID\"],\n \"common\": common_bootstrap_payload(),\n **self._get_sqllab_tabs(g.user.get_id()),\n }\n\n form_data = request.form.get(\"form_data\")\n if form_data:\n try:\n payload[\"requested_query\"] = json.loads(form_data)\n except json.JSONDecodeError:\n pass\n\n payload[\"user\"] = bootstrap_user_data(g.user)\n bootstrap_data = json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n )\n\n return self.render_template(\n \"superset/basic.html\", entry=\"sqllab\", bootstrap_data=bootstrap_data\n )\n\n @has_access\n @event_logger.log_this\n @expose(\"/sqllab/history/\", methods=[\"GET\"])\n @event_logger.log_this\n def sqllab_history(self) -> FlaskResponse:\n if not is_feature_enabled(\"ENABLE_REACT_CRUD_VIEWS\"):\n return redirect(\"/superset/sqllab#search\", code=307)\n\n return super().render_app_template()\n\n @api\n @has_access_api\n @event_logger.log_this\n @expose(\"/schemas_access_for_csv_upload\")\n def schemas_access_for_csv_upload(self) -> FlaskResponse:\n \"\"\"\n This method exposes an API endpoint to\n get the schema access 
control settings for csv upload in this database\n \"\"\"\n if not request.args.get(\"db_id\"):\n return json_error_response(\"No database is allowed for your csv upload\")\n\n db_id = int(request.args[\"db_id\"])\n database = db.session.query(Database).filter_by(id=db_id).one()\n try:\n schemas_allowed = database.get_schema_access_for_csv_upload()\n if security_manager.can_access_database(database):\n return self.json_response(schemas_allowed)\n # the list schemas_allowed should not be empty here\n # and the list schemas_allowed_processed returned from security_manager\n # should not be empty either,\n # otherwise the database should have been filtered out\n # in CsvToDatabaseForm\n schemas_allowed_processed = security_manager.get_schemas_accessible_by_user(\n database, schemas_allowed, False\n )\n return self.json_response(schemas_allowed_processed)\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(ex)\n return json_error_response(\n \"Failed to fetch schemas allowed for csv upload in this database! \"\n \"Please contact your Superset Admin!\"\n )\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
HelloImRobert/mmdetection
[ "223235c90fc644bb2f04fa92c770b83f320db7d2" ]
[ "ext/utils/model_zoo.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport os\nimport sys\n\nimport torch\n\nfrom ssd.utils.dist_util import is_main_process, synchronize\n\ntry:\n from torch.hub import _download_url_to_file\n from torch.hub import urlparse\n from torch.hub import HASH_REGEX\nexcept ImportError:\n from torch.utils.model_zoo import _download_url_to_file\n from torch.utils.model_zoo import urlparse\n from torch.utils.model_zoo import HASH_REGEX\n\n\n# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py\n# but with a few improvements and modifications\ndef cache_url(url, model_dir=None, progress=True):\n r\"\"\"Loads the Torch serialized object at the given URL.\n If the object is already present in `model_dir`, it's deserialized and\n returned. The filename part of the URL should follow the naming convention\n ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more\n digits of the SHA256 hash of the contents of the file. The hash is used to\n ensure unique names and to verify the contents of the file.\n The default value of `model_dir` is ``$TORCH_HOME/models`` where\n ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be\n overridden with the ``$TORCH_MODEL_ZOO`` environment variable.\n Args:\n url (string): URL of the object to download\n model_dir (string, optional): directory in which to save the object\n progress (bool, optional): whether or not to display a progress bar to stderr\n Example:\n >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')\n \"\"\"\n if model_dir is None:\n torch_home = os.path.expanduser(os.getenv(\"TORCH_HOME\", \"~/.torch\"))\n model_dir = os.getenv(\"TORCH_MODEL_ZOO\", os.path.join(torch_home, \"models\"))\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n if filename == \"model_final.pkl\":\n # workaround as pre-trained Caffe2 models from Detectron have all the same filename\n # so make the full path the filename by replacing / with _\n filename = parts.path.replace(\"/\", \"_\")\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file) and is_main_process():\n sys.stderr.write('Downloading: \"{}\" to {}\\n'.format(url, cached_file))\n hash_prefix = HASH_REGEX.search(filename)\n if hash_prefix is not None:\n hash_prefix = hash_prefix.group(1)\n # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,\n # which matches the hash PyTorch uses. So we skip the hash matching\n # if the hash_prefix is less than 6 characters\n if len(hash_prefix) < 6:\n hash_prefix = None\n _download_url_to_file(url, cached_file, hash_prefix, progress=progress)\n synchronize()\n return cached_file\n\n\ndef load_state_dict_from_url(url, map_location='cpu'):\n cached_file = cache_url(url)\n return torch.load(cached_file, map_location=map_location)\n\n\ndef load_state_dict_from_file(path, map_location='cpu'):\n cached_file = path\n return torch.load(cached_file, map_location=map_location)\n" ]
[ [ "torch.utils.model_zoo.urlparse", "torch.utils.model_zoo.HASH_REGEX.search", "torch.utils.model_zoo._download_url_to_file", "torch.load" ] ]
zren96/rlpyt
[ "7e29587d29219f7af80868f7c85e38bea80ed2cf" ]
[ "rlpyt/agents/qpg/sac_vision_agent.py" ]
[ "\nimport numpy as np\nimport torch\nfrom collections import namedtuple\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom rlpyt.agents.base import BaseAgent, AgentStep, RecurrentAgentMixin\nfrom rlpyt.agents.qpg.base import AgentInfo, AgentInfoRnn\nfrom rlpyt.models.qpg.conv import QConvLSTMModel, PiConvLSTMModel\nfrom rlpyt.utils.quick_args import save__init__args\nfrom rlpyt.distributions.gaussian import Gaussian, DistInfoStd\nfrom rlpyt.utils.buffer import buffer_to, buffer_func, buffer_method\nfrom rlpyt.utils.logging import logger\nfrom rlpyt.models.utils import update_state_dict\nfrom rlpyt.utils.collections import namedarraytuple\n\n\nMIN_LOG_STD = -20\nMAX_LOG_STD = 2\n\nModels = namedtuple(\"Models\", [\"pi\", \"q1\", \"q2\", \"v\"])\n\n\n\nclass SacVisionAgent(BaseAgent):\n \"\"\"Agent for SAC algorithm, including action-squashing, using twin Q-values.\"\"\"\n\n def __init__(\n self,\n ModelCls=PiConvLSTMModel, # Pi model.\n QModelCls=QConvLSTMModel,\n model_kwargs=None, # Pi model.\n q_model_kwargs=None,\n initial_model_state_dict=None, # All models.\n action_squash=2., # Max magnitude (or None).\n pretrain_std=0.75, # With squash 0.75 is near uniform.\n ):\n \"\"\"Saves input arguments; network defaults stored within.\"\"\"\n if model_kwargs is None:\n model_kwargs = dict()\n if q_model_kwargs is None:\n q_model_kwargs = dict()\n super().__init__(ModelCls=ModelCls, model_kwargs=model_kwargs,\n initial_model_state_dict=initial_model_state_dict)\n save__init__args(locals())\n self.min_itr_learn = 0 # Get from algo.\n\n def initialize(self, env_spaces, share_memory=False,\n global_B=1, env_ranks=None):\n _initial_model_state_dict = self.initial_model_state_dict\n self.initial_model_state_dict = None # Don't let base agent try to load.\n super().initialize(env_spaces, share_memory,\n global_B=global_B, env_ranks=env_ranks)\n self.initial_model_state_dict = _initial_model_state_dict\n self.q1_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)\n self.q2_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)\n self.target_q1_model = self.QModelCls(**self.env_model_kwargs,\n **self.q_model_kwargs)\n self.target_q2_model = self.QModelCls(**self.env_model_kwargs,\n **self.q_model_kwargs)\n self.target_q1_model.load_state_dict(self.q1_model.state_dict())\n self.target_q2_model.load_state_dict(self.q2_model.state_dict())\n if self.initial_model_state_dict is not None:\n self.load_state_dict(self.initial_model_state_dict)\n assert len(env_spaces.action.shape) == 1\n self.distribution = Gaussian(\n dim=env_spaces.action.shape[0],\n squash=self.action_squash,\n min_std=np.exp(MIN_LOG_STD),\n max_std=np.exp(MAX_LOG_STD),\n )\n\n def to_device(self, cuda_idx=None):\n super().to_device(cuda_idx)\n self.q1_model.to(self.device)\n self.q2_model.to(self.device)\n self.target_q1_model.to(self.device)\n self.target_q2_model.to(self.device)\n\n def data_parallel(self):\n device_id = super().data_parallel\n self.q1_model = DDP(\n self.q1_model,\n device_ids=None if device_id is None else [device_id], # 1 GPU.\n output_device=device_id,\n )\n self.q2_model = DDP(\n self.q2_model,\n device_ids=None if device_id is None else [device_id], # 1 GPU.\n output_device=device_id,\n )\n return device_id\n\n def give_min_itr_learn(self, min_itr_learn):\n self.min_itr_learn = min_itr_learn # From algo.\n\n def make_env_to_model_kwargs(self, env_spaces):\n assert len(env_spaces.action.shape) == 1\n return dict(\n 
observation_shape=env_spaces.observation.shape,\n action_size=env_spaces.action.shape[0],\n )\n\n def q(self, observation, prev_action, prev_reward, action, init_rnn_state_1,initi_rnn_state_2):\n \"\"\"Compute twin Q-values for state/observation and input action \n (with grad).\"\"\"\n model_inputs_1 = buffer_to((observation, prev_action, prev_reward,\n action, init_rnn_state_1), device=self.device)\n model_inputs_2 = buffer_to((observation, prev_action, prev_reward,\n action, initi_rnn_state_2), device=self.device)\n q1, q1_rnn_state = self.q1_model(*model_inputs_1)\n q2, q2_rnn_state = self.q2_model(*model_inputs_2)\n return q1.cpu(), q1_rnn_state, q2.cpu(), q2_rnn_state\n\n def target_q(self, observation, prev_action, prev_reward, action, init_rnn_state_1, init_rnn_state_2):\n \"\"\"Compute twin target Q-values for state/observation and input\n action.\"\"\" \n model_inputs_1 = buffer_to((observation, prev_action,\n prev_reward, action, init_rnn_state_1), device=self.device)\n model_inputs_2 = buffer_to((observation, prev_action,\n prev_reward, action, init_rnn_state_2), device=self.device)\n target_q1, q1_rnn_state = self.target_q1_model(*model_inputs_1)\n target_q2, q2_rnn_state = self.target_q2_model(*model_inputs_2)\n return target_q1.cpu(), q1_rnn_state, target_q2.cpu(), q2_rnn_state\n\n def pi(self, observation, prev_action, prev_reward, init_rnn_state):\n \"\"\"Compute action log-probabilities for state/observation, and\n sample new action (with grad). Uses special ``sample_loglikelihood()``\n method of Gaussian distriution, which handles action squashing\n through this process.\"\"\"\n model_inputs = buffer_to((observation, prev_action, prev_reward, init_rnn_state), device=self.device)\n mean, log_std, rnn_state = self.model(*model_inputs)\n dist_info = DistInfoStd(mean=mean, log_std=log_std)\n action, log_pi = self.distribution.sample_loglikelihood(dist_info)\n # action = self.distribution.sample(dist_info)\n # log_pi = self.distribution.log_likelihood(action, dist_info)\n log_pi, dist_info = buffer_to((log_pi, dist_info), device=\"cpu\")\n return action, log_pi, dist_info, rnn_state # Action stays on device for q models.\n\n @torch.no_grad()\n def step(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n mean, log_std, rnn_state = self.model(*model_inputs, self.prev_rnn_state)\n dist_info = DistInfoStd(mean=mean, log_std=log_std)\n action = self.distribution.sample(dist_info)\n\n # Model handles None, but Buffer does not, make zeros if needed:\n prev_rnn_state = self.prev_rnn_state or buffer_func(rnn_state, torch.zeros_like)\n # Transpose the rnn_state from [N,B,H] --> [B,N,H] for storage.\n # (Special case: model should always leave B dimension in.)\n prev_rnn_state = buffer_method(prev_rnn_state, \"transpose\", 0, 1)\n\n agent_info = AgentInfoRnn(dist_info=dist_info, \n prev_rnn_state=prev_rnn_state)\n action, agent_info = buffer_to((action, agent_info), device=\"cpu\")\n self.advance_rnn_state(rnn_state) # update prev_rnn_state\n return AgentStep(action=action, agent_info=agent_info)\n\n def update_target(self, tau=1):\n update_state_dict(self.target_q1_model, self.q1_model.state_dict(), tau)\n update_state_dict(self.target_q2_model, self.q2_model.state_dict(), tau)\n\n @property\n def models(self):\n return Models(pi=self.model, q1=self.q1_model, q2=self.q2_model)\n\n def pi_parameters(self):\n return self.model.parameters()\n\n def q1_parameters(self):\n return 
self.q1_model.parameters()\n\n def q2_parameters(self):\n return self.q2_model.parameters()\n\n def train_mode(self, itr):\n super().train_mode(itr)\n self.q1_model.train()\n self.q2_model.train()\n\n def sample_mode(self, itr):\n super().sample_mode(itr)\n self.q1_model.eval()\n self.q2_model.eval()\n if itr == 0:\n logger.log(f\"Agent at itr {itr}, sample std: {self.pretrain_std}\")\n if itr == self.min_itr_learn:\n logger.log(f\"Agent at itr {itr}, sample std: learned.\")\n std = None if itr >= self.min_itr_learn else self.pretrain_std\n self.distribution.set_std(std) # If None: std from policy dist_info.\n\n def eval_mode(self, itr):\n super().eval_mode(itr)\n self.q1_model.eval()\n self.q2_model.eval()\n self.distribution.set_std(0.) # Deterministic (dist_info std ignored).\n\n def state_dict(self):\n return dict(\n model=self.model.state_dict(), # Pi model.\n q1_model=self.q1_model.state_dict(),\n q2_model=self.q2_model.state_dict(),\n target_q1_model=self.target_q1_model.state_dict(),\n target_q2_model=self.target_q2_model.state_dict(),\n )\n\n def load_state_dict(self, state_dict):\n self.model.load_state_dict(state_dict[\"model\"])\n self.q1_model.load_state_dict(state_dict[\"q1_model\"])\n self.q2_model.load_state_dict(state_dict[\"q2_model\"])\n self.target_q1_model.load_state_dict(state_dict[\"target_q1_model\"])\n self.target_q2_model.load_state_dict(state_dict[\"target_q2_model\"])\n\n\nclass SacVisionLSTMAgent(RecurrentAgentMixin, SacVisionAgent):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n" ]
[ [ "numpy.exp", "torch.no_grad", "torch.nn.parallel.DistributedDataParallel" ] ]
fighting41love/zincbase
[ "40d68bcb50e8a509cafe7496545f172cc3559406" ]
[ "zincbase/utils/calc_auc_roc.py" ]
[ "\"\"\"Calculate the Area-Under-the-Curve Receiver Operating Characteristic\n\nA funny measure that combines precision and recall. Sklearn can't agree how\nto implement it for multiclass; this version is from fbrundu on\nhttps://github.com/scikit-learn/scikit-learn/issues/3298\n\"\"\"\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelBinarizer\n\ndef calc_auc_roc(truth, pred, average=\"macro\"):\n\n lb = LabelBinarizer()\n lb.fit(truth)\n truth = lb.transform(truth)\n pred = lb.transform(pred)\n return roc_auc_score(truth, pred, average=average)\n" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.preprocessing.LabelBinarizer" ] ]
JiaweiSheng/FAAN
[ "b439b829506c4e2e9044a6b2ab7f3d844f445a95" ]
[ "trainer.py" ]
[ "from collections import defaultdict\r\nfrom torch import optim\r\nfrom collections import deque\r\nfrom args import read_options\r\nfrom data_loader import *\r\nfrom matcher import *\r\nfrom tensorboardX import SummaryWriter\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass Trainer(object):\r\n def __init__(self, arg):\r\n super(Trainer, self).__init__()\r\n for k, v in vars(arg).items():\r\n setattr(self, k, v)\r\n\r\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n self.meta = not self.no_meta\r\n\r\n # pre-train\r\n if self.random_embed:\r\n use_pretrain = False\r\n else:\r\n use_pretrain = True\r\n\r\n logging.info('LOADING SYMBOL ID AND SYMBOL EMBEDDING')\r\n if self.test or self.random_embed:\r\n # gen symbol2id, without embedding\r\n self.load_symbol2id()\r\n use_pretrain = False\r\n else:\r\n self.load_embed()\r\n self.use_pretrain = use_pretrain\r\n\r\n self.num_symbols = len(self.symbol2id.keys()) - 1 # one for 'PAD'\r\n self.pad_id = self.num_symbols\r\n\r\n self.Matcher = Matcher(self.embed_dim, self.num_symbols,\r\n use_pretrain=self.use_pretrain,\r\n embed=self.symbol2vec,\r\n dropout_layers=self.dropout_layers,\r\n dropout_input=self.dropout_input,\r\n dropout_neighbors=self.dropout_neighbors,\r\n finetune=self.fine_tune,\r\n num_transformer_layers=self.num_transformer_layers,\r\n num_transformer_heads=self.num_transformer_heads,\r\n device=self.device\r\n )\r\n\r\n self.Matcher.to(self.device)\r\n self.batch_nums = 0\r\n if self.test:\r\n self.writer = None\r\n else:\r\n self.writer = SummaryWriter('logs/' + self.prefix)\r\n\r\n self.parameters = filter(lambda p: p.requires_grad, self.Matcher.parameters())\r\n\r\n self.optim = optim.Adam(self.parameters, lr=self.lr, weight_decay=self.weight_decay)\r\n self.ent2id = json.load(open(self.dataset + '/ent2ids'))\r\n self.num_ents = len(self.ent2id.keys())\r\n\r\n logging.info('BUILDING CONNECTION MATRIX')\r\n degrees = self.build_connection(max_=self.max_neighbor)\r\n\r\n logging.info('LOADING CANDIDATES ENTITIES')\r\n self.rel2candidates = json.load(open(self.dataset + '/rel2candidates.json'))\r\n\r\n # load answer dict\r\n self.e1rel_e2 = defaultdict(list)\r\n self.e1rel_e2 = json.load(open(self.dataset + '/e1rel_e2.json'))\r\n\r\n def load_symbol2id(self):\r\n # gen symbol2id, without embedding\r\n symbol_id = {}\r\n rel2id = json.load(open(self.dataset + '/relation2ids'))\r\n ent2id = json.load(open(self.dataset + '/ent2ids'))\r\n i = 0\r\n # rel and ent combine together\r\n for key in rel2id.keys():\r\n if key not in ['', 'OOV']:\r\n symbol_id[key] = i\r\n i += 1\r\n\r\n for key in ent2id.keys():\r\n if key not in ['', 'OOV']:\r\n symbol_id[key] = i\r\n i += 1\r\n\r\n symbol_id['PAD'] = i\r\n self.symbol2id = symbol_id\r\n self.symbol2vec = None\r\n\r\n def load_embed(self):\r\n # gen symbol2id, with embedding\r\n symbol_id = {}\r\n rel2id = json.load(open(self.dataset + '/relation2ids')) # relation2id contains inverse rel\r\n ent2id = json.load(open(self.dataset + '/ent2ids'))\r\n\r\n logging.info('LOADING PRE-TRAINED EMBEDDING')\r\n if self.embed_model in ['DistMult', 'TransE', 'ComplEx', 'RESCAL']:\r\n ent_embed = np.loadtxt(self.dataset + '/entity2vec.' + self.embed_model)\r\n rel_embed = np.loadtxt(self.dataset + '/relation2vec.' 
+ self.embed_model) # contain inverse edge\r\n\r\n if self.embed_model == 'ComplEx':\r\n # normalize the complex embeddings\r\n ent_mean = np.mean(ent_embed, axis=1, keepdims=True)\r\n ent_std = np.std(ent_embed, axis=1, keepdims=True)\r\n rel_mean = np.mean(rel_embed, axis=1, keepdims=True)\r\n rel_std = np.std(rel_embed, axis=1, keepdims=True)\r\n eps = 1e-3\r\n ent_embed = (ent_embed - ent_mean) / (ent_std + eps)\r\n rel_embed = (rel_embed - rel_mean) / (rel_std + eps)\r\n\r\n assert ent_embed.shape[0] == len(ent2id.keys())\r\n assert rel_embed.shape[0] == len(rel2id.keys())\r\n\r\n i = 0\r\n embeddings = []\r\n for key in rel2id.keys():\r\n if key not in ['', 'OOV']:\r\n symbol_id[key] = i\r\n i += 1\r\n embeddings.append(list(rel_embed[rel2id[key], :]))\r\n\r\n for key in ent2id.keys():\r\n if key not in ['', 'OOV']:\r\n symbol_id[key] = i\r\n i += 1\r\n embeddings.append(list(ent_embed[ent2id[key], :]))\r\n\r\n symbol_id['PAD'] = i\r\n embeddings.append(list(np.zeros((rel_embed.shape[1],))))\r\n embeddings = np.array(embeddings)\r\n assert embeddings.shape[0] == len(symbol_id.keys())\r\n\r\n self.symbol2id = symbol_id\r\n self.symbol2vec = embeddings\r\n\r\n def build_connection(self, max_=100):\r\n self.connections = (np.ones((self.num_ents, max_, 2)) * self.pad_id).astype(int)\r\n self.e1_rele2 = defaultdict(list)\r\n self.e1_degrees = defaultdict(int)\r\n with open(self.dataset + '/path_graph') as f:\r\n lines = f.readlines()\r\n for line in tqdm(lines):\r\n e1, rel, e2 = line.rstrip().split()\r\n self.e1_rele2[e1].append((self.symbol2id[rel], self.symbol2id[e2])) # 1-n\r\n self.e1_rele2[e2].append((self.symbol2id[rel + '_inv'], self.symbol2id[e1])) # n-1\r\n\r\n degrees = {}\r\n for ent, id_ in self.ent2id.items():\r\n neighbors = self.e1_rele2[ent]\r\n if len(neighbors) > max_:\r\n neighbors = neighbors[:max_]\r\n degrees[ent] = len(neighbors)\r\n self.e1_degrees[id_] = len(neighbors) # add one for self conn\r\n for idx, _ in enumerate(neighbors):\r\n self.connections[id_, idx, 0] = _[0] # rel\r\n self.connections[id_, idx, 1] = _[1] # tail\r\n return degrees\r\n\r\n def save(self, path=None):\r\n if not path:\r\n path = self.save_path\r\n torch.save(self.Matcher.state_dict(), path)\r\n\r\n def load(self, path=None):\r\n if path:\r\n self.Matcher.load_state_dict(torch.load(path))\r\n else:\r\n self.Matcher.load_state_dict(torch.load(self.save_path))\r\n\r\n def get_meta(self, left, right):\r\n left_connections = Variable(\r\n torch.LongTensor(np.stack([self.connections[_, :, :] for _ in left], axis=0))).to(self.device)\r\n left_degrees = Variable(torch.FloatTensor([self.e1_degrees[_] for _ in left])).to(self.device)\r\n right_connections = Variable(\r\n torch.LongTensor(np.stack([self.connections[_, :, :] for _ in right], axis=0))).to(self.device)\r\n right_degrees = Variable(torch.FloatTensor([self.e1_degrees[_] for _ in right])).to(self.device)\r\n return (left_connections, left_degrees, right_connections, right_degrees)\r\n\r\n def train(self):\r\n logging.info('START TRAINING...')\r\n best_mrr = 0.0\r\n best_batches = 0\r\n\r\n losses = deque([], self.log_every)\r\n margins = deque([], self.log_every)\r\n for data in train_generate(self.dataset, self.batch_size, self.train_few, self.symbol2id, self.ent2id,\r\n self.e1rel_e2):\r\n support, query, false, support_left, support_right, query_left, query_right, false_left, false_right = data\r\n\r\n self.batch_nums += 1\r\n support_meta = self.get_meta(support_left, support_right)\r\n query_meta = self.get_meta(query_left, 
query_right)\r\n false_meta = self.get_meta(false_left, false_right)\r\n\r\n support = Variable(torch.LongTensor(support)).to(self.device)\r\n query = Variable(torch.LongTensor(query)).to(self.device)\r\n false = Variable(torch.LongTensor(false)).to(self.device)\r\n self.Matcher.train()\r\n if self.no_meta:\r\n positive_score, negative_score = self.Matcher(support, query, false, isEval=False)\r\n else:\r\n positive_score, negative_score = self.Matcher(support, query, false, isEval=False,\r\n support_meta=support_meta,\r\n query_meta=query_meta,\r\n false_meta=false_meta)\r\n margin_ = positive_score - negative_score\r\n loss = F.relu(self.margin - margin_).mean()\r\n margins.append(margin_.mean().item())\r\n lr = adjust_learning_rate(optimizer=self.optim, epoch=self.batch_nums, lr=self.lr,\r\n warm_up_step=self.warm_up_step,\r\n max_update_step=self.max_batches)\r\n losses.append(loss.item())\r\n\r\n self.optim.zero_grad()\r\n loss.backward()\r\n nn.utils.clip_grad_norm(self.parameters, self.grad_clip)\r\n self.optim.step()\r\n if self.batch_nums % self.log_every == 0:\r\n lr = self.optim.param_groups[0]['lr']\r\n logging.info(\r\n 'Batch: {:d}, Avg_batch_loss: {:.6f}, lr: {:.6f}, '.format(\r\n self.batch_nums,\r\n np.mean(losses),\r\n lr))\r\n self.writer.add_scalar('Avg_batch_loss_every_log', np.mean(losses), self.batch_nums)\r\n\r\n if self.batch_nums % self.eval_every == 0:\r\n logging.info('Batch_nums is %d' % self.batch_nums)\r\n hits10, hits5, hits1, mrr = self.eval(meta=self.meta, mode='dev')\r\n self.writer.add_scalar('HITS10', hits10, self.batch_nums)\r\n self.writer.add_scalar('HITS5', hits5, self.batch_nums)\r\n self.writer.add_scalar('HITS1', hits1, self.batch_nums)\r\n self.writer.add_scalar('MRR', mrr, self.batch_nums)\r\n self.save()\r\n\r\n if mrr > best_mrr:\r\n self.save(self.save_path + '_best')\r\n best_mrr = mrr\r\n best_batches = self.batch_nums\r\n logging.info('Best_mrr is {:.6f}, when batch num is {:d}'.format(best_mrr, best_batches))\r\n\r\n if self.batch_nums == self.max_batches:\r\n self.save()\r\n break\r\n\r\n if self.batch_nums - best_batches > self.eval_every * 10:\r\n logging.info('Early stop!')\r\n self.save()\r\n break\r\n\r\n def eval(self, mode='dev', meta=False):\r\n self.Matcher.eval()\r\n\r\n symbol2id = self.symbol2id\r\n few = self.few\r\n\r\n logging.info('EVALUATING ON %s DATA' % mode.upper())\r\n if mode == 'dev':\r\n test_tasks = json.load(open(self.dataset + '/dev_tasks.json'))\r\n else:\r\n test_tasks = json.load(open(self.dataset + '/test_tasks.json'))\r\n\r\n rel2candidates = self.rel2candidates\r\n\r\n hits10 = []\r\n hits5 = []\r\n hits1 = []\r\n mrr = []\r\n for query_ in test_tasks.keys():\r\n hits10_ = []\r\n hits5_ = []\r\n hits1_ = []\r\n mrr_ = []\r\n candidates = rel2candidates[query_]\r\n support_triples = test_tasks[query_][:few]\r\n support_pairs = [[symbol2id[triple[0]], symbol2id[triple[2]]] for triple in support_triples]\r\n\r\n if meta:\r\n support_left = [self.ent2id[triple[0]] for triple in support_triples]\r\n support_right = [self.ent2id[triple[2]] for triple in support_triples]\r\n support_meta = self.get_meta(support_left, support_right)\r\n\r\n support = Variable(torch.LongTensor(support_pairs)).to(self.device)\r\n\r\n for triple in test_tasks[query_][few:]:\r\n true = triple[2]\r\n query_pairs = []\r\n query_pairs.append([symbol2id[triple[0]], symbol2id[triple[2]]])\r\n if meta:\r\n query_left = []\r\n query_right = []\r\n query_left.append(self.ent2id[triple[0]])\r\n query_right.append(self.ent2id[triple[2]])\r\n 
for ent in candidates:\r\n if (ent not in self.e1rel_e2[triple[0] + triple[1]]) and ent != true:\r\n query_pairs.append([symbol2id[triple[0]], symbol2id[ent]])\r\n if meta:\r\n query_left.append(self.ent2id[triple[0]])\r\n query_right.append(self.ent2id[ent])\r\n\r\n query = Variable(torch.LongTensor(query_pairs)).to(self.device)\r\n\r\n if meta:\r\n query_meta = self.get_meta(query_left, query_right)\r\n scores, _ = self.Matcher(support, query, None, isEval=True,\r\n support_meta=support_meta,\r\n query_meta=query_meta,\r\n false_meta=None)\r\n scores.detach()\r\n scores = scores.data\r\n\r\n scores = scores.cpu().numpy()\r\n sort = list(np.argsort(scores, kind='stable'))[::-1]\r\n rank = sort.index(0) + 1\r\n if rank <= 10:\r\n hits10.append(1.0)\r\n hits10_.append(1.0)\r\n else:\r\n hits10.append(0.0)\r\n hits10_.append(0.0)\r\n if rank <= 5:\r\n hits5.append(1.0)\r\n hits5_.append(1.0)\r\n else:\r\n hits5.append(0.0)\r\n hits5_.append(0.0)\r\n if rank <= 1:\r\n hits1.append(1.0)\r\n hits1_.append(1.0)\r\n else:\r\n hits1.append(0.0)\r\n hits1_.append(0.0)\r\n mrr.append(1.0 / rank)\r\n mrr_.append(1.0 / rank)\r\n\r\n logging.critical('{} Hits10:{:.3f}, Hits5:{:.3f}, Hits1:{:.3f}, MRR:{:.3f}'.format(query_,\r\n np.mean(\r\n hits10_),\r\n np.mean(hits5_),\r\n np.mean(hits1_),\r\n np.mean(mrr_),\r\n ))\r\n logging.info('Number of candidates: {}, number of test examples {}'.format(len(candidates), len(hits10_)))\r\n logging.critical('HITS10: {:.3f}'.format(np.mean(hits10)))\r\n logging.critical('HITS5: {:.3f}'.format(np.mean(hits5)))\r\n logging.critical('HITS1: {:.3f}'.format(np.mean(hits1)))\r\n logging.critical('MRR: {:.3f}'.format(np.mean(mrr)))\r\n return np.mean(hits10), np.mean(hits5), np.mean(hits1), np.mean(mrr)\r\n\r\n def test_(self, path=None):\r\n self.load(path)\r\n logging.info('Pre-trained model loaded for test')\r\n self.eval(mode='test', meta=self.meta)\r\n\r\n def eval_(self, path=None):\r\n self.load(path)\r\n logging.info('Pre-trained model loaded for dev')\r\n self.eval(mode='dev', meta=self.meta)\r\n\r\n\r\ndef seed_everything(seed=2040):\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n os.environ['PYTHONHASHSEED'] = str(seed)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r\n\r\n\r\ndef adjust_learning_rate(optimizer, epoch, lr, warm_up_step, max_update_step, end_learning_rate=0.0, power=1.0):\r\n epoch += 1\r\n if warm_up_step > 0 and epoch <= warm_up_step:\r\n warm_up_factor = epoch / float(warm_up_step)\r\n lr = warm_up_factor * lr\r\n elif epoch >= max_update_step:\r\n lr = end_learning_rate\r\n else:\r\n lr_range = lr - end_learning_rate\r\n pct_remaining = 1 - (epoch - warm_up_step) / (max_update_step - warm_up_step)\r\n lr = lr_range * (pct_remaining ** power) + end_learning_rate\r\n\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n return lr\r\n\r\n\r\nif __name__ == '__main__':\r\n args = read_options()\r\n if not os.path.exists('./logs_'):\r\n os.mkdir('./logs_')\r\n if not os.path.exists('./logs'):\r\n os.mkdir('./logs')\r\n\r\n logger = logging.getLogger()\r\n logger.setLevel(logging.DEBUG)\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s: - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\r\n\r\n fh = logging.FileHandler('./logs_/log-{}.txt'.format(args.prefix))\r\n fh.setLevel(logging.DEBUG)\r\n fh.setFormatter(formatter)\r\n\r\n ch = logging.StreamHandler()\r\n ch.setLevel(logging.INFO)\r\n 
ch.setFormatter(formatter)\r\n\r\n logger.addHandler(ch)\r\n logger.addHandler(fh)\r\n\r\n seed_everything(args.seed)\r\n\r\n logging.info('*' * 100)\r\n logging.info('*** hyper-parameters ***')\r\n for k, v in vars(args).items():\r\n logging.info(k + ': ' + str(v))\r\n logging.info('*' * 100)\r\n\r\n trainer = Trainer(args)\r\n\r\n if args.test:\r\n trainer.test_()\r\n trainer.eval_()\r\n else:\r\n trainer.train()\r\n print('last checkpoint!')\r\n trainer.eval_()\r\n trainer.test_()\r\n print('best checkpoint!')\r\n trainer.eval_(args.save_path + '_best')\r\n trainer.test_(args.save_path + '_best')\r\n" ]
[ [ "torch.optim.Adam" ] ]
iosifidisvasileios/AdaFair
[ "5e4ad12a670a767dd8aaf4f7d0a68b871d1f4d1a" ]
[ "Competitors/SMOTEBoost.py" ]
[ "\"\"\"Weight Boosting\n\nThis module contains weight boosting estimators for both classification and\nregression.\n\nThe module structure is the following:\n\n- The ``BaseWeightBoosting`` base class implements a common ``fit`` method\n for all the estimators in the module. Regression and classification\n only differ from each other in the loss function that is optimized.\n\n- ``AdaCostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for\n classification problems.\n\n- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for\n regression problems.\n\"\"\"\n\n# Authors: Noel Dawe <noel@dawe.me>\n# Gilles Louppe <g.louppe@gmail.com>\n# Hamzeh Alsalhi <ha258@cornell.edu>\n# Arnaud Joly <arnaud.v.joly@gmail.com>\n#\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom sklearn.base import is_classifier, ClassifierMixin, is_regressor\nfrom sklearn.ensemble import BaseEnsemble\nfrom sklearn.ensemble.forest import BaseForest\n# from sklearn.externals import six\nimport six\nimport sys\nsys.modules['sklearn.externals.six'] = six\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import r2_score\nfrom sklearn.tree.tree import BaseDecisionTree, DTYPE, DecisionTreeClassifier\nfrom sklearn.utils.validation import has_fit_parameter, check_is_fitted, check_array, check_X_y, check_random_state\n\n__all__ = [\n 'SMOTEBoost'\n]\n\nfrom collections import Counter\n\nimport numpy as np\nfrom sklearn.base import is_regressor\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble.forest import BaseForest\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import normalize\nfrom sklearn.tree.tree import BaseDecisionTree\nfrom sklearn.utils import check_array\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils import check_X_y\n\n\n# from sklearn.utils import shuffle\n\n\nclass SMOTE(object):\n \"\"\"Implementation of Synthetic Minority Over-Sampling Technique (SMOTE).\n\n SMOTE performs oversampling of the minority class by picking target\n minority class samples and their nearest minority class neighbors and\n generating new samples that linearly combine features of each target\n sample with features of its selected minority class neighbors [1].\n\n Parameters\n ----------\n k_neighbors : int, optional (default=5)\n Number of nearest neighbors.\n random_state : int or None, optional (default=None)\n If int, random_state is the seed used by the random number generator.\n If None, the random number generator is the RandomState instance used\n by np.random.\n\n References\n ----------\n .. [1] N. V. Chawla, K. W. Bowyer, L. O. Hall, and P. Kegelmeyer. 
\"SMOTE:\n Synthetic Minority Over-Sampling Technique.\" Journal of Artificial\n Intelligence Research (JAIR), 2002.\n \"\"\"\n\n def __init__(self, k_neighbors=5, random_state=None):\n self.k = k_neighbors\n self.random_state = random_state\n\n def sample(self, n_samples):\n \"\"\"Generate samples.\n\n Parameters\n ----------\n n_samples : int\n Number of new synthetic samples.\n\n Returns\n -------\n S : array, shape = [n_samples, n_features]\n Returns synthetic samples.\n \"\"\"\n np.random.seed(seed=self.random_state)\n\n S = np.zeros(shape=(n_samples, self.n_features))\n # Calculate synthetic samples.\n for i in range(n_samples):\n j = np.random.randint(0, self.X.shape[0])\n\n # Find the NN for each sample.\n # Exclude the sample itself.\n nn = self.neigh.kneighbors(self.X[j].reshape(1, -1),\n return_distance=False)[:, 1:]\n nn_index = np.random.choice(nn[0])\n\n dif = self.X[nn_index] - self.X[j]\n gap = np.random.random()\n\n S[i, :] = self.X[j, :] + gap * dif[:]\n\n return S\n\n def fit(self, X):\n \"\"\"Train model based on input data.\n\n Parameters\n ----------\n X : array-like, shape = [n_minority_samples, n_features]\n Holds the minority samples.\n \"\"\"\n self.X = X\n self.n_minority_samples, self.n_features = self.X.shape\n\n # Learn nearest neighbors.\n self.neigh = NearestNeighbors(n_neighbors=self.k + 1)\n self.neigh.fit(self.X)\n\n return self\n\n\nclass BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):\n \"\"\"Base class for AdaBoost estimators.\n\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator=None,\n n_estimators=50,\n estimator_params=tuple(),\n learning_rate=1.,\n random_state=None):\n\n super(BaseWeightBoosting, self).__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params)\n\n self.W_pos = 0.\n self.W_neg = 0.\n self.W_dp = 0.\n self.W_fp = 0.\n self.W_dn = 0.\n self.W_fn = 0.\n self.performance = []\n self.learning_rate = learning_rate\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Build a boosted classifier/regressor from the training set (X, y).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is\n forced to DTYPE from tree._tree if the base classifier of this\n ensemble weighted boosting classifier is a tree or forest.\n\n y : array-like of shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n\n sample_weight : array-like of shape = [n_samples], optional\n Sample weights. If None, the sample weights are initialized to\n 1 / n_samples.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n # Check parameters\n self.weight_list = []\n if self.learning_rate <= 0:\n raise ValueError(\"learning_rate must be greater than zero\")\n\n if (self.base_estimator is None or\n isinstance(self.base_estimator, (BaseDecisionTree,\n BaseForest))):\n dtype = DTYPE\n accept_sparse = 'csc'\n else:\n dtype = None\n accept_sparse = ['csr', 'csc']\n\n X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype, y_numeric=is_regressor(self))\n\n if sample_weight is None:\n # Initialize weights to 1 / n_samples\n sample_weight = np.empty(X.shape[0], dtype=np.float64)\n sample_weight[:] = 1. 
/ X.shape[0]\n else:\n sample_weight = check_array(sample_weight, ensure_2d=False)\n # Normalize existing weights\n sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)\n\n # Check that the sample weights sum is positive\n if sample_weight.sum() <= 0:\n raise ValueError(\n \"Attempting to fit with a non-positive \"\n \"weighted number of samples.\")\n\n # Check parameters\n self._validate_estimator()\n\n # Clear any previous fit results\n self.estimators_ = []\n self.estimator_alphas_ = np.zeros(self.n_estimators, dtype=np.float64)\n\n if self.debug:\n self.conf_scores = []\n\n random_state = check_random_state(self.random_state)\n # if self.debug:\n # print \"iteration, alpha , positives , negatives , dp , fp , dn , fn\"\n\n old_weights_sum = np.sum(sample_weight)\n pos, neg, dp, fp, dn, fn = self.calculate_weights(X, y, sample_weight)\n\n if self.debug:\n self.weight_list.append(\n 'init' + \",\" + str(0) + \",\" + str(pos) + \", \" + str(neg) + \", \" + str(dp) + \", \" + str(\n fp) + \", \" + str(dn) + \", \" + str(fn))\n\n stats_c_ = Counter(y)\n maj_c_ = max(stats_c_, key=stats_c_.get)\n min_c_ = min(stats_c_, key=stats_c_.get)\n self.minority_target = min_c_\n\n # print \"training error, training balanced accuracy, training EQ.Odds, testing error, testing balanced accuracy, testing EQ.Odds\"\n for iboost in range(self.n_estimators):\n # SMOTE step.\n X_min = X[np.where(y == self.minority_target)]\n self.smote.fit(X_min)\n X_syn = self.smote.sample(self.n_samples)\n y_syn = np.full(X_syn.shape[0], fill_value=self.minority_target,\n dtype=np.int64)\n\n # Normalize synthetic sample weights based on current training set.\n sample_weight_syn = np.empty(X_syn.shape[0], dtype=np.float64)\n sample_weight_syn[:] = 1. / X.shape[0]\n\n # Combine the original and synthetic samples.\n X = np.vstack((X, X_syn))\n y = np.append(y, y_syn)\n\n # Combine the weights.\n sample_weight = \\\n np.append(sample_weight, sample_weight_syn).reshape(-1, 1)\n sample_weight = \\\n np.squeeze(normalize(sample_weight, axis=0, norm='l1'))\n # Boosting step\n sample_weight, alpha, error = self._boost(\n iboost,\n X, y,\n sample_weight,\n random_state)\n\n # Early termination\n if sample_weight is None:\n break\n\n self.estimator_alphas_[iboost] = alpha\n\n # Stop if error is zero\n if error == 0.5:\n break\n\n new_sample_weight = np.sum(sample_weight)\n multiplier = old_weights_sum / new_sample_weight\n\n # Stop if the sum of sample weights has become non-positive\n if new_sample_weight <= 0:\n break\n\n if iboost < self.n_estimators - 1:\n # Normalize\n sample_weight *= multiplier\n\n pos, neg, dp, fp, dn, fn = self.calculate_weights(X, y, sample_weight)\n\n if self.debug:\n self.weight_list.append(\n str(iboost) + \",\" + str(alpha) + \",\" + str(pos) + \", \" + str(neg) + \", \" + str(dp) + \", \" + str(\n fp) + \", \" + str(dn) + \", \" + str(fn))\n\n self.W_pos += pos / self.n_estimators\n self.W_neg += neg / self.n_estimators\n self.W_dp += dp / self.n_estimators\n self.W_fp += fp / self.n_estimators\n self.W_dn += dn / self.n_estimators\n self.W_fn += fn / self.n_estimators\n\n old_weights_sum = np.sum(sample_weight)\n\n if self.debug:\n self.get_confidence_scores(X)\n\n return self\n\n def get_weights(self, ):\n return [self.W_pos, self.W_neg, self.W_dp, self.W_fp, self.W_dn, self.W_fn]\n\n def get_confidence_scores(self, X):\n self.conf_scores = self.decision_function(X)\n\n @abstractmethod\n def _boost(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost.\n\n 
Warning: This method needs to be overridden by subclasses.\n\n Parameters\n ----------\n iboost : int\n The index of the current boost iteration.\n\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. COO, DOK, and LIL are converted to CSR.\n\n y : array-like of shape = [n_samples]\n The target values (class labels).\n\n sample_weight : array-like of shape = [n_samples]\n The current sample weights.\n\n random_state : numpy.RandomState\n The current random number generator\n\n Returns\n -------\n sample_weight : array-like of shape = [n_samples] or None\n The reweighted sample weights.\n If None then boosting has terminated early.\n\n estimator_weight : float\n The weight for the current boost.\n If None then boosting has terminated early.\n\n error : float\n The classification error for the current boost.\n If None then boosting has terminated early.\n \"\"\"\n pass\n\n def staged_score(self, X, y, sample_weight=None):\n \"\"\"Return staged scores for X, y.\n\n This generator method yields the ensemble score after each iteration of\n boosting and therefore allows monitoring, such as to determine the\n score on a test set after each boost.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n y : array-like, shape = [n_samples]\n Labels for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n z : float\n \"\"\"\n for y_pred in self.staged_predict(X):\n if is_classifier(self):\n yield accuracy_score(y, y_pred, sample_weight=sample_weight)\n else:\n yield r2_score(y, y_pred, sample_weight=sample_weight)\n\n @property\n def feature_importances_(self):\n \"\"\"Return the feature importances (the higher, the more important the\n feature).\n\n Returns\n -------\n feature_importances_ : array, shape = [n_features]\n \"\"\"\n if self.estimators_ is None or len(self.estimators_) == 0:\n raise ValueError(\"Estimator not fitted, \"\n \"call `fit` before `feature_importances_`.\")\n\n try:\n norm = self.estimator_alphas_.sum()\n return (sum(weight * clf.feature_importances_ for weight, clf\n in zip(self.estimator_alphas_, self.estimators_))\n / norm)\n\n except AttributeError:\n raise AttributeError(\n \"Unable to compute feature importances \"\n \"since base_estimator does not have a \"\n \"feature_importances_ attribute\")\n\n def _validate_X_predict(self, X):\n \"\"\"Ensure that X is in the proper format\"\"\"\n if (self.base_estimator is None or\n isinstance(self.base_estimator,\n (BaseDecisionTree, BaseForest))):\n X = check_array(X, accept_sparse='csr', dtype=DTYPE)\n\n else:\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n\n return X\n\n def calculate_weights(self, data, labels, sample_weight):\n\n protected_positive = 0.\n non_protected_positive = 0.\n\n protected_negative = 0.\n non_protected_negative = 0.\n\n for idx, val in enumerate(data):\n # protrcted population\n if val[self.saIndex] == self.saValue:\n # protected group\n if labels[idx] == 1:\n protected_positive += sample_weight[idx] # /len(sample_weight)\n else:\n protected_negative += sample_weight[idx] # /len(sample_weight)\n else:\n # correctly classified\n if labels[idx] == 1:\n non_protected_positive += sample_weight[idx] # /len(sample_weight)\n else:\n non_protected_negative += sample_weight[idx] # 
/len(sample_weight)\n\n return [protected_positive + non_protected_positive,\n protected_negative + non_protected_negative,\n protected_positive,\n non_protected_positive,\n protected_negative,\n non_protected_negative]\n\n\ndef _samme_proba(estimator, n_classes, X):\n \"\"\"Calculate algorithm 4, step 2, equation c) of Zhu et al [1].\n\n References\n ----------\n .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.\n\n \"\"\"\n proba = estimator.predict_proba(X)\n\n # Displace zero probabilities so the log is defined.\n # Also fix negative elements which may occur with\n # negative sample weights.\n proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps\n log_proba = np.log(proba)\n\n return (n_classes - 1) * (log_proba - (1. / n_classes)\n * log_proba.sum(axis=1)[:, np.newaxis])\n\n\nclass SMOTEBoost(BaseWeightBoosting, ClassifierMixin):\n\n def __init__(self,\n base_estimator=None,\n n_estimators=50,\n learning_rate=1.,\n algorithm='SAMME',\n random_state=None,\n saIndex=None, saValue=None,\n debug=False, CSB=\"CSB1\",\n X_test=None, y_test=None,\n n_samples=100,\n k_neighbors=5, ):\n\n self.n_samples = n_samples\n self.algorithm = algorithm\n self.smote = SMOTE(k_neighbors=k_neighbors,\n random_state=random_state)\n\n super(SMOTEBoost, self).__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n learning_rate=learning_rate,\n random_state=random_state)\n\n self.saIndex = saIndex\n self.saValue = saValue\n self.algorithm = algorithm\n self.debug = debug\n self.X_test = X_test\n self.y_test = y_test\n\n def fit(self, X, y, sample_weight=None):\n return super(SMOTEBoost, self).fit(X, y, sample_weight)\n\n def _validate_estimator(self):\n \"\"\"Check the estimator and set the base_estimator_ attribute.\"\"\"\n super(SMOTEBoost, self)._validate_estimator(default=DecisionTreeClassifier(max_depth=1))\n\n # SAMME-R requires predict_proba-enabled base estimators\n if self.algorithm == 'SAMME.R':\n if not hasattr(self.base_estimator_, 'predict_proba'):\n raise TypeError(\n \"AdaCostClassifier with algorithm='SAMME.R' requires \"\n \"that the weak learner supports the calculation of class \"\n \"probabilities with a predict_proba method.\\n\"\n \"Please change the base estimator or set \"\n \"algorithm='SAMME' instead.\")\n if not has_fit_parameter(self.base_estimator_, \"sample_weight\"):\n raise ValueError(\"%s doesn't support sample_weight.\"\n % self.base_estimator_.__class__.__name__)\n\n def _boost(self, iboost, X, y, sample_weight, random_state):\n return self._boost_discrete(iboost, X, y, sample_weight, random_state)\n\n def _boost_discrete(self, iboost, X, y, sample_weight, random_state):\n \"\"\"Implement a single boost using the SAMME discrete algorithm.\"\"\"\n estimator = self._make_estimator(random_state=random_state)\n estimator.fit(X, y, sample_weight=sample_weight)\n y_predict = estimator.predict(X)\n if iboost == 0:\n self.classes_ = getattr(estimator, 'classes_', None)\n self.n_classes_ = len(self.classes_)\n\n incorrect = y_predict != y\n # Error fraction\n estimator_error = np.mean(\n np.average(incorrect, weights=sample_weight, axis=0))\n # Stop if classification is perfect\n if estimator_error <= 0:\n return sample_weight, 1., 0.\n n_classes = self.n_classes_\n # Stop if the error is at least as bad as random guessing\n if estimator_error >= 1. - (1. 
/ n_classes):\n self.estimators_.pop(-1)\n if len(self.estimators_) == 0:\n raise ValueError('BaseClassifier in AdaBoostClassifier '\n 'ensemble is worse than random, ensemble '\n 'can not be fit.')\n return None, None, None\n # Boost weight using multi-class AdaBoost SAMME alg\n alpha = self.learning_rate * (\n np.log((1. - estimator_error) / estimator_error) +\n np.log(n_classes - 1.))\n if not iboost == self.n_estimators - 1:\n # Only boost positive weights\n for idx, row in enumerate(sample_weight):\n if y[idx] != y_predict[idx] :\n sample_weight[idx] *= np.exp(alpha)\n\n # if self.debug:\n # if iboost !=0:\n # y_predict = self.predict(X)\n # y_predict_probs = self.decision_function(X)\n # incorrect = y_predict != y\n # training_error = np.mean(np.average(incorrect, axis=0))\n # train_auc = sklearn.metrics.balanced_accuracy_score(y, y_predict)\n # train_fairness = self.calculate_fairness(X,y,y_predict)\n #\n # y_predict = self.predict(self.X_test)\n # y_predict_probs = self.decision_function(self.X_test)\n # incorrect = y_predict != self.y_test\n # test_error = np.mean(np.average(incorrect, axis=0))\n # test_auc = sklearn.metrics.balanced_accuracy_score(self.y_test, y_predict)\n # test_fairness = self.calculate_fairness(self.X_test,self.y_test,y_predict)\n # self.performance.append(str(iboost) + \",\" + str(training_error) + \", \" + str(train_auc) + \", \" + str(train_fairness) + \",\"+ str(test_error) + \", \" + str(test_auc)+ \", \" + str(test_fairness))\n # # print str(iboost) + \",\" + str(training_error) + \", \" + str(train_auc) + \", \" + str(train_fairness) + \",\"+ str(test_error) + \", \" + str(test_auc)+ \", \" + str(test_fairness)\n\n return sample_weight, alpha, estimator_error\n\n def get_performance_over_iterations(self):\n return self.performance\n\n def get_weights_over_iterations(self):\n return self.weight_list[-1]\n\n def get_initial_weights(self):\n return self.weight_list[0]\n\n def predict(self, X):\n \"\"\"Predict classes for X.\n\n The predicted class of an input sample is computed as the weighted mean\n prediction of the classifiers in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n Returns\n -------\n y : array of shape = [n_samples]\n The predicted classes.\n \"\"\"\n pred = self.decision_function(X)\n\n if self.n_classes_ == 2:\n return self.classes_.take(pred > 0, axis=0)\n\n return self.classes_.take(np.argmax(pred, axis=1), axis=0)\n\n def decision_function(self, X):\n \"\"\"Compute the decision function of ``X``.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n Returns\n -------\n score : array, shape = [n_samples, k]\n The decision function of the input samples. The order of\n outputs is the same of that of the `classes_` attribute.\n Binary classification is a special cases with ``k == 1``,\n otherwise ``k==n_classes``. 
For binary classification,\n values closer to -1 or 1 mean more like the first or second\n class in ``classes_``, respectively.\n \"\"\"\n check_is_fitted(self, \"n_classes_\")\n X = self._validate_X_predict(X)\n\n n_classes = self.n_classes_\n classes = self.classes_[:, np.newaxis]\n\n pred = sum(\n (estimator.predict(X) == classes).T * w for estimator, w in zip(self.estimators_, self.estimator_alphas_))\n\n pred /= self.estimator_alphas_.sum()\n if n_classes == 2:\n pred[:, 0] *= -1\n return pred.sum(axis=1)\n return pred\n\n def predict_proba(self, X):\n \"\"\"Predict class probabilities for X.\n\n The predicted class probabilities of an input sample is computed as\n the weighted mean predicted class probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n Returns\n -------\n p : array of shape = [n_samples]\n The class probabilities of the input samples. The order of\n outputs is the same of that of the `classes_` attribute.\n \"\"\"\n check_is_fitted(self, \"n_classes_\")\n\n n_classes = self.n_classes_\n X = self._validate_X_predict(X)\n\n if n_classes == 1:\n return np.ones((X.shape[0], 1))\n\n if self.algorithm == 'SAMME.R':\n # The weights are all 1. for SAMME.R\n proba = sum(_samme_proba(estimator, n_classes, X)\n for estimator in self.estimators_)\n else: # self.algorithm == \"SAMME\"\n proba = sum(estimator.predict_proba(X) * w\n for estimator, w in zip(self.estimators_,\n self.estimator_alphas_))\n\n proba /= self.estimator_alphas_.sum()\n proba = np.exp((1. / (n_classes - 1)) * proba)\n normalizer = proba.sum(axis=1)[:, np.newaxis]\n normalizer[normalizer == 0.0] = 1.0\n proba /= normalizer\n\n return proba\n\n def predict_log_proba(self, X):\n \"\"\"Predict class log-probabilities for X.\n\n The predicted class log-probabilities of an input sample is computed as\n the weighted mean predicted class log-probabilities of the classifiers\n in the ensemble.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape = [n_samples, n_features]\n The training input samples. Sparse matrix can be CSC, CSR, COO,\n DOK, or LIL. DOK and LIL are converted to CSR.\n\n Returns\n -------\n p : array of shape = [n_samples]\n The class probabilities of the input samples. The order of\n outputs is the same of that of the `classes_` attribute.\n \"\"\"\n return np.log(self.predict_proba(X))\n\n\n\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "sklearn.metrics.r2_score", "numpy.exp", "numpy.where", "numpy.random.randint", "sklearn.utils.validation.has_fit_parameter", "numpy.finfo", "numpy.full", "numpy.argmax", "sklearn.base.is_classifier", "sklearn.base.is_regressor", "sklearn.neighbors.NearestNeighbors", "numpy.zeros", "numpy.log", "numpy.random.choice", "numpy.append", "sklearn.tree.tree.DecisionTreeClassifier", "numpy.sum", "numpy.random.random", "numpy.random.seed", "sklearn.utils.check_array", "numpy.empty", "numpy.ones", "sklearn.preprocessing.normalize", "numpy.average", "sklearn.utils.check_random_state", "numpy.vstack", "sklearn.metrics.accuracy_score" ] ]
thimo72/haystack
[ "85571cdd15f1c9592cf28121187ffef7d4827f83" ]
[ "haystack/document_stores/milvus2.py" ]
[ "import logging\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Union\n\nif TYPE_CHECKING:\n from haystack.nodes.retriever.base import BaseRetriever\n\nimport numpy as np\n\nfrom scipy.special import expit\nfrom tqdm import tqdm\n\ntry:\n from pymilvus import FieldSchema, CollectionSchema, Collection, connections, utility\n from pymilvus.client.abstract import QueryResult\n from pymilvus.client.types import DataType\nexcept (ImportError, ModuleNotFoundError) as ie:\n from haystack.utils.import_utils import _optional_component_not_installed\n\n _optional_component_not_installed(__name__, \"milvus2\", ie)\n\nfrom haystack.schema import Document\nfrom haystack.document_stores.sql import SQLDocumentStore\nfrom haystack.document_stores.base import get_batches_from_generator\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Milvus2DocumentStore(SQLDocumentStore):\n \"\"\"\n Limitations:\n Milvus 2.0 so far doesn't support the deletion of documents (https://github.com/milvus-io/milvus/issues/7130).\n Therefore, delete_documents() and update_embeddings() won't work yet.\n\n Differences to 1.x:\n Besides big architectural changes that impact performance and reliability 2.0 supports the filtering by scalar data types.\n For Haystack users this means you can now run a query using vector similarity and filter for some meta data at the same time!\n (See https://milvus.io/docs/v2.0.0/comparison.md for more details)\n\n Usage:\n 1. Start a Milvus service via docker (see https://milvus.io/docs/v2.0.0/install_standalone-docker.md)\n 2. Run pip install farm-haystack[milvus]\n 3. Init a MilvusDocumentStore() in Haystack\n\n Overview:\n Milvus (https://milvus.io/) is a highly reliable, scalable Document Store specialized on storing and processing vectors.\n Therefore, it is particularly suited for Haystack users that work with dense retrieval methods (like DPR).\n\n In contrast to FAISS, Milvus ...\n - runs as a separate service (e.g. a Docker container) and can scale easily in a distributed environment\n - allows dynamic data management (i.e. you can insert/delete vectors without recreating the whole index)\n - encapsulates multiple ANN libraries (FAISS, ANNOY ...)\n\n This class uses Milvus for all vector related storage, processing and querying.\n The meta-data (e.g. for filtering) and the document text are however stored in a separate SQL Database as Milvus\n does not allow these data types (yet).\n \"\"\"\n\n def __init__(\n self,\n sql_url: str = \"sqlite:///\",\n host: str = \"localhost\",\n port: str = \"19530\",\n connection_pool: str = \"SingletonThread\",\n index: str = \"document\",\n vector_dim: int = None,\n embedding_dim: int = 768,\n index_file_size: int = 1024,\n similarity: str = \"dot_product\",\n index_type: str = \"IVF_FLAT\",\n index_param: Optional[Dict[str, Any]] = None,\n search_param: Optional[Dict[str, Any]] = None,\n return_embedding: bool = False,\n embedding_field: str = \"embedding\",\n id_field: str = \"id\",\n custom_fields: Optional[List[Any]] = None,\n progress_bar: bool = True,\n duplicate_documents: str = \"overwrite\",\n isolation_level: str = None,\n consistency_level: int = 0,\n ):\n \"\"\"\n :param sql_url: SQL connection URL for storing document texts and metadata. It defaults to a local, file based SQLite DB. For large scale\n deployment, Postgres is recommended. If using MySQL then same server can also be used for\n Milvus metadata. 
For more details see https://milvus.io/docs/v2.0.0/data_manage.md.\n :param milvus_url: Milvus server connection URL for storing and processing vectors.\n Protocol, host and port will automatically be inferred from the URL.\n See https://milvus.io/docs/v2.0.0/install_milvus.md for instructions to start a Milvus instance.\n :param connection_pool: Connection pool type to connect with Milvus server. Default: \"SingletonThread\".\n :param index: Index name for text, embedding and metadata (in Milvus terms, this is the \"collection name\").\n :param vector_dim: Deprecated. Use embedding_dim instead.\n :param embedding_dim: The embedding vector size. Default: 768.\n :param index_file_size: Specifies the size of each segment file that is stored by Milvus and its default value is 1024 MB.\n When the size of newly inserted vectors reaches the specified volume, Milvus packs these vectors into a new segment.\n Milvus creates one index file for each segment. When conducting a vector search, Milvus searches all index files one by one.\n As a rule of thumb, we would see a 30% ~ 50% increase in the search performance after changing the value of index_file_size from 1024 to 2048.\n Note that an overly large index_file_size value may cause failure to load a segment into the memory or graphics memory.\n (From https://milvus.io/docs/v2.0.0/performance_faq.md)\n :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default and recommended for DPR embeddings.\n 'cosine' is recommended for Sentence Transformers, but is not directly supported by Milvus.\n However, you can normalize your embeddings and use `dot_product` to get the same results.\n See https://milvus.io/docs/v2.0.0/metric.md.\n :param index_type: Type of approximate nearest neighbour (ANN) index used. 
The choice here determines your tradeoff between speed and accuracy.\n Some popular options:\n - FLAT (default): Exact method, slow\n - IVF_FLAT, inverted file based heuristic, fast\n - HSNW: Graph based, fast\n - ANNOY: Tree based, fast\n See: https://milvus.io/docs/v2.0.0/index.md\n :param index_param: Configuration parameters for the chose index_type needed at indexing time.\n For example: {\"nlist\": 16384} as the number of cluster units to create for index_type IVF_FLAT.\n See https://milvus.io/docs/v2.0.0/index.md\n :param search_param: Configuration parameters for the chose index_type needed at query time\n For example: {\"nprobe\": 10} as the number of cluster units to query for index_type IVF_FLAT.\n See https://milvus.io/docs/v2.0.0/index.md\n :param return_embedding: To return document embedding.\n :param embedding_field: Name of field containing an embedding vector.\n :param progress_bar: Whether to show a tqdm progress bar or not.\n Can be helpful to disable in production deployments to keep the logs clean.\n :param duplicate_documents: Handle duplicates document based on parameter options.\n Parameter options : ( 'skip','overwrite','fail')\n skip: Ignore the duplicates documents\n overwrite: Update any existing documents with the same ID when adding documents.\n fail: an error is raised if the document ID of the document being added already\n exists.\n :param isolation_level: see SQLAlchemy's `isolation_level` parameter for `create_engine()` (https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.isolation_level)\n \"\"\"\n\n # save init parameters to enable export of component config as YAML\n self.set_config(\n sql_url=sql_url,\n host=host,\n port=port,\n connection_pool=connection_pool,\n index=index,\n vector_dim=vector_dim,\n embedding_dim=embedding_dim,\n index_file_size=index_file_size,\n similarity=similarity,\n index_type=index_type,\n index_param=index_param,\n search_param=search_param,\n duplicate_documents=duplicate_documents,\n id_field=id_field,\n return_embedding=return_embedding,\n embedding_field=embedding_field,\n progress_bar=progress_bar,\n custom_fields=custom_fields,\n isolation_level=isolation_level,\n )\n connections.add_connection(default={\"host\": host, \"port\": port})\n connections.connect()\n\n if vector_dim is not None:\n warnings.warn(\n \"The 'vector_dim' parameter is deprecated, \" \"use 'embedding_dim' instead.\", DeprecationWarning, 2\n )\n self.embedding_dim = vector_dim\n else:\n self.embedding_dim = embedding_dim\n\n self.index_file_size = index_file_size\n self.cosine = False\n\n if similarity == \"dot_product\":\n self.metric_type = \"IP\"\n self.similarity = similarity\n elif similarity == \"l2\":\n self.metric_type = \"L2\"\n self.similarity = similarity\n elif similarity == \"cosine\":\n self.metric_type = \"IP\"\n self.similarity = \"dot_product\"\n self.cosine = True\n else:\n raise ValueError(\n \"The Milvus document store can currently only support dot_product and L2 similarity. 
\"\n 'Please set similarity=\"dot_product\" or \"l2\"'\n )\n\n self.index_type = index_type\n self.index_param = index_param or {\"nlist\": 16384}\n self.search_param = search_param or {\"nprobe\": 10}\n self.index = index\n self.embedding_field = embedding_field\n self.id_field = id_field\n self.custom_fields = custom_fields\n\n self.collection = self._create_collection_and_index_if_not_exist(self.index, consistency_level)\n\n self.return_embedding = return_embedding\n self.progress_bar = progress_bar\n\n super().__init__(\n url=sql_url, index=index, duplicate_documents=duplicate_documents, isolation_level=isolation_level\n )\n\n def _create_collection_and_index_if_not_exist(\n self, index: Optional[str] = None, consistency_level: int = 0, index_param: Optional[Dict[str, Any]] = None\n ):\n index = index or self.index\n index_param = index_param or self.index_param\n custom_fields = self.custom_fields or []\n\n has_collection = utility.has_collection(collection_name=index)\n if not has_collection:\n fields = [\n FieldSchema(name=self.id_field, dtype=DataType.INT64, is_primary=True, auto_id=True),\n FieldSchema(name=self.embedding_field, dtype=DataType.FLOAT_VECTOR, dim=self.embedding_dim),\n ]\n\n for field in custom_fields:\n if field.name == self.id_field or field.name == self.embedding_field:\n logger.warning(f\"Skipping `{field.name}` as it is similar to `id_field` or `embedding_field`\")\n else:\n fields.append(field)\n\n collection_schema = CollectionSchema(fields=fields)\n else:\n collection_schema = None\n\n collection = Collection(name=index, schema=collection_schema, consistency_level=consistency_level)\n\n has_index = collection.has_index()\n if not has_index:\n collection.create_index(\n field_name=self.embedding_field,\n index_params={\"index_type\": self.index_type, \"metric_type\": self.metric_type, \"params\": index_param},\n )\n\n collection.load()\n\n return collection\n\n def _create_document_field_map(self) -> Dict:\n return {self.index: self.embedding_field}\n\n def write_documents(\n self,\n documents: Union[List[dict], List[Document]],\n index: Optional[str] = None,\n batch_size: int = 10_000,\n duplicate_documents: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n index_param: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"\n Add new documents to the DocumentStore.\n\n :param documents: List of `Dicts` or List of `Documents`. If they already contain the embeddings, we'll index\n them right away in Milvus. 
If not, you can later call `update_embeddings()` to create & index them.\n :param index: (SQL) index name for storing the docs and metadata\n :param batch_size: When working with large number of documents, batching can help reduce memory footprint.\n :param duplicate_documents: Handle duplicates document based on parameter options.\n Parameter options : ( 'skip','overwrite','fail')\n skip: Ignore the duplicates documents\n overwrite: Update any existing documents with the same ID when adding documents.\n fail: an error is raised if the document ID of the document being added already\n exists.\n :raises DuplicateDocumentError: Exception trigger on duplicate document\n :return:\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n index = index or self.index\n index_param = index_param or self.index_param\n duplicate_documents = duplicate_documents or self.duplicate_documents\n assert (\n duplicate_documents in self.duplicate_documents_options\n ), f\"duplicate_documents parameter must be {', '.join(self.duplicate_documents_options)}\"\n field_map = self._create_document_field_map()\n\n if len(documents) == 0:\n logger.warning(\"Calling DocumentStore.write_documents() with empty list\")\n return\n\n document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents]\n document_objects = self._handle_duplicate_documents(document_objects, duplicate_documents)\n add_vectors = False if document_objects[0].embedding is None else True\n\n batched_documents = get_batches_from_generator(document_objects, batch_size)\n with tqdm(total=len(document_objects), disable=not self.progress_bar) as progress_bar:\n mutation_result: Any = None\n\n for document_batch in batched_documents:\n if add_vectors:\n doc_ids = []\n embeddings = []\n for doc in document_batch:\n doc_ids.append(doc.id)\n if isinstance(doc.embedding, np.ndarray):\n if self.cosine:\n embedding = doc.embedding / np.linalg.norm(doc.embedding)\n embeddings.append(embedding.tolist())\n else:\n embeddings.append(doc.embedding.tolist())\n elif isinstance(doc.embedding, list):\n if self.cosine:\n embedding = np.array(doc.embedding)\n embedding /= np.linalg.norm(embedding)\n embeddings.append(embedding.tolist())\n else:\n embeddings.append(doc.embedding)\n else:\n raise AttributeError(\n f\"Format of supplied document embedding {type(doc.embedding)} is not \"\n f\"supported. 
Please use list or numpy.ndarray\"\n )\n if duplicate_documents == \"overwrite\":\n existing_docs = super().get_documents_by_id(ids=doc_ids, index=index)\n self._delete_vector_ids_from_milvus(documents=existing_docs, index=index)\n\n mutation_result = self.collection.insert([embeddings])\n\n docs_to_write_in_sql = []\n\n for idx, doc in enumerate(document_batch):\n meta = doc.meta\n if add_vectors and mutation_result is not None:\n meta[\"vector_id\"] = str(mutation_result.primary_keys[idx])\n docs_to_write_in_sql.append(doc)\n\n super().write_documents(docs_to_write_in_sql, index=index, duplicate_documents=duplicate_documents)\n progress_bar.update(batch_size)\n progress_bar.close()\n\n # TODO: Equivalent in 2.0?\n\n # if duplicate_documents == 'overwrite':\n # connection.compact(collection_name=index)\n\n def update_embeddings(\n self,\n retriever: \"BaseRetriever\",\n index: Optional[str] = None,\n batch_size: int = 10_000,\n update_existing_embeddings: bool = True,\n filters: Optional[Dict[str, Any]] = None, # TODO: Adapt type once we allow extended filters in Milvus2DocStore\n ):\n \"\"\"\n Updates the embeddings in the the document store using the encoding model specified in the retriever.\n This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config).\n\n :param retriever: Retriever to use to get embeddings for text\n :param index: (SQL) index name for storing the docs and metadata\n :param batch_size: When working with large number of documents, batching can help reduce memory footprint.\n :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False,\n only documents without embeddings are processed. This mode can be used for\n incremental updating of embeddings, wherein, only newly indexed documents\n get processed.\n :param filters: Optional filters to narrow down the documents for which embeddings are to be updated.\n Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]}\n :return: None\n \"\"\"\n index = index or self.index\n\n document_count = self.get_document_count(index=index)\n if document_count == 0:\n logger.warning(\"Calling DocumentStore.update_embeddings() on an empty index\")\n return\n\n logger.info(f\"Updating embeddings for {document_count} docs...\")\n\n result = self._query(\n index=index,\n vector_ids=None,\n batch_size=batch_size,\n filters=filters,\n only_documents_without_embedding=not update_existing_embeddings,\n )\n batched_documents = get_batches_from_generator(result, batch_size)\n with tqdm(\n total=document_count, disable=not self.progress_bar, position=0, unit=\" docs\", desc=\"Updating Embedding\"\n ) as progress_bar:\n for document_batch in batched_documents:\n self._delete_vector_ids_from_milvus(documents=document_batch, index=index)\n\n embeddings = retriever.embed_documents(document_batch) # type: ignore\n if self.cosine:\n embeddings = [embedding / np.linalg.norm(embedding) for embedding in embeddings]\n embeddings_list = [embedding.tolist() for embedding in embeddings]\n assert len(document_batch) == len(embeddings_list)\n\n mutation_result = self.collection.insert([embeddings_list])\n\n vector_id_map = {}\n for vector_id, doc in zip(mutation_result.primary_keys, document_batch):\n vector_id_map[doc.id] = str(vector_id)\n\n self.update_vector_ids(vector_id_map, index=index)\n progress_bar.set_description_str(\"Documents Processed\")\n progress_bar.update(batch_size)\n\n # TODO: Equivalent in 2.0?\n # 
self.milvus_server.compact(collection_name=index)\n\n def query_by_embedding(\n self,\n query_emb: np.ndarray,\n filters: Optional[Dict[str, Any]] = None, # TODO: Adapt type once we allow extended filters in Milvus2DocStore\n top_k: int = 10,\n index: Optional[str] = None,\n return_embedding: Optional[bool] = None,\n headers: Optional[Dict[str, str]] = None,\n ) -> List[Document]:\n \"\"\"\n Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.\n\n :param query_emb: Embedding of the query (e.g. gathered from DPR)\n :param filters: Optional filters to narrow down the search space.\n Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]}\n :param top_k: How many documents to return\n :param index: (SQL) index name for storing the docs and metadata\n :param return_embedding: To return document embedding\n :return:\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n index = index or self.index\n has_collection = utility.has_collection(collection_name=index)\n if not has_collection:\n raise Exception(\"No index exists. Use 'update_embeddings()` to create an index.\")\n\n if return_embedding is None:\n return_embedding = self.return_embedding\n\n query_emb = query_emb.reshape(-1).astype(np.float32)\n if self.cosine:\n query_emb = query_emb / np.linalg.norm(query_emb)\n\n search_result: QueryResult = self.collection.search(\n data=[query_emb.tolist()],\n anns_field=self.embedding_field,\n param={\"metric_type\": self.metric_type, **self.search_param},\n limit=top_k,\n )\n\n vector_ids_for_query = []\n scores_for_vector_ids: Dict[str, float] = {}\n for vector_id, distance in zip(search_result[0].ids, search_result[0].distances):\n vector_ids_for_query.append(str(vector_id))\n scores_for_vector_ids[str(vector_id)] = distance\n\n documents = self.get_documents_by_vector_ids(vector_ids_for_query, index=index)\n\n if return_embedding:\n self._populate_embeddings_to_docs(index=index, docs=documents)\n\n for doc in documents:\n raw_score = scores_for_vector_ids[doc.meta[\"vector_id\"]]\n if self.cosine:\n doc.score = float((raw_score + 1) / 2)\n else:\n doc.score = float(expit(np.asarray(raw_score / 100)))\n\n return documents\n\n def delete_documents(\n self,\n index: Optional[str] = None,\n ids: Optional[List[str]] = None,\n filters: Optional[Dict[str, Any]] = None, # TODO: Adapt type once we allow extended filters in Milvus2DocStore\n headers: Optional[Dict[str, str]] = None,\n batch_size: int = 10_000,\n ):\n \"\"\"\n Delete all documents (from SQL AND Milvus).\n :param index: (SQL) index name for storing the docs and metadata\n :param filters: Optional filters to narrow down the search space.\n Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]}\n :return: None\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n if ids:\n self._delete_vector_ids_from_milvus(ids=ids, index=index)\n elif filters:\n batch = []\n for existing_docs in super().get_all_documents_generator(\n filters=filters, index=index, batch_size=batch_size\n ):\n batch.append(existing_docs)\n if len(batch) == batch_size:\n self._delete_vector_ids_from_milvus(documents=batch, index=index)\n if len(batch) != 0:\n self._delete_vector_ids_from_milvus(documents=batch, index=index)\n else:\n self.collection.drop()\n self.collection = self._create_collection_and_index_if_not_exist(self.index)\n\n index = index or self.index\n 
super().delete_documents(index=index, filters=filters, ids=ids)\n\n def get_all_documents_generator(\n self,\n index: Optional[str] = None,\n filters: Optional[Dict[str, Any]] = None, # TODO: Adapt type once we allow extended filters in Milvus2DocStore\n return_embedding: Optional[bool] = None,\n batch_size: int = 10_000,\n headers: Optional[Dict[str, str]] = None,\n ) -> Generator[Document, None, None]:\n \"\"\"\n Get all documents from the document store. Under-the-hood, documents are fetched in batches from the\n document store and yielded as individual documents. This method can be used to iteratively process\n a large number of documents without having to load all documents in memory.\n\n :param index: Name of the index to get the documents from. If None, the\n DocumentStore's default index (self.index) will be used.\n :param filters: Optional filters to narrow down the documents to return.\n Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]}\n :param return_embedding: Whether to return the document embeddings.\n :param batch_size: When working with large number of documents, batching can help reduce memory footprint.\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n index = index or self.index\n documents = super().get_all_documents_generator(index=index, filters=filters, batch_size=batch_size)\n if return_embedding is None:\n return_embedding = self.return_embedding\n\n for doc in documents:\n if return_embedding:\n self._populate_embeddings_to_docs(index=index, docs=[doc])\n yield doc\n\n def get_all_documents(\n self,\n index: Optional[str] = None,\n filters: Optional[Dict[str, Any]] = None, # TODO: Adapt type once we allow extended filters in Milvus2DocStore\n return_embedding: Optional[bool] = None,\n batch_size: int = 10_000,\n headers: Optional[Dict[str, str]] = None,\n ) -> List[Document]:\n \"\"\"\n Get documents from the document store (optionally using filter criteria).\n\n :param index: Name of the index to get the documents from. If None, the\n DocumentStore's default index (self.index) will be used.\n :param filters: Optional filters to narrow down the documents to return.\n Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]}\n :param return_embedding: Whether to return the document embeddings.\n :param batch_size: When working with large number of documents, batching can help reduce memory footprint.\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n index = index or self.index\n result = self.get_all_documents_generator(\n index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size\n )\n documents = list(result)\n return documents\n\n def get_document_by_id(\n self, id: str, index: Optional[str] = None, headers: Optional[Dict[str, str]] = None\n ) -> Optional[Document]:\n \"\"\"\n Fetch a document by specifying its text id string\n\n :param id: ID of the document\n :param index: Name of the index to get the documents from. 
If None, the\n DocumentStore's default index (self.index) will be used.\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n documents = self.get_documents_by_id([id], index)\n document = documents[0] if documents else None\n return document\n\n def get_documents_by_id(\n self,\n ids: List[str],\n index: Optional[str] = None,\n batch_size: int = 10_000,\n headers: Optional[Dict[str, str]] = None,\n ) -> List[Document]:\n \"\"\"\n Fetch multiple documents by specifying their IDs (strings)\n\n :param ids: List of IDs of the documents\n :param index: Name of the index to get the documents from. If None, the\n DocumentStore's default index (self.index) will be used.\n :param batch_size: When working with large number of documents, batching can help reduce memory footprint.\n \"\"\"\n if headers:\n raise NotImplementedError(\"Milvus2DocumentStore does not support headers.\")\n\n index = index or self.index\n documents = super().get_documents_by_id(ids=ids, index=index, batch_size=batch_size)\n if self.return_embedding:\n self._populate_embeddings_to_docs(index=index, docs=documents)\n\n return documents\n\n def _populate_embeddings_to_docs(self, docs: List[Document], index: Optional[str] = None):\n index = index or self.index\n docs_with_vector_ids = []\n for doc in docs:\n if doc.meta and doc.meta.get(\"vector_id\") is not None:\n docs_with_vector_ids.append(doc)\n\n if len(docs_with_vector_ids) == 0:\n return\n\n ids = []\n vector_id_map = {}\n\n for doc in docs_with_vector_ids:\n vector_id: str = doc.meta[\"vector_id\"] # type: ignore\n # vector_id is always a string, but it isn't part of type hint\n ids.append(str(vector_id))\n vector_id_map[int(vector_id)] = doc\n\n search_result: QueryResult = self.collection.query(\n expr=f'{self.id_field} in [ {\",\".join(ids)} ]', output_fields=[self.embedding_field]\n )\n\n for result in search_result:\n doc = vector_id_map[result[\"id\"]]\n doc.embedding = np.array(result[\"embedding\"], \"float32\")\n\n def _delete_vector_ids_from_milvus(\n self, documents: Optional[List[Document]] = None, ids: Optional[List[str]] = None, index: Optional[str] = None\n ):\n index = index or self.index\n if ids is None:\n ids = []\n if documents is None:\n raise ValueError(\"You must either specify documents or ids to delete.\")\n for doc in documents:\n if \"vector_id\" in doc.meta:\n ids.append(str(doc.meta[\"vector_id\"]))\n else:\n docs = super().get_documents_by_id(ids=ids, index=index)\n ids = [doc.meta[\"vector_id\"] for doc in docs if \"vector_id\" in doc.meta]\n\n expr = f\"{self.id_field} in [{','.join(ids)}]\"\n import logging\n\n # logging.info(expr)\n self.collection.delete(expr)\n\n def get_embedding_count(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> int:\n \"\"\"\n Return the count of embeddings in the document store.\n \"\"\"\n if filters:\n raise Exception(\"filters are not supported for get_embedding_count in MilvusDocumentStore.\")\n return len(self.get_all_documents(index=index))\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.linalg.norm" ] ]
TaskeHAMANO/deblur
[ "4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b" ]
[ "deblur/workflow.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2015, The Deblur Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom os.path import splitext, join, basename, isfile, split\nfrom datetime import datetime\nfrom os import stat\nfrom glob import glob\nimport logging\nimport re\nimport scipy\nimport numpy as np\nimport subprocess\nimport time\nimport warnings\nimport io\nimport os\n\nimport skbio\nfrom biom.table import Table\nfrom biom.util import biom_open\nfrom biom import load_table\n\nfrom deblur.deblurring import deblur\n\n\nsniff_fasta = skbio.io.io_registry.get_sniffer('fasta')\nsniff_fastq = skbio.io.io_registry.get_sniffer('fastq')\n\n\ndef _get_fastq_variant(input_fp):\n # http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters\n variant = None\n variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']\n for v in variants:\n try:\n next(skbio.read(input_fp, format='fastq', variant=v))\n except Exception:\n continue\n else:\n variant = v\n break\n\n if variant is None:\n raise ValueError(\"Unknown variant, unable to interpret PHRED\")\n\n return variant\n\n\ndef sequence_generator(input_fp):\n \"\"\"Yield (id, sequence) from an input file\n\n Parameters\n ----------\n input_fp : filepath\n A filepath, which can be any valid fasta or fastq file within the\n limitations of scikit-bio's IO registry.\n\n Notes\n -----\n The use of this method is a stopgap to replicate the existing `parse_fasta`\n functionality while at the same time allowing for fastq support.\n\n Raises\n ------\n skbio.io.FormatIdentificationWarning\n If the format of the input file cannot be determined.\n\n Returns\n -------\n (str, str)\n The ID and sequence.\n\n \"\"\"\n logger = logging.getLogger(__name__)\n kw = {}\n if sniff_fasta(input_fp)[0]:\n format = 'fasta'\n elif sniff_fastq(input_fp)[0]:\n format = 'fastq'\n\n kw['variant'] = _get_fastq_variant(input_fp)\n else:\n # usually happens when the fasta file is empty\n # so need to return no sequences (and warn)\n msg = \"input file %s does not appear to be FASTA or FASTQ\" % input_fp\n logger.warn(msg)\n warnings.warn(msg, UserWarning)\n return\n\n # some of the test code is using file paths, some is using StringIO.\n if isinstance(input_fp, io.TextIOBase):\n input_fp.seek(0)\n\n for record in skbio.read(input_fp, format=format, **kw):\n yield (record.metadata['id'], str(record))\n\n\ndef trim_seqs(input_seqs, trim_len, left_trim_len):\n \"\"\"Trim FASTA sequences to specified length.\n\n Parameters\n ----------\n input_seqs : iterable of (str, str)\n The list of input sequences in (label, sequence) format\n trim_len : int\n Sequence trimming length. Specify a value of -1 to disable trimming.\n left_trim_len : int\n Sequence trimming from the 5' end. 
A value of 0 will disable this trim.\n\n\n Returns\n -------\n Generator of (str, str)\n The trimmed sequences in (label, sequence) format\n \"\"\"\n # counters for the number of trimmed and total sequences\n logger = logging.getLogger(__name__)\n\n okseqs = 0\n totseqs = 0\n\n if trim_len < -1:\n raise ValueError(\"Invalid trim_len: %d\" % trim_len)\n\n for label, seq in input_seqs:\n totseqs += 1\n\n if trim_len == -1:\n okseqs += 1\n yield label, seq\n elif len(seq) >= trim_len:\n okseqs += 1\n yield label, seq[left_trim_len:trim_len]\n\n if okseqs < 0.01*totseqs:\n logger = logging.getLogger(__name__)\n errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \\\n 'than the trim length (%d). ' \\\n 'Are you using the correct -t trim length?' \\\n % (totseqs-okseqs, totseqs, trim_len)\n logger.warn(errmsg)\n warnings.warn(errmsg, UserWarning)\n else:\n logger.debug('trimmed to length %d (%d / %d remaining)'\n % (trim_len, okseqs, totseqs))\n\n\ndef dereplicate_seqs(seqs_fp,\n output_fp,\n min_size=2,\n use_log=False,\n threads=1):\n \"\"\"Dereplicate FASTA sequences and remove singletons using VSEARCH.\n\n Parameters\n ----------\n seqs_fp : string\n filepath to FASTA sequence file\n output_fp : string\n file path to dereplicated sequences (FASTA format)\n min_size : integer, optional\n discard sequences with an abundance value smaller\n than integer\n use_log: boolean, optional\n save the vsearch logfile as well (to output_fp.log)\n default=False\n threads : int, optional\n number of threads to use (0 for all available)\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('dereplicate seqs file %s' % seqs_fp)\n\n log_name = \"%s.log\" % output_fp\n\n params = ['vsearch', '--derep_fulllength', seqs_fp,\n '--output', output_fp, '--sizeout',\n '--fasta_width', '0', '--minuniquesize', str(min_size),\n '--quiet', '--threads', str(threads)]\n if use_log:\n params.extend(['--log', log_name])\n sout, serr, res = _system_call(params)\n if not res == 0:\n logger.error('Problem running vsearch dereplication on file %s' %\n seqs_fp)\n logger.debug('parameters used:\\n%s' % params)\n logger.debug('stdout: %s' % sout)\n logger.debug('stderr: %s' % serr)\n return\n\n\ndef build_index_sortmerna(ref_fp, working_dir):\n \"\"\"Build a SortMeRNA index for all reference databases.\n\n Parameters\n ----------\n ref_fp: tuple\n filepaths to FASTA reference databases\n working_dir: string\n working directory path where to store the indexed database\n\n Returns\n -------\n all_db: tuple\n filepaths to SortMeRNA indexed reference databases\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('build_index_sortmerna files %s to'\n ' dir %s' % (ref_fp, working_dir))\n all_db = []\n for db in ref_fp:\n fasta_dir, fasta_filename = split(db)\n index_basename = splitext(fasta_filename)[0]\n db_output = join(working_dir, index_basename)\n logger.debug('processing file %s into location %s' % (db, db_output))\n params = ['indexdb_rna', '--ref', '%s,%s' %\n (db, db_output), '--tmpdir', working_dir]\n sout, serr, res = _system_call(params)\n if not res == 0:\n logger.error('Problem running indexdb_rna on file %s to dir %s. 
'\n 'database not indexed' % (db, db_output))\n logger.debug('stdout: %s' % sout)\n logger.debug('stderr: %s' % serr)\n logger.critical('execution halted')\n raise RuntimeError('Cannot index database file %s' % db)\n logger.debug('file %s indexed' % db)\n all_db.append(db_output)\n return all_db\n\n\ndef filter_minreads_samples_from_table(table, minreads=1, inplace=True):\n \"\"\"Filter samples from biom table that have less than\n minreads reads total\n\n Paraneters\n ----------\n table : biom.Table\n the biom table to filter\n minreads : int (optional)\n the minimal number of reads in a sample in order to keep it\n inplace : bool (optional)\n if True, filter the biom table in place, if false create a new copy\n\n Returns\n -------\n table : biom.Table\n the filtered biom table\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('filter_minreads_started. minreads=%d' % minreads)\n samp_sum = table.sum(axis='sample')\n samp_ids = table.ids(axis='sample')\n bad_samples = samp_ids[samp_sum < minreads]\n if len(bad_samples) > 0:\n logger.warn('removed %d samples with reads per sample<%d'\n % (len(bad_samples), minreads))\n table = table.filter(bad_samples, axis='sample',\n inplace=inplace, invert=True)\n else:\n logger.debug('all samples contain > %d reads' % minreads)\n return table\n\n\ndef fasta_from_biom(table, fasta_file_name):\n '''Save sequences from a biom table to a fasta file\n\n Parameters\n ----------\n table : biom.Table\n The biom table containing the sequences\n fasta_file_name : str\n Name of the fasta output file\n '''\n logger = logging.getLogger(__name__)\n logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)\n\n with open(fasta_file_name, 'w') as f:\n for cseq in table.ids(axis='observation'):\n f.write('>%s\\n%s\\n' % (cseq, cseq))\n logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)\n\n\ndef remove_artifacts_from_biom_table(table_filename,\n fasta_filename,\n ref_fp,\n biom_table_dir,\n ref_db_fp,\n threads=1,\n verbose=False,\n sim_thresh=None,\n coverage_thresh=None):\n \"\"\"Remove artifacts from a biom table using SortMeRNA\n\n Parameters\n ----------\n table : str\n name of the biom table file\n fasta_filename : str\n the fasta file containing all the sequences of the biom table\n\n Returns\n -------\n tmp_files : list of str\n The temp files created during the artifact removal step\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('getting 16s sequences from the biom table')\n\n # remove artifacts from the fasta file. 
output is in clean_fp fasta file\n clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,\n working_dir=biom_table_dir,\n ref_db_fp=ref_db_fp,\n negate=False, threads=threads,\n verbose=verbose,\n sim_thresh=sim_thresh,\n coverage_thresh=coverage_thresh)\n if clean_fp is None:\n logger.warn(\"No clean sequences in %s\" % fasta_filename)\n return tmp_files\n\n logger.debug('removed artifacts from sequences input %s'\n ' to output %s' % (fasta_filename, clean_fp))\n\n # read the clean fasta file\n good_seqs = {s for _, s in sequence_generator(clean_fp)}\n logger.debug('loaded %d sequences from cleaned biom table'\n ' fasta file' % len(good_seqs))\n\n logger.debug('loading biom table %s' % table_filename)\n table = load_table(table_filename)\n\n # filter and save the artifact biom table\n artifact_table = table.filter(list(good_seqs),\n axis='observation', inplace=False,\n invert=True)\n # remove the samples with 0 reads\n filter_minreads_samples_from_table(artifact_table)\n output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')\n write_biom_table(artifact_table, output_nomatch_fp)\n logger.info('wrote artifact only filtered biom table to %s'\n % output_nomatch_fp)\n # and save the reference-non-hit fasta file\n output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')\n fasta_from_biom(artifact_table, output_nomatch_fasta_fp)\n\n # filter and save the only 16s biom table\n table.filter(list(good_seqs), axis='observation')\n # remove the samples with 0 reads\n filter_minreads_samples_from_table(table)\n output_fp = join(biom_table_dir, 'reference-hit.biom')\n write_biom_table(table, output_fp)\n logger.info('wrote 16s filtered biom table to %s' % output_fp)\n # and save the reference-non-hit fasta file\n output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')\n fasta_from_biom(table, output_match_fasta_fp)\n\n # we also don't need the cleaned fasta file\n tmp_files.append(clean_fp)\n return tmp_files\n\n\ndef remove_artifacts_seqs(seqs_fp,\n ref_fp,\n working_dir,\n ref_db_fp,\n negate=False,\n threads=1,\n verbose=False,\n sim_thresh=None,\n coverage_thresh=None):\n \"\"\"Remove artifacts from FASTA file using SortMeRNA.\n\n Parameters\n ----------\n seqs_fp: string\n file path to FASTA input sequence file\n ref_fp: tuple\n file path(s) to FASTA database file\n working_dir: string\n working directory path\n ref_db_fp: tuple\n file path(s) to indexed FASTA database\n negate: boolean, optional\n if True, discard all input sequences aligning\n to reference database\n threads: integer, optional\n number of threads to use for SortMeRNA\n verbose: boolean, optional\n If true, output SortMeRNA errors\n sim_thresh: float, optional\n The minimal similarity threshold (between 0 and 1)\n for keeping the sequence\n if None, the default values used are 0.65 for negate=False,\n 0.95 for negate=True\n coverage_thresh: float, optional\n The minimal coverage threshold (between 0 and 1)\n for alignments for keeping the sequence\n if None, the default values used are 0.5 for negate=False,\n 0.95 for negate=True\n\n Returns\n -------\n output_fp : str\n Name of the artifact removed fasta file\n okseqs : int\n The number of sequences left after artifact removal\n tmp_files : list of str\n Names of the tmp files created\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('remove_artifacts_seqs file %s' % seqs_fp)\n\n if stat(seqs_fp).st_size == 0:\n logger.warn('file %s has size 0, continuing' % seqs_fp)\n return None, 0, []\n\n if 
coverage_thresh is None:\n if negate:\n coverage_thresh = 0.95 * 100\n else:\n coverage_thresh = 0.5 * 100\n\n if sim_thresh is None:\n if negate:\n sim_thresh = 0.95 * 100\n else:\n sim_thresh = 0.65 * 100\n\n # the minimal average bitscore per nucleotide\n bitscore_thresh = 0.65\n\n output_fp = join(working_dir,\n \"%s.no_artifacts\" % basename(seqs_fp))\n blast_output = join(working_dir,\n '%s.sortmerna' % basename(seqs_fp))\n aligned_seq_ids = set()\n for i, db in enumerate(ref_fp):\n logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'\n % (db, working_dir, ref_db_fp[i], seqs_fp))\n # run SortMeRNA\n # we use -e 100 to remove E-value based filtering by sortmerna\n # since we use bitscore/identity/coverage filtering instead\n params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %\n (db, ref_db_fp[i]),\n '--aligned', blast_output, '--blast', '3', '--best', '1',\n '--print_all_reads', '-v', '-e', '100']\n\n sout, serr, res = _system_call(params)\n if not res == 0:\n logger.error('sortmerna error on file %s' % seqs_fp)\n logger.error('stdout : %s' % sout)\n logger.error('stderr : %s' % serr)\n return output_fp, 0, []\n\n blast_output_filename = '%s.blast' % blast_output\n with open(blast_output_filename, 'r') as bfl:\n for line in bfl:\n line = line.strip().split('\\t')\n # if * means no match\n if line[1] == '*':\n continue\n # check if % identity[2] and coverage[13] are large enough\n if (float(line[2]) >= sim_thresh) and \\\n (float(line[13]) >= coverage_thresh) and \\\n (float(line[11]) >= bitscore_thresh * len(line[0])):\n aligned_seq_ids.add(line[0])\n\n if negate:\n def op(x): return x not in aligned_seq_ids\n else:\n def op(x): return x in aligned_seq_ids\n\n # if negate = False, only output sequences\n # matching to at least one of the databases\n totalseqs = 0\n okseqs = 0\n badseqs = 0\n with open(output_fp, 'w') as out_f:\n for label, seq in sequence_generator(seqs_fp):\n totalseqs += 1\n label = label.split()[0]\n if op(label):\n out_f.write(\">%s\\n%s\\n\" % (label, seq))\n okseqs += 1\n else:\n badseqs += 1\n logger.info('total sequences %d, passing sequences %d, '\n 'failing sequences %d' % (totalseqs, okseqs, badseqs))\n return output_fp, okseqs, [blast_output_filename]\n\n\ndef multiple_sequence_alignment(seqs_fp, threads=1):\n \"\"\"Perform multiple sequence alignment on FASTA file using MAFFT.\n\n Parameters\n ----------\n seqs_fp: string\n filepath to FASTA file for multiple sequence alignment\n threads: integer, optional\n number of threads to use. 0 to use all threads\n\n Returns\n -------\n msa_fp : str\n name of output alignment file or None if error encountered\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)\n\n # for mafft we use -1 to denote all threads and not 0\n if threads == 0:\n threads = -1\n\n if stat(seqs_fp).st_size == 0:\n logger.warning('msa failed. 
file %s has no reads' % seqs_fp)\n return None\n msa_fp = seqs_fp + '.msa'\n params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',\n '--thread', str(threads), seqs_fp]\n sout, serr, res = _system_call(params, stdoutfilename=msa_fp)\n if not res == 0:\n logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)\n logger.debug('stderr : %s' % serr)\n return None\n return msa_fp\n\n\ndef remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):\n \"\"\"Remove chimeras de novo using UCHIME (VSEARCH implementation).\n\n Parameters\n ----------\n seqs_fp: string\n file path to FASTA input sequence file\n output_fp: string\n file path to store chimera-free results\n threads : int\n number of threads (0 for all cores)\n\n Returns\n -------\n output_fp\n the chimera removed fasta file name\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('remove_chimeras_denovo_from_seqs seqs file %s'\n 'to working dir %s' % (seqs_fp, working_dir))\n\n output_fp = join(\n working_dir, \"%s.no_chimeras\" % basename(seqs_fp))\n\n # we use the parameters dn=0.000001, xn=1000, minh=10000000\n # so 1 mismatch in the A/B region will cancel it being labeled as chimera\n # and ~3 unique reads in each region will make it a chimera if\n # no mismatches\n params = ['vsearch', '--uchime_denovo', seqs_fp,\n '--nonchimeras', output_fp,\n '-dn', '0.000001', '-xn', '1000',\n '-minh', '10000000', '--mindiffs', '5',\n '--fasta_width', '0', '--threads', str(threads)]\n sout, serr, res = _system_call(params)\n if not res == 0:\n logger.error('problem with chimera removal for file %s' % seqs_fp)\n logger.debug('stdout : %s' % sout)\n logger.debug('stderr : %s' % serr)\n return output_fp\n\n\ndef sample_id_from_read_id(readid):\n \"\"\"Get SampleID from the split_libraries_fastq.py output\n fasta file read header\n\n Parameters\n ----------\n readid : str\n the fasta file read name\n\n Returns\n -------\n sampleid : str\n the sample id\n \"\"\"\n\n # get the sampleid_readid field\n sampleread = readid.split(' ')[0]\n\n # get the sampleid field\n sampleid = sampleread.rsplit('_', 1)[0]\n return sampleid\n\n\ndef split_sequence_file_on_sample_ids_to_files(seqs,\n outdir):\n \"\"\"Split FASTA file on sample IDs.\n\n Parameters\n ----------\n seqs: file handler\n file handler to demultiplexed FASTA file\n outdir: string\n dirpath to output split FASTA files\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('split_sequence_file_on_sample_ids_to_files'\n ' for file %s into dir %s' % (seqs, outdir))\n\n outputs = {}\n\n for bits in sequence_generator(seqs):\n sample = sample_id_from_read_id(bits[0])\n\n if sample not in outputs:\n outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')\n outputs[sample].write(\">%s\\n%s\\n\" % (bits[0], bits[1]))\n for sample in outputs:\n outputs[sample].close()\n logger.info('split to %d files' % len(outputs))\n\n\ndef write_biom_table(table, biom_fp):\n \"\"\"Write BIOM table to file.\n\n Parameters\n ----------\n table: biom.table\n an instance of a BIOM table\n biom_fp: string\n filepath to output BIOM table\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('write_biom_table to file %s' % biom_fp)\n with biom_open(biom_fp, 'w') as f:\n table.to_hdf5(h5grp=f, generated_by=\"deblur\")\n logger.debug('wrote to BIOM file %s' % biom_fp)\n\n\ndef get_files_for_table(input_dir,\n file_end='.trim.derep.no_artifacts'\n '.msa.deblur.no_chimeras'):\n \"\"\"Get a list of files to add to the output table\n\n Parameters:\n -----------\n 
input_dir : string\n name of the directory containing the deblurred fasta files\n file_end : string\n the ending of all the fasta files to be added to the table\n (default '.fasta.trim.derep.no_artifacts.msa.deblur.no_chimeras')\n\n Returns\n -------\n names : list of tuples of (string,string)\n list of tuples of:\n name of fasta files to be added to the biom table\n sampleid (file names without the file_end and path)\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('get_files_for_table input dir %s, '\n 'file-ending %s' % (input_dir, file_end))\n names = []\n for cfile in glob(join(input_dir, \"*%s\" % file_end)):\n if not isfile(cfile):\n continue\n sample_id = basename(cfile)[:-len(file_end)]\n sample_id = os.path.splitext(sample_id)[0]\n names.append((cfile, sample_id))\n\n logger.debug('found %d files' % len(names))\n return names\n\n\ndef create_otu_table(output_fp, deblurred_list,\n outputfasta_fp=None, minreads=0):\n \"\"\"Create a biom table out of all files in a directory\n\n Parameters\n ----------\n output_fp : string\n filepath to output BIOM table\n deblurred_list : list of (str, str)\n list of file names (including path), sampleid of all deblurred\n fasta files to add to the table\n outputfasta_fp : str, optional\n name of output fasta file (of all sequences in the table) or None\n to not write\n minreads : int, optional\n minimal number of reads per bacterial sequence in order to write\n it to the biom table and fasta file or 0 to write all\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('create_otu_table for %d samples, '\n 'into output table %s' % (len(deblurred_list), output_fp))\n\n # the regexp for finding the number of reads of a sequence\n sizeregexp = re.compile('(?<=size=)\\w+')\n seqdict = {}\n seqlist = []\n sampset = set()\n samplist = []\n # arbitrary size for the sparse results matrix so we won't run out of space\n obs = scipy.sparse.dok_matrix((int(1E9), len(deblurred_list)), dtype=np.double)\n\n # load the sequences from all samples into a sprase matrix\n sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}\n for (cfilename, csampleid) in deblurred_list:\n if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:\n csampleid = csampleid.rsplit('.', 1)[0]\n\n # test if sample has already been processed\n if csampleid in sampset:\n warnings.warn('sample %s already in table!', UserWarning)\n logger.error('sample %s already in table!' 
% csampleid)\n continue\n sampset.add(csampleid)\n samplist.append(csampleid)\n csampidx = len(sampset)-1\n # read the fasta file and add to the matrix\n for chead, cseq in sequence_generator(cfilename):\n cseq = cseq.upper()\n if cseq not in seqdict:\n seqdict[cseq] = len(seqlist)\n seqlist.append(cseq)\n cseqidx = seqdict[cseq]\n cfreq = float(sizeregexp.search(chead).group(0))\n try:\n obs[cseqidx, csampidx] += cfreq\n except IndexError:\n # exception means we ran out of space - add more OTUs\n shape = obs.shape\n obs.resize((shape[0]*2, shape[1]))\n obs[cseqidx, csampidx] = cfreq\n\n logger.info('for output biom table loaded %d samples, %d unique sequences'\n % (len(samplist), len(seqlist)))\n\n # and now make the sparse matrix the real size\n obs.resize((len(seqlist), len(samplist)))\n\n # do the minimal reads per otu filtering\n if minreads > 0:\n readsperotu = obs.sum(axis=1)\n keep = np.where(readsperotu >= minreads)[0]\n logger.info('keeping %d (out of %d sequences) with >=%d reads' %\n (len(keep), len(seqlist), minreads))\n obs = obs[keep, :]\n seqlist = list(np.array(seqlist)[keep])\n logger.debug('filtering completed')\n\n # convert the matrix to a biom table\n table = Table(obs.tocsr(), seqlist, samplist,\n observation_metadata=None,\n sample_metadata=None, table_id=None,\n generated_by=\"deblur\",\n create_date=datetime.now().isoformat())\n logger.debug('converted to biom table')\n\n # remove samples with 0 reads\n filter_minreads_samples_from_table(table)\n\n # save the merged otu table\n write_biom_table(table, output_fp)\n logger.info('saved to biom file %s' % output_fp)\n\n # and save the fasta file\n if outputfasta_fp is not None:\n logger.debug('saving fasta file')\n with open(outputfasta_fp, 'w') as f:\n for cseq in seqlist:\n f.write('>%s\\n%s\\n' % (cseq, cseq))\n logger.info('saved sequence fasta file to %s' % outputfasta_fp)\n\n\ndef launch_workflow(seqs_fp, working_dir, mean_error, error_dist,\n indel_prob, indel_max, trim_length, left_trim_length,\n min_size, ref_fp, ref_db_fp, threads_per_sample=1,\n sim_thresh=None, coverage_thresh=None):\n \"\"\"Launch full deblur workflow for a single post split-libraries fasta file\n\n Parameters\n ----------\n seqs_fp: string\n a post split library fasta file for debluring\n working_dir: string\n working directory path\n mean_error: float\n mean error for original sequence estimate\n error_dist: list\n list of error probabilities for each hamming distance\n indel_prob: float\n insertion/deletion (indel) probability\n indel_max: integer\n maximal indel number\n trim_length: integer\n sequence trim length\n left_trim_length: integer\n trim the first n reads\n min_size: integer\n upper limit on sequence abundance (discard sequences below limit)\n ref_fp: tuple\n filepath(s) to FASTA reference database for artifact removal\n ref_db_fp: tuple\n filepath(s) to SortMeRNA indexed database for artifact removal\n threads_per_sample: integer, optional\n number of threads to use for SortMeRNA/mafft/vsearch\n (0 for max available)\n sim_thresh: float, optional\n the minimal similarity for a sequence to the database.\n if None, take the defaults (0.65 for negate=False,\n 0.95 for negate=True)\n coverage_thresh: float, optional\n the minimal coverage for alignment of a sequence to the database.\n if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)\n\n Return\n ------\n output_no_chimers_fp : string\n filepath to fasta file with no chimeras of None if error encountered\n \"\"\"\n logger = logging.getLogger(__name__)\n 
logger.info('--------------------------------------------------------')\n logger.info('launch_workflow for file %s' % seqs_fp)\n\n # Step 1: Trim sequences to specified length\n output_trim_fp = join(working_dir, \"%s.trim\" % basename(seqs_fp))\n with open(output_trim_fp, 'w') as out_f:\n for label, seq in trim_seqs(\n input_seqs=sequence_generator(seqs_fp),\n trim_len=trim_length,\n left_trim_len=left_trim_length):\n out_f.write(\">%s\\n%s\\n\" % (label, seq))\n # Step 2: Dereplicate sequences\n output_derep_fp = join(working_dir,\n \"%s.derep\" % basename(output_trim_fp))\n dereplicate_seqs(seqs_fp=output_trim_fp,\n output_fp=output_derep_fp,\n min_size=min_size, threads=threads_per_sample)\n # Step 3: Remove artifacts\n output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,\n ref_fp=ref_fp,\n working_dir=working_dir,\n ref_db_fp=ref_db_fp,\n negate=True,\n threads=threads_per_sample,\n sim_thresh=sim_thresh)\n if not output_artif_fp:\n warnings.warn('Problem removing artifacts from file %s' %\n seqs_fp, UserWarning)\n logger.warning('remove artifacts failed, aborting')\n return None\n # Step 4: Multiple sequence alignment\n if num_seqs_left > 1:\n output_msa_fp = join(working_dir,\n \"%s.msa\" % basename(output_artif_fp))\n alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,\n threads=threads_per_sample)\n if not alignment:\n warnings.warn('Problem performing multiple sequence alignment '\n 'on file %s' % seqs_fp, UserWarning)\n logger.warning('msa failed. aborting')\n return None\n elif num_seqs_left == 1:\n # only one sequence after remove artifacts (but could be many reads)\n # no need to run MSA - just use the pre-msa file as input for next step\n output_msa_fp = output_artif_fp\n else:\n err_msg = ('No sequences left after artifact removal in '\n 'file %s' % seqs_fp)\n warnings.warn(err_msg, UserWarning)\n logger.warning(err_msg)\n return None\n # Step 5: Launch deblur\n output_deblur_fp = join(working_dir,\n \"%s.deblur\" % basename(output_msa_fp))\n with open(output_deblur_fp, 'w') as f:\n seqs = deblur(sequence_generator(output_msa_fp), mean_error,\n error_dist, indel_prob, indel_max)\n if seqs is None:\n warnings.warn('multiple sequence alignment file %s contains '\n 'no sequences' % output_msa_fp, UserWarning)\n logger.warn('no sequences returned from deblur for file %s' %\n output_msa_fp)\n return None\n for s in seqs:\n # remove '-' from aligned sequences\n s.sequence = s.sequence.replace('-', '')\n f.write(s.to_fasta())\n # Step 6: Chimera removal\n output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(\n output_deblur_fp, working_dir, threads=threads_per_sample)\n logger.info('finished processing file')\n return output_no_chimeras_fp\n\n\ndef start_log(level=logging.DEBUG, filename=None):\n \"\"\"start the logger for the run\n\n Parameters\n ----------\n level : int, optional\n logging.DEBUG, logging.INFO etc. 
for the log level (between 0-50).\n filename : str, optional\n name of the filename to save the log to or\n None (default) to use deblur.log.TIMESTAMP\n \"\"\"\n if filename is None:\n tstr = time.ctime()\n tstr = tstr.replace(' ', '.')\n tstr = tstr.replace(':', '.')\n filename = 'deblur.log.%s' % tstr\n logging.basicConfig(filename=filename, level=level,\n format='%(levelname)s(%(thread)d)'\n '%(asctime)s:%(message)s')\n logger = logging.getLogger(__name__)\n logger.info('*************************')\n logger.info('deblurring started')\n\n\ndef _system_call(cmd, stdoutfilename=None):\n \"\"\"Execute the command `cmd`\n Parameters\n ----------\n cmd : str\n The string containing the command to be run.\n stdoutfilename : str\n Name of the file to save stdout to or None\n (default) to not save to file\n stderrfilename : str\n Name of the file to save stderr to or None\n (default) to not save to file\n\n Returns\n -------\n tuple of (str, str, int)\n The standard output, standard error and exist status of the\n executed command\n\n Notes\n -----\n This function is ported and modified from QIIME\n (http://www.qiime.org), previously named\n qiime_system_call. QIIME is a GPL project, but we obtained permission from\n the authors of this function to port it to Qiita and keep it under BSD\n license.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('system call: %s' % cmd)\n if stdoutfilename:\n with open(stdoutfilename, 'w') as f:\n proc = subprocess.Popen(cmd, universal_newlines=True,\n shell=False, stdout=f,\n stderr=subprocess.PIPE)\n else:\n proc = subprocess.Popen(cmd, universal_newlines=True,\n shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n # Communicate pulls all stdout/stderr from the PIPEs\n # This call blocks until the command is done\n stdout, stderr = proc.communicate()\n return_value = proc.returncode\n\n return stdout, stderr, return_value\n" ]
[ [ "numpy.array", "numpy.where" ] ]
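A minimal call sketch for the launch_workflow routine defined in the record above, assuming its module is importable; every file path and numeric value below is an illustrative assumption, not a value taken from the deblur project.

# Hypothetical driver (illustrative paths and parameters only).
import logging

start_log(level=logging.INFO, filename='deblur.log.example')   # start_log is defined above
out_fp = launch_workflow(
    seqs_fp='sample1.fasta',                  # assumed post split-libraries fasta
    working_dir='work',                       # assumed scratch directory
    mean_error=0.005,                         # assumed mean read error
    error_dist=[1, 0.06, 0.02, 0.02, 0.01, 0.005, 0.005, 0.001, 0.001, 0.001, 0.0005],
    indel_prob=0.01, indel_max=3,
    trim_length=150, left_trim_length=0,
    min_size=2,
    ref_fp=('reference.fasta',),              # assumed artifact-removal database
    ref_db_fp=('reference.idx',),             # assumed SortMeRNA index
    threads_per_sample=1)
if out_fp is None:
    logging.getLogger(__name__).warning('deblur workflow failed for sample1.fasta')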
HermasTV/mmfu
[ "dc14f0c06dbff3f1c92606ff11fc30d782ea23ef" ]
[ "tests/face_cropper.py" ]
[ "import argparse\nimport os\nimport numpy as np\nfrom cv2 import cv2\nfrom face_utils.detection import Detector\nfrom face_utils.cropping import cropping\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = False, help = \"Path to the input image\")\nap.add_argument(\"-d\", \"--model\", required = False, help = \"detector model\",default='hog')\nap.add_argument(\"-s\", \"--size\", required = False, help = \"the output size\",default=128,type=int)\nap.add_argument(\"-o\", \"--out\", required = True, help = \"Path to out folder\")\nap.add_argument(\"-f\", \"--folder\", required = False, help = \"Path to input folder\")\n\n\nargs = ap.parse_args()\n\ndef detectImage(inPath,outFolder,model,size):\n \n image = cv2.imread(inPath)\n img= np.array(image)\n bbox = model.detect(img,2) #using (x,y,w,h) return mode\n outImg = cropping.crop(img,bbox,1,size,size)\n outFile = os.path.basename(inPath).split('.')[0]+\"_out.jpg\"\n outPath = os.path.join(outFolder,outFile)\n cv2.imwrite(outPath,outImg)\n \nif __name__ == \"__main__\":\n \n modelName = args.model\n model = Detector(modelName)\n outFolder = args.out\n size = args.size\n \n if 'image' in args : \n imgPath = args.image\n detectImage(imgPath,outFolder,model,size)\n \n else: \n for file in os.listdir(args.folder):\n imgPath = os.path.join(args.folder,file)\n detectImage(imgPath,outFolder,model,size)" ]
[ [ "numpy.array" ] ]
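One behaviour of the face_cropper script above that is easy to miss: argparse stores a None attribute for the omitted --image option, so the `'image' in args` test is always true and the folder branch is never reached. A small standard-library-only check of that behaviour:

import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False)
ap.add_argument("-f", "--folder", required=False)
args = ap.parse_args(["-f", "some_dir"])    # only --folder supplied

print('image' in args)         # True  -> the image branch above is still taken
print(args.image is not None)  # False -> a stricter guard for this case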
yuchenhou/eagle
[ "8050b4c023d3bef4bd80c9ad6b10615ba54eb953" ]
[ "elephant/clean.py" ]
[ "import pandas\n\n\ndef main():\n links = pandas.read_csv('../resources/' + 'Newman-Cond_mat_95-99-co_occurrence.txt', sep=' ', header=None)\n links.to_csv('../graph/' + 'authors.tsv', sep='\\t', index=False, header=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv" ] ]
seanbruno/vinyl_inventory
[ "93783089319a1e6228e4fe0e9c3522a7483fafdd" ]
[ "csv_to_html.py" ]
[ "#!/usr/local/bin/python\n\nimport sys, getopt\nimport pandas as pd\n\ndef usage():\n\tprint ('csv_to_html.py -h -i <input_csv> -o <output_html>')\n\tsys.exit(2)\n\ndef main(argv):\n\ttry:\n\t\topts, args = getopt.getopt(argv,\"hi:o:\",[\"help\",\"input_csv=\",\"output_html=\"])\n\texcept getopt.GetoptError as err:\n\t\tprint(err)\n\t\tusage()\n\t\tsys.exit(2)\n\tinput_csv = ''\n\toutput_html = ''\n\tfor opt, arg in opts:\n\t\tif opt in (\"-h\", \"--help\"):\n\t\t\tusage()\n\t\telif opt in (\"-i\", \"--input_csv\"):\n\t\t\tinput_csv = arg\n\t\telif opt in (\"-o\", \"--output_html\"):\n\t\t\toutput_html = arg\n\t\telse:\n\t\t\tassert False, usage();\n\n\n\t# Open the CSV for conversion\n\tfd = pd.read_csv(input_csv)\n\t\n\t# Use the .to_html() to get your table in html\n\tfd.to_html(output_html, index=False, na_rep=\"\", justify=\"center\")\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n" ]
[ [ "pandas.read_csv" ] ]
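The csv_to_html script above is a thin command-line wrapper around pandas; the same conversion can be exercised directly (the file name below is a placeholder):

import pandas as pd

# Same to_html options as the script: no index column, blank cells for NaN, centred column labels.
df = pd.DataFrame({"artist": ["A", "B"], "album": ["X", None]})
df.to_html("inventory.html", index=False, na_rep="", justify="center")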
tbredbenner/unsupervised_learning_of_dense_shape_correspondence
[ "440643d633a6db3f947ac71a247c8083cb3aeadc" ]
[ "Single Pair Experiment/models_self_supervised.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom ops_self_supervised import *\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\ndef fmnet_model(phase, part_shot, model_shot, part_dist_map , model_dist_map, part_evecs,\tpart_evecs_trans, model_evecs, model_evecs_trans):\n\t\"\"\"Build FM-net model.\n\n\tArgs:\n\t\tphase: train\\test.\n\t\tpart_shot: SHOT descriptor of source shape (part).\n\t\tmodel_shot: SHOT descriptor of target shape (model).\n\t\tdist_map: distance map on target shape to evaluate geodesic error\n\t\tpart_evecs: eigenvectors on source shape\n\t\tpart_evecs_trans: transposed part_evecs with mass matrix correction\n\t\tmodel_evecs: eigenvectors on target shape\n\t\tmodel_evecs_trans: transposed model_evecs with mass matrix correction\n\n\t\"\"\"\n\n\tnet = {}\n\n\tfor i_layer in range(FLAGS.num_layers):\n\t\twith tf.variable_scope(\"layer_%d\" % i_layer) as scope:\n\t\t\tif i_layer == 0:\n\t\t\t\tnet['layer_%d_part' % i_layer] = res_layer(part_shot, dims_out=int(part_shot.shape[-1]), scope=scope,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t phase=phase)\n\t\t\t\tscope.reuse_variables()\n\t\t\t\tnet['layer_%d_model' % i_layer] = res_layer(model_shot, dims_out=int(model_shot.shape[-1]), scope=scope,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tphase=phase)\n\t\t\telse:\n\t\t\t\tnet['layer_%d_part' % i_layer] = res_layer(net['layer_%d_part' % (i_layer - 1)],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t dims_out=int(part_shot.shape[-1]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t scope=scope, phase=phase)\n\t\t\t\tscope.reuse_variables()\n\t\t\t\tnet['layer_%d_model' % i_layer] = res_layer(net['layer_%d_model' % (i_layer - 1)],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdims_out=int(part_shot.shape[-1]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tscope=scope, phase=phase)\n\n\t# project output features on the shape Laplacian eigen functions\n\tlayer_C_est = i_layer + 1 # grab current layer index\n\tA = tf.matmul(part_evecs_trans, net['layer_%d_part' % (layer_C_est - 1)])\n\tnet['A'] = A\n\tB = tf.matmul(model_evecs_trans, net['layer_%d_model' % (layer_C_est - 1)])\n\tnet['B'] = B\n\n\t# FM-layer: evaluate C_est\n\tnet['C_est'], safeguard_inverse = solve_ls(A, B)\n\n\t# Evaluate loss via soft-correspondence error\n\twith tf.variable_scope(\"pointwise_corr_loss\"):\n\t\tP_norm, unsupervised_loss = pointwise_corr_layer(net['C_est'], model_evecs, part_evecs_trans, model_dist_map, part_dist_map)\n\n\n\treturn unsupervised_loss, safeguard_inverse, P_norm, net" ]
[ [ "tensorflow.variable_scope", "tensorflow.matmul" ] ]
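The FM layer above calls solve_ls(A, B) (defined in ops_self_supervised, which is not part of this file) to estimate a functional map C relating the two sets of spectral coefficients, roughly C·A ≈ B. A plain-NumPy sketch of that least-squares step with random stand-in matrices; the dimensions are assumptions:

import numpy as np

k, d = 120, 352            # assumed: spectral basis size, descriptor width
A = np.random.randn(k, d)  # part features projected on part eigenfunctions
B = np.random.randn(k, d)  # model features projected on model eigenfunctions

# min_C ||C A - B||_F^2 is solved via the transposed system  A^T C^T = B^T
C_est = np.linalg.lstsq(A.T, B.T, rcond=None)[0].T
print(C_est.shape)         # (k, k) functional map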
ansin218/gutereise
[ "af64d93bc02f0870671dcfe9bfacb87dec835584" ]
[ "misc/price_scraper_v1.py" ]
[ "from amadeus import Client\nimport pandas as pd\nimport datetime as dt\nimport glob\n\napi_file = open('API_KEY.txt', 'r')\napi_credentials = api_file.read()\nc_id = api_credentials.split(' ', 1)[0]\nc_secret = api_credentials.split(' ', 1)[1]\napi_file.close()\n\namadeus = Client(\n client_id = c_id,\n client_secret = c_secret,\n log_level = 'debug'\n)\n\nindian_cities_code = ['MAA', 'CCU', 'BOM', 'DEL', 'BLR']\nindian_cities_name = ['Chennai', 'Kolkata', 'Mumbai', 'Delhi', 'Bengaluru']\n\ntoday_date = dt.date.today()\n\ngap_days = [1, 7, 15, 30, 60, 90]\n\nfinalDf = pd.DataFrame()\n\nfor to_city_code in indian_cities_code:\n \n for days in gap_days:\n \n from_date = str(today_date + dt.timedelta(days = days))\n to_date = str(today_date + dt.timedelta(days = days + 20))\n \n result = amadeus.shopping.flight_offers.get(\n origin = 'MUC', \n destination = to_city_code,\n departureDate = from_date,\n returnDate = to_date,\n currency = 'EUR',\n max = 250\n )\n result_data = result.data\n \n airlineName = []\n priceList = []\n \n for i in range(len(result_data)):\n x = result_data[i]['offerItems'][0]['price']['total']\n y = result_data[i]['offerItems'][0]['price']['totalTaxes']\n z = float(x) + float(y)\n a = result_data[i]['offerItems'][0]['services'][0]['segments'][0]['flightSegment']['carrierCode']\n airlineName.append(a)\n priceList.append(float(x))\n \n df = pd.DataFrame()\n df['airline_code'] = airlineName\n df['ticket_price'] = priceList\n df.sort_values('ticket_price', ascending = True, inplace = True)\n df = df.drop_duplicates('airline_code', keep = 'first')\n \n df = df[df['airline_code'] != '9B']\n df['airline_name'] = df['airline_code'].map({'BA': 'British Airways', 'LH': 'Lufthansa', 'WY': 'Oman Air', \n 'EK': 'Emirates', 'EY': 'Etihad', 'TK': 'Turkish Airlines',\n 'KL': 'KLM Royal Dutch Airlines', 'AZ': 'Alitalia',\n 'AF': 'Air France', 'LX': 'Swiss Air', 'QR': 'Qatar Airways',\n 'QF': 'Qantas Airways', 'SQ': 'Singapore Airlines',\n 'TG': 'Thai Airways', 'AY': 'Finn Air', 'SU': 'Aeroflot',\n 'LO': 'LOT Polish Airlines', 'UL': 'Sri Lankan Airlines',\n 'OS': 'Austrian Airlines', 'CX': 'Cathay Pacific', \n 'AI': 'Air India', 'KU': 'Kuwait Airways', 'MS': 'Egypt Air',\n 'PS': 'Ukraine International Airlines', 'SV': 'Saudia',\n 'MU': 'China Eastern Airlines', 'NH': 'All Nippon Airways',\n 'CA': 'Air China'})\n df['from_city'] = 'MUC'\n df['to_city'] = to_city_code\n df['from_date'] = from_date\n df['to_date'] = to_date\n df['crawl_date'] = str(today_date)\n finalDf = finalDf.append(df)\n \nexport_file_name = 'dataset/_' + str(today_date) + '_muc_in.csv'\nfinalDf.to_csv(export_file_name, index = False)\n\nlist_of_files = glob.glob('dataset/*')\n\nbigDf = pd.DataFrame()\n\nfor i in list_of_files:\n smallDf = pd.read_csv(i)\n bigDf = bigDf.append(smallDf)\n \nbigDf.to_csv('final_data/_raw_dataset.csv', index = False)" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
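The cheapest-offer-per-airline step in the scraper above depends on sorting by price before drop_duplicates; a tiny standalone illustration of that pandas pattern:

import pandas as pd

df = pd.DataFrame({"airline_code": ["LH", "EK", "LH", "EK"],
                   "ticket_price": [612.0, 540.0, 598.0, 575.0]})
df = df.sort_values("ticket_price", ascending=True)
cheapest = df.drop_duplicates("airline_code", keep="first")
print(cheapest)   # one row per airline, lowest fare kept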
yongzx/lm-evaluation-harness
[ "26f0233fa4b6ca5b2d663a017dc4352ac528648a" ]
[ "lm_eval/evaluator.py" ]
[ "import collections\nimport itertools\nimport random\nimport lm_eval.metrics\nimport lm_eval.models\nimport lm_eval.tasks\nimport lm_eval.base\nimport numpy as np\nfrom lm_eval.utils import positional_deprecated\n\n\n@positional_deprecated\ndef simple_evaluate(model, model_args=None, tasks=[],\n num_fewshot=0, batch_size=None, device=None,\n no_cache=False, limit=None, bootstrap_iters=100000,\n description_dict=None):\n \"\"\"Instantiate and evaluate a model on a list of tasks.\n\n :param model: Union[str, LM]\n Name of model or LM object, see lm_eval.models.get_model\n :param model_args: Optional[str]\n String arguments for each model class, see LM.create_from_arg_string. \n Ignored if `model` argument is a LM object.\n :param tasks: list[Union[str, Task]]\n List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.\n :param num_fewshot: int\n Number of examples in few-shot context\n :param batch_size: int, optional\n Batch size for model\n :param device: str, optional\n PyTorch device (e.g. \"cpu\" or \"cuda:0\") for running models\n :param no_cache: bool\n Whether or not to cache\n :param limit: int, optional\n Limit the number of examples per task (only use this for testing)\n :param bootstrap_iters:\n Number of iterations for bootstrap statistics\n :param description_dict: dict[str, str]\n Dictionary of custom task descriptions of the form: `task_name: description` \n :return\n Dictionary of results\n \"\"\"\n random.seed(1234)\n np.random.seed(1234)\n\n assert tasks != [], \"No tasks specified\"\n\n if isinstance(model, str):\n if model_args is None: model_args = \"\"\n lm = lm_eval.models.get_model(model).create_from_arg_string(model_args, {\n 'batch_size': batch_size, 'device': device\n })\n else:\n assert isinstance(model, lm_eval.base.LM)\n lm = model\n\n if not no_cache:\n lm = lm_eval.base.CachingLM(\n lm, 'lm_cache/' + model + '_' + model_args.replace('=', '-').replace(',', '_').replace('/', '-') + '.db'\n )\n \n task_dict = lm_eval.tasks.get_task_dict(tasks)\n\n results = evaluate(\n lm=lm,\n task_dict=task_dict,\n num_fewshot=num_fewshot,\n limit=limit,\n description_dict=description_dict\n )\n\n # add info about the model and few shot config\n results[\"config\"] = {\n \"model\": model,\n \"model_args\": model_args,\n \"num_fewshot\": num_fewshot,\n \"batch_size\": batch_size,\n \"device\": device,\n \"no_cache\": no_cache,\n \"limit\": limit,\n \"bootstrap_iters\": bootstrap_iters,\n \"description_dict\": description_dict\n }\n\n return results\n\n\n@positional_deprecated\ndef evaluate(lm, task_dict, provide_description=None, num_fewshot=0, limit=None, bootstrap_iters=100000, description_dict=None):\n \"\"\"Instantiate and evaluate a model on a list of tasks.\n\n :param lm: obj\n Language Model\n :param task_dict: dict[str, Task]\n Dictionary of tasks. 
Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.\n :param provide_description: bool\n Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method\n :param num_fewshot: int\n Number of examples in few-shot context\n :param limit: int, optional\n Limit the number of examples per task (only use this for testing)\n :param bootstrap_iters:\n Number of iterations for bootstrap statistics\n :param description_dict: dict[str, str]\n Dictionary of custom task descriptions of the form: `task_name: description` \n :return\n Dictionary of results\n \"\"\"\n # TODO: completely refactor this entire function to not be a huge mess, ideally breaking it down into smaller pieces\n\n # TODO: todo: implement proper description-providing system\n assert not provide_description # not implemented.\n if provide_description is not None:\n # nudge people to not specify it at all\n print(\"WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict\")\n\n task_dict_items = [\n (name, task)\n for name, task in task_dict.items()\n if(task.has_validation_docs() or task.has_test_docs())\n ]\n\n results = collections.defaultdict(dict)\n versions = collections.defaultdict(dict)\n\n requests = collections.defaultdict(list)\n requests_origin = collections.defaultdict(list)\n\n # If we ever run into issues where the eval tasks don't fit in memory and we can't afford a machine with bigger\n # memory, we can always modify this plumbing to support that, but I didn't want to include it just yet because\n # over-engineering is bad (or we could make it write the requests to disk and then read them back out again\n # - probably using an sqlite db because of all the moving parts we have\n\n # TODO: we need unit tests & sanity checks or something to ensure that the return of `validation_docs` is stable\n docs = {}\n\n # get lists of each type of request\n for task_name, task in task_dict_items:\n versions[task_name] = task.VERSION\n # default to test doc, fall back to val doc if validation unavailable\n # TODO: the test-fallback-to-val system isn't final, we should revisit it at some point\n if task.has_test_docs():\n task_doc_func = task.test_docs\n elif task.has_validation_docs():\n task_doc_func = task.validation_docs\n else:\n raise RuntimeError(\"Task has neither test_docs nor validation_docs\")\n\n # deterministically shuffle docs and chop off the first `limit` because sometimes docs are in some kind of order\n task_docs = list(task_doc_func())\n rnd = random.Random()\n rnd.seed(42)\n rnd.shuffle(task_docs)\n\n description = description_dict[task_name] if description_dict and task_name in description_dict else \"\"\n\n for doc_id, doc in enumerate(itertools.islice(task_docs, 0, limit)):\n docs[(task_name, doc_id)] = doc\n ctx = task.fewshot_context(\n doc=doc,\n num_fewshot=num_fewshot,\n rnd=rnd,\n description=description\n )\n reqs = task.construct_requests(doc, ctx)\n if not isinstance(reqs, (list, tuple)):\n reqs = [reqs]\n for i, req in enumerate(reqs):\n requests[req.request_type].append(req)\n # i: index in requests for a single task instance\n # doc_id: unique id that we can get back to a doc using `docs`\n requests_origin[req.request_type].append((i, task_name, doc, doc_id))\n\n # all responses for each (task, doc)\n process_res_queue = collections.defaultdict(list)\n\n # execute each type of request\n for reqtype, reqs in 
requests.items():\n # TODO: right now, this code runs multiple separate LM requests for multiple Requests differing\n # only in index. We could implement some kind of caching, but that would be more of a band-aid\n # solution. we could also implement some kind of auto-grouping here;\n # they should end up next to each other.\n\n print(\"Running\", reqtype, \"requests\")\n resps = getattr(lm, reqtype)([req.args for req in reqs])\n resps = [x if req.index is None else x[req.index] for x, req in zip(resps, reqs)]\n\n for resp, (i, task_name, doc, doc_id) in zip(resps, requests_origin[reqtype]):\n process_res_queue[(task_name, doc_id)].append((i, resp))\n \n vals = collections.defaultdict(list)\n\n # unpack results and sort back in order and return control to Task\n for (task_name, doc_id), requests in process_res_queue.items():\n requests.sort(key=lambda x: x[0])\n requests = [x[1] for x in requests]\n\n task = task_dict[task_name]\n doc = docs[(task_name, doc_id)]\n\n metrics = task.process_results(doc, requests)\n for metric, value in metrics.items():\n vals[(task_name, metric)].append(value)\n \n # aggregate results\n for (task_name, metric), items in vals.items():\n task = task_dict[task_name]\n results[task_name][metric] = task.aggregation()[metric](items)\n\n # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap\n # so we run them less iterations. still looking for a cleaner way to do this\n stderr = lm_eval.metrics.stderr_for_metric(\n metric=task.aggregation()[metric],\n bootstrap_iters=min(bootstrap_iters, 1000) if metric in [\"bleu\", \"chrf\", \"ter\"] else bootstrap_iters,\n )\n if stderr is not None:\n results[task_name][metric + \"_stderr\"] = stderr(items)\n \n return {\n \"results\": dict(results),\n \"versions\": dict(versions)\n }\n\n\ndef make_table(result_dict):\n \"\"\"Generate table of results.\"\"\"\n from pytablewriter import MarkdownTableWriter, LatexTableWriter\n\n md_writer = MarkdownTableWriter()\n latex_writer = LatexTableWriter()\n md_writer.headers = [\"Task\", \"Version\", \"Metric\", \"Value\", \"\", \"Stderr\"]\n latex_writer.headers = [\"Task\", \"Version\", \"Metric\", \"Value\", \"\", \"Stderr\"]\n\n values = []\n\n for k, dic in result_dict[\"results\"].items():\n version = result_dict[\"versions\"][k]\n for m, v in dic.items():\n if m.endswith(\"_stderr\"):\n continue\n\n if m + \"_stderr\" in dic:\n se = dic[m + \"_stderr\"]\n values.append([k, version, m, '%.4f' % v, '±', '%.4f' % se])\n else:\n values.append([k, version, m, '%.4f' % v, '', ''])\n k = \"\"\n version = \"\"\n md_writer.value_matrix = values\n latex_writer.value_matrix = values\n\n # todo: make latex table look good\n # print(latex_writer.dumps())\n\n return md_writer.dumps()\n" ]
[ [ "numpy.random.seed" ] ]
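A minimal call sketch for simple_evaluate as defined above; the model string, model_args and task name are illustrative assumptions about what the surrounding package registers.

# Assumes the lm_eval package shown above is installed and importable.
from lm_eval.evaluator import simple_evaluate, make_table

results = simple_evaluate(
    model="gpt2",                    # assumed name resolved by lm_eval.models.get_model
    model_args="pretrained=gpt2",    # assumed HF checkpoint argument
    tasks=["lambada"],               # assumed task registered in lm_eval.tasks
    num_fewshot=0,
    batch_size=1,
    device="cpu",
    no_cache=True)                   # skip the CachingLM wrapper
print(make_table(results))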
HANDS-Research-Group/HNN_Soil_Reaction_Front
[ "17f65b18ddf3a93bde69111786912850702406ab" ]
[ "final.py" ]
[ "import matplotlib.style\nimport matplotlib as mpl\nmpl.style.use('classic')\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.facecolor'] = 'white'\nplt.rcParams['figure.facecolor'] = 'white'\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.sans-serif'] = ['Arial']\nimport tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\nimport os\nfrom itertools import combinations\nimport sympy as sp\nfrom sympy import *\n\nWHETHER_TRAIN = True\nONLYBEST = True\nHOME_DIR = \"./test\"\n\nbest_models = ['7a', '7b', '9a', '11a', '11c', '14a','14b']\n\n\n\nvaria_dict = {\n 'age':'t',\n 'MAT': 'T',\n 'precipitation':'P',\n 'erosion':'E',\n 'quartz':'Q',\n 'albite':'A'\n}\n\n### PARAMETEER LIST\n# iteration_symbol = 'Y'\neach_variables = ['age','MAT','precipitation','erosion','quartz','albite']\n# for a in each_variables:\n\nres = [list(com) for sub in range(len(each_variables)) for com in combinations(each_variables, sub + 1)]\n\ntotal_summary = {\n 'iteration': [], ## A. Hybrid model fit to SCT1,2,3,5\n 'function': [], ## f(t,a,Q)\n 'Targetted Soil(s)': [], # Panola\n 'Hybrid Model MSE': [],\n 'Physics-based Model MSE': [],\n 'f(z) (hybrid model)': [],\n 'gk (physics)': [],\n}\n####\n\nNUM_STEPS = 15000\nlr = 0.001\n# res = res[:2]\ntraining_locs_list = [\n['SCT1', 'SCT2', 'SCT3', 'SCT5','Panola'],\n['SCT1', 'SCT2', 'SCT3', 'SCT5','Davis'],\n['SCT1', 'SCT2', 'SCT3', 'SCT5']\n]\n# enumerate_list = list(range(len(res)))[40:]\n# res_1 = res[0]\n# res_43 = res[43]\n\n# res = [res_1,res_43]\n\nfor variables, iteration_sym in zip(res,range(len(res))):\n iteration_sym +=1\n NUM_VARIABLE = len(variables)\n suffix_list = \"abc\"\n for training_locs, suffix in zip(training_locs_list,suffix_list):\n if suffix == 'c' and 'MAT' in variables:\n continue\n if suffix == 'c' and 'precipitation' in variables:\n continue\n if suffix == 'c' and 'erosion' in variables:\n continue\n\n\n\n\n tf.reset_default_graph()\n\n iteration_symbol = str(iteration_sym)\n iteration_symbol += suffix\n\n if ONLYBEST:\n if iteration_symbol not in best_models:\n continue\n # training_locs = ['SCT1', 'SCT2', 'SCT3', 'SCT5','Panola']\n # testing_locs = ['panola','davis','JH5']\n location_list = ['SCT1','SCT2','SCT3','SCT5','Panola','Davis','Jughandle']\n testing_locs = [loc for loc in location_list if loc not in training_locs ]\n # ['sct1','sct2','sct3','sct5']\n # ['sct1' ,'sct2','sct3','sct5','davis']\n \n PATH = os.path.join(HOME_DIR,'iter_{0}'.format(iteration_symbol))\n if not os.path.exists(PATH):\n os.mkdir(PATH)\n\n\n def cal_A(c0,cx0):\n c0 = np.array(c0)\n cx0 = np.array(cx0)\n A = (c0 - cx0) / cx0\n return A\n\n ## data initialiåtion\n data = pd.read_csv('./data/all_data.csv')\n # variable_list = ['age','MAT','precipitation','erosion','quartz','albite'] ##t,T,P,E,q,a\n\n\n def formula(depth,A,gk):\n # depth = input_layer[:, 0:1]\n # c0 = input_layer[:, 1:2]\n # gk = input_layer[:, 2:3]\n # cx0 = input_layer[:, 3:4]\n # A = (c0 - cx0) / cx0\n return 1 / (1 + A * tf.math.exp(gk * depth))\n\n def tt_module(Z,hsize=[16, 8]):\n ### set 0-3\n # h1 = tf.layers.dense(Z,hsize[0])\n h2 = tf.layers.dense(Z, 16, activation='sigmoid')\n # out = tf.layers.dense(h2,8,activation='sigmoid')\n out = tf.layers.dense(h2,1,activation='linear')\n return out\n\n def analytical(depth, A, tt_output):\n return 1 / (1 + A * tf.math.exp(tt_output * depth))\n\n\n depth = tf.placeholder(tf.float32,[None,1]) ## depth\n Z = tf.placeholder(tf.float32,[None,NUM_VARIABLE]) ## Age, MAT, 
precipitation,erosion,QUARTZ\n A = tf.placeholder(tf.float32,[None,1]) ##\n concentration = tf.placeholder(tf.float32,[None,1])\n gk = tf.placeholder(tf.float32,[None,1])\n\n tt_output = tt_module(Z)\n concentration_pred = analytical(depth,A,tt_output)\n phy_pred = formula(depth,A,gk)\n loss = tf.reduce_mean(tf.squared_difference(concentration_pred, concentration))\n phy_loss = tf.reduce_mean(tf.squared_difference(phy_pred,concentration))\n step = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss) # G Train step\n\n\n sess = tf.Session()\n tf.global_variables_initializer().run(session=sess)\n saver = tf.train.Saver()\n\n\n data_tmp = data[data['location'].isin(training_locs)]\n\n depth_batch = np.array(data_tmp['depth']).reshape(data_tmp['depth'].shape[0],1)\n # Z_batch = np.array(data[['age','MAT']]).reshape(data['age'].shape[0],2)\n Z_batch = np.array(data_tmp[variables]).reshape(data_tmp['age'].shape[0],NUM_VARIABLE)\n gk_batch = np.array(data_tmp['gk']).reshape(data_tmp['gk'].shape[0],1)\n c0 = data_tmp[['c0']]\n cx0 = data_tmp[['cx0']]\n A_batch = cal_A(c0,cx0)\n concentration_batch = data_tmp[['concentration']]\n concentration_batch_normalize = np.array(concentration_batch)/np.array(c0)\n\n ## TRAINING\n\n print('==================== TRAINING ================================')\n if WHETHER_TRAIN:\n for i in range(NUM_STEPS):\n _, step_loss = sess.run([step,loss], feed_dict={depth: depth_batch, Z: Z_batch, A: A_batch, concentration: concentration_batch_normalize})\n if i%1000 == 0:\n print(i,step_loss)\n save_path = saver.save(sess, os.path.join(PATH,\"model.ckpt\".format(iteration_symbol) ))\n # \"model/model_{0}.ckpt\".fo/rmat(iteration_symbol))\n print(\"Model saved in path: %s\" % save_path)\n phy_loss_tmp = sess.run([phy_loss], feed_dict={depth: depth_batch, gk:gk_batch, A: A_batch, concentration: concentration_batch_normalize})\n print(phy_loss_tmp)\n\n loc_names = ''\n for i in training_locs:\n loc_names += (i+' ')\n loc_names = loc_names[:-1]\n function_name = 'f('\n # print(variables)\n for i in variables:\n function_name += varia_dict[i] + ','\n function_name = function_name[:-1]+')'\n total_summary['iteration'].append(str(iteration_symbol)+'. 
'+ function_name +' fit to '+ loc_names + '.')\n total_summary['function'].append(function_name)\n total_summary['Targetted Soil(s)'].append(loc_names)\n total_summary['Hybrid Model MSE'].append(step_loss)\n total_summary['Physics-based Model MSE'].append(phy_loss_tmp)\n total_summary['f(z) (hybrid model)'].append('-')\n total_summary['gk (physics)'].append('-')\n\n print(\"======== GENEEATE FUNCTION\")\n input_mat = []\n for i in variables:\n input_mat.append(Symbol(varia_dict[i]))\n # input_mat = np.array([age, temperature, precipitation, quartz])\n input_mat = np.array(input_mat)\n\n for i in range(int(len(tf.trainable_variables()) / 2)):\n\n w = tf.trainable_variables()[2 * i].eval(sess)\n b = tf.trainable_variables()[2 * i + 1].eval(sess)\n input_mat = np.dot(input_mat, w) + b\n if i == 0:\n input_mat = [1 / (sp.exp(-a) + 1) for a in input_mat]\n # if i == 1:\n # # input_mat = [sp.tanh(a) for a in input_mat]\n # input_mat = [2 / (sp.exp(-2*a) + 1) -1 for a in input_mat]\n\n # print(input_mat)\n assert len(input_mat)==1\n text_file = open(os.path.join(PATH,'Write_out_function.txt'), \"w\")\n text_file.write(\"%s\" % str(input_mat[0]))\n text_file.close()\n\n ## TESTING\n print('==================== TESTING AND PLOTTING =================================')\n saver.restore(sess, os.path.join(PATH,\"model.ckpt\".format(iteration_symbol)))\n\n for loc in testing_locs:\n print(\"testing on {0}\".format(loc))\n data_tmp = data[data['location']==loc]\n depth_batch_test = np.array(data_tmp['depth']).reshape(data_tmp['depth'].shape[0],1)\n\n\n # Z_batch = np.array(data[['age','MAT']]).reshape(data['age'].shape[0],2)\n Z_batch_test = np.array(data_tmp[variables]).reshape(data_tmp['age'].shape[0], NUM_VARIABLE)\n # if PANOLA1500 and loc == 'Panola':\n\n gk_batch_test = np.array(data_tmp['gk']).reshape(data_tmp['gk'].shape[0], 1)\n c0 = data_tmp[['c0']]\n cx0 = data_tmp[['cx0']]\n A_batch_test = cal_A(c0, cx0)\n concentration_batch_test = data_tmp[['concentration']]\n concentration_batch_normalize_test = np.array(concentration_batch_test) / np.array(c0)\n\n test_loss,phy_loss_tmp, phy_pred_tmp,concentration_pred_tmp, concentration_measured,gk_pred =sess.run([loss,phy_loss,phy_pred,concentration_pred,concentration,tt_output],\n feed_dict = {depth: depth_batch_test, gk:gk_batch_test, Z: Z_batch_test, A:A_batch_test, concentration: concentration_batch_normalize_test})\n\n # print(\"hybrid loss\", test_loss)\n # print(\"physics loss\", phy_loss_tmp)\n # print(\"hybrid gamma*k\",gk_pred[0][0])\n # print(\"physics gamma*k\",gk_batch_test[0][0])\n # loc_names = loc\n loc_names = loc\n function_name = 'f('\n # print(variables)\n\n for i in variables:\n function_name += varia_dict[i] + ','\n function_name = function_name[:-1]+')'\n total_summary['iteration'].append(str(iteration_symbol) + '. 
' + function_name + ' test on ' + loc_names + '.')\n total_summary['function'].append(function_name)\n total_summary['Targetted Soil(s)'].append(loc_names)\n\n total_summary['Hybrid Model MSE'].append(test_loss)\n total_summary['Physics-based Model MSE'].append(phy_loss_tmp)\n total_summary['f(z) (hybrid model)'].append(gk_pred[0][0])\n total_summary['gk (physics)'].append(gk_batch_test[0][0])\n\n ## draw training figure\n if loc == 'Davis':\n times = 30\n elif loc =='Panola':\n times = 23\n else:\n times = 10 ## JH5\n\n a_new = np.repeat(A_batch_test[0], times).reshape(-1, 1)\n # A_batch_test\n atmp = np.repeat([Z_batch_test[0]], times,axis=0).reshape(-1, NUM_VARIABLE)\n assert np.array(atmp[0] == Z_batch_test[0]).all()\n # btmp = np.repeat(Z_batch_test_t_T[0][1],times)\n # z_new = np.column_stack((atmp,btmp)).reshape(-1,2)\n gk_batch_new = np.repeat(gk_batch_test[0], times).reshape(-1, 1)\n\n depth_batch_test_new = np.array(range(-times, 0, 1)).reshape(-1, 1)\n concentration_pred_tmp, phy_pred_tmp = sess.run([concentration_pred, phy_pred],\n feed_dict={depth: depth_batch_test_new, Z: atmp, A: a_new, gk: gk_batch_new})\n\n # plt.scatter(np.array(depth_batch_test).reshape(-1,1), np.array(concentration).reshape(-1,1),label='Real')\n # plt.plot(np.array(depth_batch_test).reshape(-1,1), np.array(concentration).reshape(-1,1))\n plt.figure()\n plt.scatter(np.array(concentration_measured).reshape(-1, 1), np.array(depth_batch_test).reshape(-1, 1),\n label='Measured', s=150, edgecolors='b')\n\n # plt.plot( np.array(concentration).reshape(-1,1), np.array(depth_batch_test).reshape(-1,1))\n\n # plt.patch.set_facecolor('xkcd:mint green')\n\n # plt.scatter(np.array(concentration_pred).reshape(-1,1), np.array(depth_batch_test).reshape(-1,1), label='Hybrid Model',s=30)\n plt.plot(np.array(concentration_pred_tmp).reshape(-1, 1), np.array(depth_batch_test_new).reshape(-1, 1),\n label='HNN', linewidth=4, alpha=0.7, c='r')\n\n # plt.scatter(np.array(phy_pred).reshape(-1,1), np.array(depth_batch_test).reshape(-1,1), label='Physics-based Model',s=30)\n plt.plot(np.array(phy_pred_tmp).reshape(-1, 1), np.array(depth_batch_test_new).reshape(-1, 1),\n label='PBM', linewidth=4, alpha=0.7, c='green')\n # plt.title('Davis', fontsize=20)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n plt.xlim(0, 1.2)\n plt.xlabel('Normalized Na concentration', fontsize=20)\n plt.ylabel('Depth (m)', fontsize=20)\n plt.legend(fontsize=20)\n plt.tight_layout()\n\n plt.savefig(os.path.join(PATH,'fig_{0}_{1}.pdf'.format(loc,iteration_symbol)), bbox_inches='tight')\n plt.clf()\n plt.close()\n\n\n# print(total_summary)\ntotal_summary = pd.DataFrame(total_summary)\n\ntotal_summary.to_csv(os.path.join(HOME_DIR,'total_results.csv'),sep=';',index=False)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.dot", "pandas.DataFrame", "tensorflow.compat.v1.math.exp", "tensorflow.compat.v1.train.Saver", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "tensorflow.compat.v1.train.AdamOptimizer", "matplotlib.style.use", "tensorflow.compat.v1.trainable_variables", "matplotlib.pyplot.close", "numpy.repeat", "matplotlib.pyplot.figure", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.compat.v1.squared_difference", "numpy.array", "tensorflow.compat.v1.layers.dense", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "tensorflow.compat.v1.reset_default_graph" ] ]
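The physics term used throughout the script above is the reaction-front profile c(z)/c0 = 1 / (1 + A·exp(gk·z)) with A = (c0 − cx0)/cx0 (the cal_A helper); a small NumPy sketch with made-up parameter values:

import numpy as np

c0, cx0, gk = 1.0, 0.05, 0.9           # assumed parent/surface concentrations and gamma*k
A = (c0 - cx0) / cx0                   # same definition as cal_A above
depth = np.linspace(-10.0, 0.0, 6)     # negative depths, as in the plotted profiles
c_norm = 1.0 / (1.0 + A * np.exp(gk * depth))
print(np.round(c_norm, 3))             # ~1 deep in the profile, ~cx0/c0 at depth 0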
rsriram315/eds_covid-19
[ "528695a430ff13c9dcc6e969ebf1f7988e26c434" ]
[ "src/features/build_features.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 22 10:32:53 2020\n\n@author: Sriram\n\"\"\"\n\nimport numpy as np\nfrom sklearn import linear_model\nimport pandas as pd\nfrom scipy import signal\n\n# we define the linear regression object\nreg=linear_model.LinearRegression(fit_intercept=True)\n\ndef get_doubling_time_via_regression(in_array):\n \" Use linear regression to find the doubling rate\"\n y=np.array(in_array)\n X=np.arange(-1,2).reshape(-1,1)\n # for safety we are asserting that the length of the input array is 3\n assert len(in_array)==3\n reg.fit(X,y)\n intercept=reg.intercept_\n slope=reg.coef_\n return intercept/slope\n\ndef savgol_filter(df_input,column='confirmed',window=5):\n df_result=df_input\n degree=1\n # we fill the missing entries with zero\n filter_in=df_input[column].fillna(0)\n result=signal.savgol_filter(np.array(filter_in),\n window,\n degree)\n df_result[str(column+'_filtered')]=result\n return df_result\n\ndef rolling_reg(df_input,col='confirmed'):\n \"Input is dataframe\"\n \"return value is a single series of doubling rates\"\n days_back=3\n result=df_input[col].rolling(window=days_back,min_periods=days_back).apply(get_doubling_time_via_regression,raw=False)\n return result\n\ndef calc_filtered_data(df_input,filter_on='confirmed'):\n \"Apply SavGol filter on the dataset and return the merged dataset\"\n must_contain=set(['state','country',filter_on])\n assert must_contain.issubset(set(df_input.columns)),'Error in calc_filtered_data not all columns in data Frame'\n df_output=df_input.copy()\n pd_filtered_result=df_output[['state','country',filter_on]].groupby(['state','country']).apply(savgol_filter)#.reset_index()\n df_output=pd.merge(df_output,pd_filtered_result[[str(filter_on+'_filtered')]],left_index=True,right_index=True,how='left')\n \n return df_output.copy()\n\n\ndef calc_doubling_rate(df_input,filter_on='confirmed'):\n \"Calculate doubling rate and return the dataframe\"\n must_contain=set(['state','country',filter_on])\n assert must_contain.issubset(set(df_input.columns)),'Error in calc_filtered_data not all columns in data Frame'\n pd_DR_result=df_input[['state','country',filter_on]].groupby(['state','country']).apply(rolling_reg,filter_on).reset_index()\n pd_DR_result=pd_DR_result.rename(columns={filter_on:filter_on+'_DR','level_2':'index'})\n df_output=pd.merge(df_input,pd_DR_result[['index',str(filter_on+'_DR')]],left_index=True,right_on=['index'],how='left')\n df_output=df_output.drop(columns=['index'])\n return df_output\n\n\nif __name__=='__main__':\n #test_data=np.array([2,4,6])\n #doubling_time=get_doubling_time_via_regression(test_data)\n #print('Test slope is :'+str(doubling_time))\n # We read the data from file\n pd_JH_data=pd.read_csv('data/processed/COVID_relational_confirmed.csv',sep=';',parse_dates=[0])\n pd_JH_data=pd_JH_data.sort_values('date',ascending=True).reset_index(drop=True).copy()\n # We process the data calculating filtered data and doubling rate\n pd_JH_result_large=calc_filtered_data(pd_JH_data)\n pd_JH_result_large=calc_doubling_rate(pd_JH_result_large)\n pd_JH_result_large=calc_doubling_rate(pd_JH_result_large,filter_on='confirmed_filtered')\n # we apply a threshold on confirmed column since if values are small doubling rate goes to infinity\n mask=pd_JH_result_large['confirmed']>100\n pd_JH_result_large['confirmed_filtered_DR']=pd_JH_result_large['confirmed_filtered_DR'].where(mask,other=np.NaN)\n pd_JH_result_large.to_csv('data/processed/COVID_final_set.csv',sep=';',index=False)\n 
print(pd_JH_result_large.head())" ]
[ [ "numpy.arange", "pandas.read_csv", "numpy.array", "sklearn.linear_model.LinearRegression" ] ]
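A quick check of the doubling-rate helper defined above, using the same three-point series as the file's own commented-out test; it assumes the module is importable (the module name here is an assumption based on the file path):

import numpy as np
from build_features import get_doubling_time_via_regression   # assumed module name

# Fits counts at t = -1, 0, 1 and returns intercept/slope, an approximate doubling time.
test_data = np.array([2, 4, 6])
print(get_doubling_time_via_regression(test_data))   # ~[2.] for this linear series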
silky/mpld3
[ "12151b57d8f245c3538f3c19e34d71caf8e65a59" ]
[ "examples/drag_points.py" ]
[ "\"\"\"\nDraggable Points Example\n========================\nThis example shows how a D3 plugin can be created to make plot elements\ndraggable. A stopPropagation command is used to allow the drag behavior\nand pan/zoom behavior to work in tandem.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nimport mpld3\nfrom mpld3 import plugins, utils\n\n\nclass DragPlugin(plugins.PluginBase):\n JAVASCRIPT = r\"\"\"\n var DragPlugin = function(fig, prop){\n this.fig = fig;\n this.prop = mpld3.process_props(this, prop, {}, [\"id\"]);\n\n mpld3.insert_css(\"#\" + fig.figid + \" path.dragging\",\n {\"fill-opacity\": \"1.0 !important\",\n \"stroke-opacity\": \"1.0 !important\"});\n }\n\n DragPlugin.prototype.draw = function(){\n var obj = mpld3.get_element(this.prop.id);\n\n var drag = d3.behavior.drag()\n .origin(function(d) { return {x:obj.ax.x(d[0]),\n y:obj.ax.y(d[1])}; })\n .on(\"dragstart\", dragstarted)\n .on(\"drag\", dragged)\n .on(\"dragend\", dragended);\n\n obj.elements()\n .data(obj.data)\n .style(\"cursor\", \"default\")\n .call(drag);\n\n function dragstarted(d) {\n d3.event.sourceEvent.stopPropagation();\n d3.select(this).classed(\"dragging\", true);\n }\n\n function dragged(d, i) {\n d[0] = obj.ax.x.invert(d3.event.x);\n d[1] = obj.ax.y.invert(d3.event.y);\n d3.select(this)\n .attr(\"transform\", \"translate(\" + [d3.event.x,d3.event.y] + \")\");\n }\n\n function dragended(d) {\n d3.select(this).classed(\"dragging\", false);\n }\n }\n\n mpld3.register_plugin(\"drag\", DragPlugin);\n \"\"\"\n\n def __init__(self, points):\n if isinstance(points, mpl.lines.Line2D):\n suffix = \"pts\"\n else:\n suffix = None\n\n self.dict_ = {\"type\": \"drag\",\n \"id\": utils.get_id(points, suffix)}\n\n\nfig, ax = plt.subplots()\nnp.random.seed(0)\npoints = ax.plot(np.random.normal(size=20),\n np.random.normal(size=20), 'or', alpha=0.5,\n markersize=50, markeredgewidth=1)\nax.set_title(\"Click and Drag\", fontsize=18)\n\nplugins.connect(fig, DragPlugin(points[0]))\n\nmpld3.show()\n" ]
[ [ "numpy.random.normal", "matplotlib.pyplot.subplots", "numpy.random.seed" ] ]
dkoes/md-scripts
[ "2002a9e8eafaf2d203334285e47fa1637d22286d" ]
[ "mdrmsdplot.py" ]
[ "#!/usr/bin/env python3\n\nimport sys, MDAnalysis\nimport numpy as np\nfrom os.path import splitext\nfrom MDAnalysis.analysis.rms import *\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport seaborn as sns\nimport argparse\n\nparser = argparse.ArgumentParser(description='Generate pairwise RMSD heatmap plot.\\nIMPORTANT: assumes an aligned input')\nparser.add_argument(\"topology\")\nparser.add_argument(\"trajectory\")\nparser.add_argument(\"--selection\",default=\"backbone\",help=\"MDAnalysis selection for computing RMSD\",required=False)\nparser.add_argument(\"--step\",default=10, type=int, required=False,help=\"Frames to skip over\")\nparser.add_argument('--title',help=\"Graph title\")\nparser.add_argument('-o','--output',type=str,help=\"Output filename\")\nparser.add_argument('--max',type=float,help='Max RMSD value to consider',default=None)\nargs = parser.parse_args()\n\ntop = args.topology\ntraj = args.trajectory\n\nbase = splitext(top)[0]\nif not args.title:\n args.title = base\n \nif not args.output:\n args.output = base+'.png'\n \nu1 = MDAnalysis.Universe(top,traj)\nu2 = MDAnalysis.Universe(top,traj)\n\n# arguments: topology, trajector, [selection], [graph title], [step]\n# todo, switch to argparse\nsel = args.selection\n\nn = u1.trajectory.n_frames\n\ndiv = args.step\ndownn = n//div \nif n % div != 0:\n downn += 1\nrmat = np.zeros((downn,downn))\n\nsel1 = u1.select_atoms(sel)\nsel2 = u2.select_atoms(sel)\n#print len(sel1),len(sel2)\nfor t1 in u1.trajectory[::div]:\n for t2 in u2.trajectory[t1.frame::div]:\n rmat[t2.frame//div, t1.frame//div] = rmat[t1.frame//div, t2.frame//div] = rmsd(sel1.positions,sel2.positions)\n\nnp.set_printoptions(threshold=np.inf,precision=2)\n\n\n#find frame with most other frames under cutoff\ncutoff = 2.0\ncnts = np.array([len(row[row < cutoff]) for row in rmat])\npos = cnts.argmax()\nprint(\"Frame %d is within %.2f of %d frames\" % (pos, cutoff, cnts[pos]))\n\nimport matplotlib.pylab as plt\n\nplt.figure()\nplt.title(args.title)\n\n#multiples of 10, but no more than 6ish ticks\nn = 10\nwhile len(rmat)/n > 6:\n n += 10\n\nif args.max:\n sns.heatmap(rmat,square=True,xticklabels=n,yticklabels=n,cmap='YlGnBu',cbar_kws={'label':'RMSD'},vmin=0,vmax=args.max)\nelse:\n sns.heatmap(rmat,square=True,xticklabels=n,yticklabels=n,cmap='YlGnBu',cbar_kws={'label':'RMSD'},vmin=0)\nplt.xlabel(\"Frame #\")\nplt.ylabel(\"Frame #\")\nax = plt.gca()\nax.tick_params(direction='out')\nplt.tight_layout()\n\nplt.savefig(args.output)\n" ]
[ [ "matplotlib.pylab.tight_layout", "numpy.set_printoptions", "matplotlib.pylab.title", "matplotlib.pylab.figure", "matplotlib.pylab.ylabel", "matplotlib.pylab.gca", "matplotlib.pylab.savefig", "matplotlib.pylab.xlabel", "numpy.zeros" ] ]
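The representative-frame selection near the end of the script above counts, per frame, how many frames fall within the RMSD cutoff (the zero self-distance is included in the count); the same idea on a toy matrix:

import numpy as np

rmat = np.array([[0.0, 1.5, 3.0],
                 [1.5, 0.0, 1.8],
                 [3.0, 1.8, 0.0]])    # toy symmetric pairwise-RMSD matrix
cutoff = 2.0
cnts = np.array([len(row[row < cutoff]) for row in rmat])
pos = cnts.argmax()
print(pos, cnts[pos])                 # frame 1 is within 2.0 of all 3 frames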
lena-u/flair
[ "821de2e1d5446a9308fbad6f4d51bd4e9614ec02" ]
[ "flair/embeddings/token.py" ]
[ "import hashlib\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom typing import List, Union\nfrom collections import Counter\nfrom functools import lru_cache\n\nimport torch\nfrom bpemb import BPEmb\nfrom transformers import XLNetTokenizer, T5Tokenizer, GPT2Tokenizer, AutoTokenizer, AutoConfig, AutoModel\n\nimport flair\nimport gensim\nimport os\nimport re\nimport logging\nimport numpy as np\n\nfrom flair.data import Sentence, Token, Corpus, Dictionary\nfrom flair.embeddings.base import Embeddings, ScalarMix\nfrom flair.file_utils import cached_path, open_inside_zip\n\nlog = logging.getLogger(\"flair\")\n\n\nclass TokenEmbeddings(Embeddings):\n \"\"\"Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"word-level\"\n\n\nclass StackedEmbeddings(TokenEmbeddings):\n \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n def __init__(self, embeddings: List[TokenEmbeddings]):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings = embeddings\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(embeddings):\n embedding.name = f\"{str(i)}-{embedding.name}\"\n self.add_module(f\"list_embedding_{str(i)}\", embedding)\n\n self.name: str = \"Stack\"\n self.static_embeddings: bool = True\n\n self.__embedding_type: str = embeddings[0].embedding_type\n\n self.__embedding_length: int = 0\n for embedding in embeddings:\n self.__embedding_length += embedding.embedding_length\n\n def embed(\n self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True\n ):\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n @property\n def embedding_type(self) -> str:\n return self.__embedding_type\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for embedding in self.embeddings:\n embedding._add_embeddings_internal(sentences)\n\n return sentences\n\n def __str__(self):\n return f'StackedEmbeddings [{\",\".join([str(e) for e in self.embeddings])}]'\n\n\nclass WordEmbeddings(TokenEmbeddings):\n \"\"\"Standard static word embeddings, such as GloVe or FastText.\"\"\"\n\n def __init__(self, embeddings: str, field: str = None):\n \"\"\"\n Initializes classic word embeddings. 
Constructor downloads required files if not there.\n :param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom\n If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.\n \"\"\"\n self.embeddings = embeddings\n\n old_base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/\"\n )\n base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/\"\n )\n embeddings_path_v4 = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/\"\n )\n embeddings_path_v4_1 = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/\"\n\n cache_dir = Path(\"embeddings\")\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\" or embeddings.lower() == \"en-glove\":\n cached_path(f\"{old_base_path}glove.gensim.vectors.npy\", cache_dir=cache_dir)\n embeddings = cached_path(\n f\"{old_base_path}glove.gensim\", cache_dir=cache_dir\n )\n\n # TURIAN embeddings\n elif embeddings.lower() == \"turian\" or embeddings.lower() == \"en-turian\":\n cached_path(\n f\"{embeddings_path_v4_1}turian.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4_1}turian\", cache_dir=cache_dir\n )\n\n # KOMNINOS embeddings\n elif embeddings.lower() == \"extvec\" or embeddings.lower() == \"en-extvec\":\n cached_path(\n f\"{old_base_path}extvec.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}extvec.gensim\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif embeddings.lower() == \"crawl\" or embeddings.lower() == \"en-crawl\":\n cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif (\n embeddings.lower() == \"news\"\n or embeddings.lower() == \"en-news\"\n or embeddings.lower() == \"en\"\n ):\n cached_path(\n f\"{base_path}en-fasttext-news-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-news-300d-1M\", cache_dir=cache_dir\n )\n\n # twitter embeddings\n elif embeddings.lower() == \"twitter\" or embeddings.lower() == \"en-twitter\":\n cached_path(\n f\"{old_base_path}twitter.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}twitter.gensim\", cache_dir=cache_dir\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 2:\n cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 7 and embeddings.endswith(\"-wiki\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code crawl embeddings\n elif len(embeddings.lower()) == 8 and embeddings.endswith(\"-crawl\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n elif 
not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n if str(embeddings).endswith(\".bin\"):\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n str(embeddings), binary=True\n )\n else:\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(\n str(embeddings)\n )\n\n self.field = field\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n @lru_cache(maxsize=10000, typed=False)\n def get_cached_vec(self, word: str) -> torch.Tensor:\n if word in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word]\n elif word.lower() in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word.lower()]\n elif re.sub(r\"\\d\", \"#\", word.lower()) in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"#\", word.lower())\n ]\n elif re.sub(r\"\\d\", \"0\", word.lower()) in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"0\", word.lower())\n ]\n else:\n word_embedding = np.zeros(self.embedding_length, dtype=\"float\")\n\n word_embedding = torch.tensor(\n word_embedding, device=flair.device, dtype=torch.float\n )\n return word_embedding\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n word_embedding = self.get_cached_vec(word=word)\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n # fix serialized models\n if \"embeddings\" not in self.__dict__:\n self.embeddings = self.name\n\n return f\"'{self.embeddings}'\"\n\nclass CharacterEmbeddings(TokenEmbeddings):\n \"\"\"Character embeddings of words, as proposed in Lample et al., 2016.\"\"\"\n\n def __init__(\n self,\n path_to_char_dict: str = None,\n char_embedding_dim: int = 25,\n hidden_size_char: int = 25,\n ):\n \"\"\"Uses the default character dictionary if none provided.\"\"\"\n\n super().__init__()\n self.name = \"Char\"\n self.static_embeddings = False\n\n # use list of common characters if none provided\n if path_to_char_dict is None:\n self.char_dictionary: Dictionary = Dictionary.load(\"common-chars\")\n else:\n self.char_dictionary: Dictionary = Dictionary.load_from_file(\n path_to_char_dict\n )\n\n self.char_embedding_dim: int = char_embedding_dim\n self.hidden_size_char: int = hidden_size_char\n self.char_embedding = torch.nn.Embedding(\n len(self.char_dictionary.item2idx), self.char_embedding_dim\n )\n self.char_rnn = torch.nn.LSTM(\n self.char_embedding_dim,\n self.hidden_size_char,\n num_layers=1,\n bidirectional=True,\n )\n\n self.__embedding_length = self.hidden_size_char * 2\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for sentence in sentences:\n\n tokens_char_indices = []\n\n # translate words in sentence into 
ints using dictionary\n for token in sentence.tokens:\n char_indices = [\n self.char_dictionary.get_idx_for_item(char) for char in token.text\n ]\n tokens_char_indices.append(char_indices)\n\n # sort words by length, for batching and masking\n tokens_sorted_by_length = sorted(\n tokens_char_indices, key=lambda p: len(p), reverse=True\n )\n d = {}\n for i, ci in enumerate(tokens_char_indices):\n for j, cj in enumerate(tokens_sorted_by_length):\n if ci == cj:\n d[j] = i\n continue\n chars2_length = [len(c) for c in tokens_sorted_by_length]\n longest_token_in_sentence = max(chars2_length)\n tokens_mask = torch.zeros(\n (len(tokens_sorted_by_length), longest_token_in_sentence),\n dtype=torch.long,\n device=flair.device,\n )\n\n for i, c in enumerate(tokens_sorted_by_length):\n tokens_mask[i, : chars2_length[i]] = torch.tensor(\n c, dtype=torch.long, device=flair.device\n )\n\n # chars for rnn processing\n chars = tokens_mask\n\n character_embeddings = self.char_embedding(chars).transpose(0, 1)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(\n character_embeddings, chars2_length\n )\n\n lstm_out, self.hidden = self.char_rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n outputs = outputs.transpose(0, 1)\n chars_embeds_temp = torch.zeros(\n (outputs.size(0), outputs.size(2)),\n dtype=torch.float,\n device=flair.device,\n )\n for i, index in enumerate(output_lengths):\n chars_embeds_temp[i] = outputs[i, index - 1]\n character_embeddings = chars_embeds_temp.clone()\n for i in range(character_embeddings.size(0)):\n character_embeddings[d[i]] = chars_embeds_temp[i]\n\n for token_number, token in enumerate(sentence.tokens):\n token.set_embedding(self.name, character_embeddings[token_number])\n\n def __str__(self):\n return self.name\n\n\nclass FlairEmbeddings(TokenEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018.\"\"\"\n\n def __init__(self,\n model,\n fine_tune: bool = False,\n chars_per_chunk: int = 512,\n with_whitespace: bool = True,\n tokenized_lm: bool = True,\n ):\n \"\"\"\n initializes contextual string embeddings using a character-level language model.\n :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',\n 'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward',\n etc (see https://github.com/flairNLP/flair/blob/master/resources/docs/embeddings/FLAIR_EMBEDDINGS.md)\n depending on which character language model is desired.\n :param fine_tune: if set to True, the gradient will propagate into the language model. This dramatically slows\n down training and often leads to overfitting, so use with caution.\n :param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster\n but requires more memory. Lower means slower but less memory.\n :param with_whitespace: If True, use hidden state after whitespace after word. If False, use hidden\n state at last character of word.\n :param tokenized_lm: Whether this lm is tokenized. 
Default is True, but for LMs trained over unprocessed text\n False might be better.\n \"\"\"\n super().__init__()\n\n cache_dir = Path(\"embeddings\")\n\n aws_path: str = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources\"\n hu_path: str = \"https://flair.informatik.hu-berlin.de/resources\"\n clef_hipe_path: str = \"https://files.ifi.uzh.ch/cl/siclemat/impresso/clef-hipe-2020/flair\"\n\n self.PRETRAINED_MODEL_ARCHIVE_MAP = {\n # multilingual models\n \"multi-forward\": f\"{aws_path}/embeddings-v0.4.3/lm-jw300-forward-v0.1.pt\",\n \"multi-backward\": f\"{aws_path}/embeddings-v0.4.3/lm-jw300-backward-v0.1.pt\",\n \"multi-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt\",\n \"multi-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt\",\n \"multi-v0-forward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt\",\n \"multi-v0-backward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt\",\n # English models\n \"en-forward\": f\"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt\",\n \"en-backward\": f\"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt\",\n \"en-forward-fast\": f\"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt\",\n \"en-backward-fast\": f\"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt\",\n \"news-forward\": f\"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt\",\n \"news-backward\": f\"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt\",\n \"news-forward-fast\": f\"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt\",\n \"news-backward-fast\": f\"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt\",\n \"mix-forward\": f\"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt\",\n \"mix-backward\": f\"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt\",\n # Arabic\n \"ar-forward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt\",\n \"ar-backward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt\",\n # Bulgarian\n \"bg-forward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt\",\n \"bg-backward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt\",\n \"bg-forward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt\",\n \"bg-backward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt\",\n # Czech\n \"cs-forward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt\",\n \"cs-backward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt\",\n \"cs-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt\",\n \"cs-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt\",\n # Danish\n \"da-forward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt\",\n \"da-backward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt\",\n # German\n \"de-forward\": f\"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt\",\n \"de-backward\": f\"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt\",\n \"de-historic-ha-forward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt\",\n \"de-historic-ha-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt\",\n \"de-historic-wz-forward\": 
f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt\",\n \"de-historic-wz-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt\",\n \"de-historic-rw-forward\": f\"{hu_path}/embeddings/redewiedergabe_lm_forward.pt\",\n \"de-historic-rw-backward\": f\"{hu_path}/embeddings/redewiedergabe_lm_backward.pt\",\n # Spanish\n \"es-forward\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt\",\n \"es-backward\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt\",\n \"es-forward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt\",\n \"es-backward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt\",\n # Basque\n \"eu-forward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.2.pt\",\n \"eu-backward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.2.pt\",\n \"eu-v1-forward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt\",\n \"eu-v1-backward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt\",\n \"eu-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt\",\n \"eu-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt\",\n # Persian\n \"fa-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt\",\n \"fa-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt\",\n # Finnish\n \"fi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt\",\n \"fi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt\",\n # French\n \"fr-forward\": f\"{aws_path}/embeddings/lm-fr-charlm-forward.pt\",\n \"fr-backward\": f\"{aws_path}/embeddings/lm-fr-charlm-backward.pt\",\n # Hebrew\n \"he-forward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt\",\n \"he-backward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt\",\n # Hindi\n \"hi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt\",\n \"hi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt\",\n # Croatian\n \"hr-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt\",\n \"hr-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt\",\n # Indonesian\n \"id-forward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt\",\n \"id-backward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt\",\n # Italian\n \"it-forward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt\",\n \"it-backward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt\",\n # Japanese\n \"ja-forward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt\",\n \"ja-backward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt\",\n # Malayalam\n \"ml-forward\": f\"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-forward.pt\",\n \"ml-backward\": f\"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-backward.pt\",\n # Dutch\n \"nl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt\",\n \"nl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt\",\n \"nl-v0-forward\": 
f\"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt\",\n \"nl-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt\",\n # Norwegian\n \"no-forward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt\",\n \"no-backward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt\",\n # Polish\n \"pl-forward\": f\"{aws_path}/embeddings/lm-polish-forward-v0.2.pt\",\n \"pl-backward\": f\"{aws_path}/embeddings/lm-polish-backward-v0.2.pt\",\n \"pl-opus-forward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt\",\n \"pl-opus-backward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt\",\n # Portuguese\n \"pt-forward\": f\"{aws_path}/embeddings-v0.4/lm-pt-forward.pt\",\n \"pt-backward\": f\"{aws_path}/embeddings-v0.4/lm-pt-backward.pt\",\n # Pubmed\n \"pubmed-forward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt\",\n \"pubmed-backward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt\",\n # Slovenian\n \"sl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt\",\n \"sl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt\",\n \"sl-v0-forward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt\",\n \"sl-v0-backward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt\",\n # Swedish\n \"sv-forward\": f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt\",\n \"sv-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt\",\n \"sv-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt\",\n \"sv-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt\",\n # Tamil\n \"ta-forward\": f\"{aws_path}/embeddings-stefan-it/lm-ta-opus-large-forward-v0.1.pt\",\n \"ta-backward\": f\"{aws_path}/embeddings-stefan-it/lm-ta-opus-large-backward-v0.1.pt\",\n # CLEF HIPE Shared task\n \"de-impresso-hipe-v1-forward\": f\"{clef_hipe_path}/de-hipe-flair-v1-forward/best-lm.pt\",\n \"de-impresso-hipe-v1-backward\": f\"{clef_hipe_path}/de-hipe-flair-v1-backward/best-lm.pt\",\n \"en-impresso-hipe-v1-forward\": f\"{clef_hipe_path}/en-flair-v1-forward/best-lm.pt\",\n \"en-impresso-hipe-v1-backward\": f\"{clef_hipe_path}/en-flair-v1-backward/best-lm.pt\",\n \"fr-impresso-hipe-v1-forward\": f\"{clef_hipe_path}/fr-hipe-flair-v1-forward/best-lm.pt\",\n \"fr-impresso-hipe-v1-backward\": f\"{clef_hipe_path}/fr-hipe-flair-v1-backward/best-lm.pt\",\n }\n\n if type(model) == str:\n\n # load model if in pretrained model map\n if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]\n\n # Fix for CLEF HIPE models (avoid overwriting best-lm.pt in cache_dir)\n if \"impresso-hipe\" in model.lower():\n cache_dir = cache_dir / model.lower()\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[\n replace_with_language_code(model)\n ]\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif not Path(model).exists():\n raise ValueError(\n f'The given model \"{model}\" is not available or is not a valid path.'\n )\n\n from flair.models import LanguageModel\n\n if type(model) == LanguageModel:\n self.lm: LanguageModel = model\n self.name = f\"Task-LSTM-{self.lm.hidden_size}-{self.lm.nlayers}-{self.lm.is_forward_lm}\"\n else:\n self.lm: LanguageModel = LanguageModel.load_language_model(model)\n 
self.name = str(model)\n\n # embeddings are static if we don't do finetuning\n self.fine_tune = fine_tune\n self.static_embeddings = not fine_tune\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n self.with_whitespace: bool = with_whitespace\n self.tokenized_lm: bool = tokenized_lm\n self.chars_per_chunk: int = chars_per_chunk\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n # set to eval mode\n self.eval()\n\n def train(self, mode=True):\n\n # make compatible with serialized models (TODO: remove)\n if \"fine_tune\" not in self.__dict__:\n self.fine_tune = False\n if \"chars_per_chunk\" not in self.__dict__:\n self.chars_per_chunk = 512\n\n if not self.fine_tune:\n pass\n else:\n super(FlairEmbeddings, self).train(mode)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # make compatible with serialized models (TODO: remove)\n if \"with_whitespace\" not in self.__dict__:\n self.with_whitespace = True\n if \"tokenized_lm\" not in self.__dict__:\n self.tokenized_lm = True\n\n # gradients are enable if fine-tuning is enabled\n gradient_context = torch.enable_grad() if self.fine_tune else torch.no_grad()\n\n with gradient_context:\n\n # if this is not possible, use LM to generate embedding. First, get text sentences\n text_sentences = [sentence.to_tokenized_string() for sentence in sentences] if self.tokenized_lm \\\n else [sentence.to_plain_string() for sentence in sentences]\n\n start_marker = self.lm.document_delimiter if \"document_delimiter\" in self.lm.__dict__ else '\\n'\n end_marker = \" \"\n\n # get hidden states from language model\n all_hidden_states_in_lm = self.lm.get_representation(\n text_sentences, start_marker, end_marker, self.chars_per_chunk\n )\n\n if not self.fine_tune:\n all_hidden_states_in_lm = all_hidden_states_in_lm.detach()\n\n # take first or last hidden states from language model as word representation\n for i, sentence in enumerate(sentences):\n sentence_text = sentence.to_tokenized_string() if self.tokenized_lm else sentence.to_plain_string()\n\n offset_forward: int = len(start_marker)\n offset_backward: int = len(sentence_text) + len(start_marker)\n\n for token in sentence.tokens:\n\n offset_forward += len(token.text)\n if self.is_forward_lm:\n offset_with_whitespace = offset_forward\n offset_without_whitespace = offset_forward - 1\n else:\n offset_with_whitespace = offset_backward\n offset_without_whitespace = offset_backward - 1\n\n # offset mode that extracts at whitespace after last character\n if self.with_whitespace:\n embedding = all_hidden_states_in_lm[offset_with_whitespace, i, :]\n # offset mode that extracts at last character\n else:\n embedding = all_hidden_states_in_lm[offset_without_whitespace, i, :]\n\n if self.tokenized_lm or token.whitespace_after:\n offset_forward += 1\n offset_backward -= 1\n\n offset_backward -= len(token.text)\n\n # only clone if optimization mode is 'gpu'\n if flair.embedding_storage_mode == \"gpu\":\n embedding = embedding.clone()\n\n token.set_embedding(self.name, embedding)\n\n del all_hidden_states_in_lm\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass PooledFlairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n contextual_embeddings: 
Union[str, FlairEmbeddings],\n pooling: str = \"min\",\n only_capitalized: bool = False,\n **kwargs,\n ):\n\n super().__init__()\n\n # use the character language model embeddings as basis\n if type(contextual_embeddings) is str:\n self.context_embeddings: FlairEmbeddings = FlairEmbeddings(\n contextual_embeddings, **kwargs\n )\n else:\n self.context_embeddings: FlairEmbeddings = contextual_embeddings\n\n # length is twice the original character LM embedding length\n self.embedding_length = self.context_embeddings.embedding_length * 2\n self.name = self.context_embeddings.name + \"-context\"\n\n # these fields are for the embedding memory\n self.word_embeddings = {}\n self.word_count = {}\n\n # whether to add only capitalized words to memory (faster runtime and lower memory consumption)\n self.only_capitalized = only_capitalized\n\n # we re-compute embeddings dynamically at each epoch\n self.static_embeddings = False\n\n # set the memory method\n self.pooling = pooling\n if pooling == \"mean\":\n self.aggregate_op = torch.add\n elif pooling == \"fade\":\n self.aggregate_op = torch.add\n elif pooling == \"max\":\n self.aggregate_op = torch.max\n elif pooling == \"min\":\n self.aggregate_op = torch.min\n\n def train(self, mode=True):\n super().train(mode=mode)\n if mode:\n # memory is wiped each time we do a training run\n print(\"train mode resetting embeddings\")\n self.word_embeddings = {}\n self.word_count = {}\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n self.context_embeddings.embed(sentences)\n\n # if we keep a pooling, it needs to be updated continuously\n for sentence in sentences:\n for token in sentence.tokens:\n\n # update embedding\n local_embedding = token._embeddings[self.context_embeddings.name].cpu()\n\n # check token.text is empty or not\n if token.text:\n if token.text[0].isupper() or not self.only_capitalized:\n\n if token.text not in self.word_embeddings:\n self.word_embeddings[token.text] = local_embedding\n self.word_count[token.text] = 1\n else:\n aggregated_embedding = self.aggregate_op(\n self.word_embeddings[token.text], local_embedding\n )\n if self.pooling == \"fade\":\n aggregated_embedding /= 2\n self.word_embeddings[token.text] = aggregated_embedding\n self.word_count[token.text] += 1\n\n # add embeddings after updating\n for sentence in sentences:\n for token in sentence.tokens:\n if token.text in self.word_embeddings:\n base = (\n self.word_embeddings[token.text] / self.word_count[token.text]\n if self.pooling == \"mean\"\n else self.word_embeddings[token.text]\n )\n else:\n base = token._embeddings[self.context_embeddings.name]\n\n token.set_embedding(self.name, base)\n\n return sentences\n\n def embedding_length(self) -> int:\n return self.embedding_length\n\n def __setstate__(self, d):\n self.__dict__ = d\n\n if flair.device != 'cpu':\n for key in self.word_embeddings:\n self.word_embeddings[key] = self.word_embeddings[key].cpu()\n\n\nclass TransformerWordEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n model: str = \"bert-base-uncased\",\n layers: str = \"-1,-2,-3,-4\",\n pooling_operation: str = \"first\",\n batch_size: int = 1,\n use_scalar_mix: bool = False,\n fine_tune: bool = False\n ):\n \"\"\"\n Bidirectional transformer embeddings of words from various transformer architectures.\n :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for\n options)\n :param layers: string indicating which layers to take for embedding (-1 is topmost layer)\n :param 
pooling_operation: how to get from token piece embeddings to token embedding. Either take the first\n subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')\n :param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer\n models tend to be huge.\n :param use_scalar_mix: If True, uses a scalar mix of layers as embedding\n :param fine_tune: If True, allows transformers to be fine-tuned during training\n \"\"\"\n super().__init__()\n\n # load tokenizer and transformer model\n self.tokenizer = AutoTokenizer.from_pretrained(model)\n config = AutoConfig.from_pretrained(model, output_hidden_states=True)\n self.model = AutoModel.from_pretrained(model, config=config)\n\n # model name\n self.name = 'transformer-word-' + str(model)\n\n # when initializing, embeddings are in eval mode by default\n self.model.eval()\n self.model.to(flair.device)\n\n # embedding parameters\n if layers == 'all':\n # send mini-token through to check how many layers the model has\n hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]\n self.layer_indexes = [int(x) for x in range(len(hidden_states))]\n else:\n self.layer_indexes = [int(x) for x in layers.split(\",\")]\n self.mix = ScalarMix(mixture_size=len(self.layer_indexes), trainable=False)\n self.pooling_operation = pooling_operation\n self.use_scalar_mix = use_scalar_mix\n self.fine_tune = fine_tune\n self.static_embeddings = not self.fine_tune\n self.batch_size = batch_size\n\n self.special_tokens = []\n self.special_tokens.append(self.tokenizer.bos_token)\n self.special_tokens.append(self.tokenizer.cls_token)\n\n # most models have an intial BOS token, except for XLNet, T5 and GPT2\n self.begin_offset = 1\n if type(self.tokenizer) == XLNetTokenizer:\n self.begin_offset = 0\n if type(self.tokenizer) == T5Tokenizer:\n self.begin_offset = 0\n if type(self.tokenizer) == GPT2Tokenizer:\n self.begin_offset = 0\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences.\"\"\"\n\n # split into micro batches of size self.batch_size before pushing through transformer\n sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]\n for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]\n\n # embed each micro-batch\n for batch in sentence_batches:\n self._add_embeddings_to_sentences(batch)\n\n return sentences\n\n def _add_embeddings_to_sentences(self, sentences: List[Sentence]):\n \"\"\"Match subtokenization to Flair tokenization and extract embeddings from transformers for each token.\"\"\"\n\n # first, subtokenize each sentence and find out into how many subtokens each token was divided\n subtokenized_sentences = []\n subtokenized_sentences_token_lengths = []\n\n for sentence in sentences:\n\n tokenized_string = sentence.to_tokenized_string()\n\n # method 1: subtokenize sentence\n # subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)\n\n # method 2:\n ids = self.tokenizer.encode(tokenized_string, add_special_tokens=False)\n subtokenized_sentence = self.tokenizer.build_inputs_with_special_tokens(ids)\n\n subtokenized_sentences.append(torch.tensor(subtokenized_sentence, dtype=torch.long))\n subtokens = self.tokenizer.convert_ids_to_tokens(subtokenized_sentence)\n\n word_iterator = iter(sentence)\n token = next(word_iterator)\n token_text = token.text.lower()\n\n token_subtoken_lengths = 
[]\n reconstructed_token = ''\n subtoken_count = 0\n\n # iterate over subtokens and reconstruct tokens\n for subtoken_id, subtoken in enumerate(subtokens):\n\n subtoken_count += 1\n\n # remove special markup\n subtoken = re.sub('^Ġ', '', subtoken) # RoBERTa models\n subtoken = re.sub('^##', '', subtoken) # BERT models\n subtoken = re.sub('^▁', '', subtoken) # XLNet models\n subtoken = re.sub('</w>$', '', subtoken) # XLM models\n\n # append subtoken to reconstruct token\n reconstructed_token = reconstructed_token + subtoken\n\n # check if reconstructed token is special begin token ([CLS] or similar)\n if reconstructed_token in self.special_tokens and subtoken_id == 0:\n reconstructed_token = ''\n subtoken_count = 0\n\n # special handling for UNK subtokens\n if self.tokenizer.unk_token and self.tokenizer.unk_token in reconstructed_token:\n pieces = self.tokenizer.convert_ids_to_tokens(\n self.tokenizer.encode(token.text, add_special_tokens=False))\n token_text = ''\n for piece in pieces:\n # remove special markup\n piece = re.sub('^Ġ', '', piece) # RoBERTa models\n piece = re.sub('^##', '', piece) # BERT models\n piece = re.sub('^▁', '', piece) # XLNet models\n piece = re.sub('</w>$', '', piece) # XLM models\n token_text += piece\n token_text = token_text.lower()\n\n # check if reconstructed token is the same as current token\n if reconstructed_token.lower() == token_text:\n\n # if so, add subtoken count\n token_subtoken_lengths.append(subtoken_count)\n\n # reset subtoken count and reconstructed token\n reconstructed_token = ''\n subtoken_count = 0\n\n # break from loop if all tokens are accounted for\n if len(token_subtoken_lengths) < len(sentence):\n token = next(word_iterator)\n token_text = token.text.lower()\n else:\n break\n\n subtokenized_sentences_token_lengths.append(token_subtoken_lengths)\n\n # find longest sentence in batch\n longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))\n\n # initialize batch tensors and mask\n input_ids = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n mask = torch.zeros(\n [len(sentences), longest_sequence_in_batch],\n dtype=torch.long,\n device=flair.device,\n )\n for s_id, sentence in enumerate(subtokenized_sentences):\n sequence_length = len(sentence)\n input_ids[s_id][:sequence_length] = sentence\n mask[s_id][:sequence_length] = torch.ones(sequence_length)\n\n # put encoded batch through transformer model to get all hidden states of all encoder layers\n hidden_states = self.model(input_ids, attention_mask=mask)[-1]\n\n # gradients are enabled if fine-tuning is enabled\n gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()\n\n with gradient_context:\n\n # iterate over all subtokenized sentences\n for sentence_idx, (sentence, subtoken_lengths) in enumerate(zip(sentences, subtokenized_sentences_token_lengths)):\n\n subword_start_idx = self.begin_offset\n\n # for each token, get embedding\n for token_idx, (token, number_of_subtokens) in enumerate(zip(sentence, subtoken_lengths)):\n\n subword_end_idx = subword_start_idx + number_of_subtokens\n\n subtoken_embeddings: List[torch.FloatTensor] = []\n\n # get states from all selected layers, aggregate with pooling operation\n for layer in self.layer_indexes:\n current_embeddings = hidden_states[layer][sentence_idx][subword_start_idx:subword_end_idx]\n\n if self.pooling_operation == \"first\":\n final_embedding: torch.FloatTensor = current_embeddings[0]\n\n if self.pooling_operation 
== \"last\":\n final_embedding: torch.FloatTensor = current_embeddings[-1]\n\n if self.pooling_operation == \"first_last\":\n final_embedding: torch.Tensor = torch.cat([current_embeddings[0], current_embeddings[-1]])\n\n if self.pooling_operation == \"mean\":\n all_embeddings: List[torch.FloatTensor] = [\n embedding.unsqueeze(0) for embedding in current_embeddings\n ]\n final_embedding: torch.Tensor = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)\n\n subtoken_embeddings.append(final_embedding)\n\n # use scalar mix of embeddings if so selected\n if self.use_scalar_mix:\n sm_embeddings = torch.mean(torch.stack(subtoken_embeddings, dim=1), dim=1)\n # sm_embeddings = self.mix(subtoken_embeddings)\n\n subtoken_embeddings = [sm_embeddings]\n\n # set the extracted embedding for the token\n token.set_embedding(self.name, torch.cat(subtoken_embeddings))\n\n subword_start_idx += number_of_subtokens\n\n def train(self, mode=True):\n # if fine-tuning is not enabled (i.e. a \"feature-based approach\" used), this\n # module should never be in training mode\n if not self.fine_tune:\n pass\n else:\n super().train(mode)\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n\n if not self.use_scalar_mix:\n length = len(self.layer_indexes) * self.model.config.hidden_size\n else:\n length = self.model.config.hidden_size\n\n if self.pooling_operation == 'first_last': length *= 2\n\n return length\n\n\nclass FastTextEmbeddings(TokenEmbeddings):\n \"\"\"FastText Embeddings with oov functionality\"\"\"\n\n def __init__(self, embeddings: str, use_local: bool = True, field: str = None):\n \"\"\"\n Initializes fasttext word embeddings. Constructor downloads required embedding file and stores in cache\n if use_local is False.\n\n :param embeddings: path to your embeddings '.bin' file\n :param use_local: set this to False if you are using embeddings from a remote source\n \"\"\"\n\n cache_dir = Path(\"embeddings\")\n\n if use_local:\n if not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n else:\n embeddings = cached_path(f\"{embeddings}\", cache_dir=cache_dir)\n\n self.embeddings = embeddings\n\n self.name: str = str(embeddings)\n\n self.static_embeddings = True\n\n self.precomputed_word_embeddings = gensim.models.FastText.load_fasttext_format(\n str(embeddings)\n )\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n\n self.field = field\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n @lru_cache(maxsize=10000, typed=False)\n def get_cached_vec(self, word: str) -> torch.Tensor:\n try:\n word_embedding = self.precomputed_word_embeddings[word]\n except:\n word_embedding = np.zeros(self.embedding_length, dtype=\"float\")\n\n word_embedding = torch.tensor(\n word_embedding, device=flair.device, dtype=torch.float\n )\n return word_embedding\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n word_embedding = self.get_cached_vec(word)\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return 
f\"'{self.embeddings}'\"\n\n\nclass OneHotEmbeddings(TokenEmbeddings):\n \"\"\"One-hot encoded embeddings. \"\"\"\n\n def __init__(\n self,\n corpus: Corpus,\n field: str = \"text\",\n embedding_length: int = 300,\n min_freq: int = 3,\n ):\n \"\"\"\n Initializes one-hot encoded word embeddings and a trainable embedding layer\n :param corpus: you need to pass a Corpus in order to construct the vocabulary\n :param field: by default, the 'text' of tokens is embedded, but you can also embed tags such as 'pos'\n :param embedding_length: dimensionality of the trainable embedding layer\n :param min_freq: minimum frequency of a word to become part of the vocabulary\n \"\"\"\n super().__init__()\n self.name = \"one-hot\"\n self.static_embeddings = False\n self.min_freq = min_freq\n self.field = field\n\n tokens = list(map((lambda s: s.tokens), corpus.train))\n tokens = [token for sublist in tokens for token in sublist]\n\n if field == \"text\":\n most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()\n else:\n most_common = Counter(\n list(map((lambda t: t.get_tag(field).value), tokens))\n ).most_common()\n\n tokens = []\n for token, freq in most_common:\n if freq < min_freq:\n break\n tokens.append(token)\n\n self.vocab_dictionary: Dictionary = Dictionary()\n for token in tokens:\n self.vocab_dictionary.add_item(token)\n\n # max_tokens = 500\n self.__embedding_length = embedding_length\n\n print(self.vocab_dictionary.idx2item)\n print(f\"vocabulary size of {len(self.vocab_dictionary)}\")\n\n # model architecture\n self.embedding_layer = torch.nn.Embedding(\n len(self.vocab_dictionary), self.__embedding_length\n )\n torch.nn.init.xavier_uniform_(self.embedding_layer.weight)\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n one_hot_sentences = []\n for i, sentence in enumerate(sentences):\n\n if self.field == \"text\":\n context_idxs = [\n self.vocab_dictionary.get_idx_for_item(t.text)\n for t in sentence.tokens\n ]\n else:\n context_idxs = [\n self.vocab_dictionary.get_idx_for_item(t.get_tag(self.field).value)\n for t in sentence.tokens\n ]\n\n one_hot_sentences.extend(context_idxs)\n\n one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(\n flair.device\n )\n\n embedded = self.embedding_layer.forward(one_hot_sentences)\n\n index = 0\n for sentence in sentences:\n for token in sentence:\n embedding = embedded[index]\n token.set_embedding(self.name, embedding)\n index += 1\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"min_freq={}\".format(self.min_freq)\n\n\nclass HashEmbeddings(TokenEmbeddings):\n \"\"\"Standard embeddings with Hashing Trick.\"\"\"\n\n def __init__(\n self, num_embeddings: int = 1000, embedding_length: int = 300, hash_method=\"md5\"\n ):\n\n super().__init__()\n self.name = \"hash\"\n self.static_embeddings = False\n\n self.__num_embeddings = num_embeddings\n self.__embedding_length = embedding_length\n\n self.__hash_method = hash_method\n\n # model architecture\n self.embedding_layer = torch.nn.Embedding(\n self.__num_embeddings, self.__embedding_length\n )\n torch.nn.init.xavier_uniform_(self.embedding_layer.weight)\n\n self.to(flair.device)\n\n @property\n def num_embeddings(self) -> int:\n return self.__num_embeddings\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, 
sentences: List[Sentence]) -> List[Sentence]:\n def get_idx_for_item(text):\n hash_function = hashlib.new(self.__hash_method)\n hash_function.update(bytes(str(text), \"utf-8\"))\n return int(hash_function.hexdigest(), 16) % self.__num_embeddings\n\n hash_sentences = []\n for i, sentence in enumerate(sentences):\n context_idxs = [get_idx_for_item(t.text) for t in sentence.tokens]\n\n hash_sentences.extend(context_idxs)\n\n hash_sentences = torch.tensor(hash_sentences, dtype=torch.long).to(flair.device)\n\n embedded = self.embedding_layer.forward(hash_sentences)\n\n index = 0\n for sentence in sentences:\n for token in sentence:\n embedding = embedded[index]\n token.set_embedding(self.name, embedding)\n index += 1\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass MuseCrosslingualEmbeddings(TokenEmbeddings):\n def __init__(self,):\n self.name: str = f\"muse-crosslingual\"\n self.static_embeddings = True\n self.__embedding_length: int = 300\n self.language_embeddings = {}\n super().__init__()\n\n @lru_cache(maxsize=10000, typed=False)\n def get_cached_vec(self, language_code: str, word: str) -> torch.Tensor:\n current_embedding_model = self.language_embeddings[language_code]\n if word in current_embedding_model:\n word_embedding = current_embedding_model[word]\n elif word.lower() in current_embedding_model:\n word_embedding = current_embedding_model[word.lower()]\n elif re.sub(r\"\\d\", \"#\", word.lower()) in current_embedding_model:\n word_embedding = current_embedding_model[re.sub(r\"\\d\", \"#\", word.lower())]\n elif re.sub(r\"\\d\", \"0\", word.lower()) in current_embedding_model:\n word_embedding = current_embedding_model[re.sub(r\"\\d\", \"0\", word.lower())]\n else:\n word_embedding = np.zeros(self.embedding_length, dtype=\"float\")\n word_embedding = torch.tensor(\n word_embedding, device=flair.device, dtype=torch.float\n )\n return word_embedding\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n language_code = sentence.get_language_code()\n supported = [\n \"en\",\n \"de\",\n \"bg\",\n \"ca\",\n \"hr\",\n \"cs\",\n \"da\",\n \"nl\",\n \"et\",\n \"fi\",\n \"fr\",\n \"el\",\n \"he\",\n \"hu\",\n \"id\",\n \"it\",\n \"mk\",\n \"no\",\n \"pl\",\n \"pt\",\n \"ro\",\n \"ru\",\n \"sk\",\n ]\n if language_code not in supported:\n language_code = \"en\"\n\n if language_code not in self.language_embeddings:\n log.info(f\"Loading up MUSE embeddings for '{language_code}'!\")\n # download if necessary\n webpath = \"https://alan-nlp.s3.eu-central-1.amazonaws.com/resources/embeddings-muse\"\n cache_dir = Path(\"embeddings\") / \"MUSE\"\n cached_path(\n f\"{webpath}/muse.{language_code}.vec.gensim.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings_file = cached_path(\n f\"{webpath}/muse.{language_code}.vec.gensim\", cache_dir=cache_dir\n )\n\n # load the model\n self.language_embeddings[\n language_code\n ] = gensim.models.KeyedVectors.load(str(embeddings_file))\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n word_embedding = self.get_cached_vec(\n language_code=language_code, word=word\n )\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def __str__(self):\n return self.name\n\n\n# TODO: keep for backwards 
compatibility, but remove in future\nclass BPEmbSerializable(BPEmb):\n def __getstate__(self):\n state = self.__dict__.copy()\n # save the sentence piece model as binary file (not as path which may change)\n state[\"spm_model_binary\"] = open(self.model_file, mode=\"rb\").read()\n state[\"spm\"] = None\n return state\n\n def __setstate__(self, state):\n from bpemb.util import sentencepiece_load\n\n model_file = self.model_tpl.format(lang=state[\"lang\"], vs=state[\"vs\"])\n self.__dict__ = state\n\n # write out the binary sentence piece model into the expected directory\n self.cache_dir: Path = Path(flair.cache_root) / \"embeddings\"\n if \"spm_model_binary\" in self.__dict__:\n # if the model was saved as binary and it is not found on disk, write to appropriate path\n if not os.path.exists(self.cache_dir / state[\"lang\"]):\n os.makedirs(self.cache_dir / state[\"lang\"])\n self.model_file = self.cache_dir / model_file\n with open(self.model_file, \"wb\") as out:\n out.write(self.__dict__[\"spm_model_binary\"])\n else:\n # otherwise, use normal process and potentially trigger another download\n self.model_file = self._load_file(model_file)\n\n # once the modes if there, load it with sentence piece\n state[\"spm\"] = sentencepiece_load(self.model_file)\n\n\nclass BytePairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n language: str,\n dim: int = 50,\n syllables: int = 100000,\n cache_dir=Path(flair.cache_root) / \"embeddings\",\n ):\n \"\"\"\n Initializes BP embeddings. Constructor downloads required files if not there.\n \"\"\"\n\n self.name: str = f\"bpe-{language}-{syllables}-{dim}\"\n self.static_embeddings = True\n self.embedder = BPEmb(lang=language, vs=syllables, dim=dim, cache_dir=cache_dir)\n\n self.__embedding_length: int = self.embedder.emb.vector_size * 2\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word.strip() == \"\":\n # empty words get no embedding\n token.set_embedding(\n self.name, torch.zeros(self.embedding_length, dtype=torch.float)\n )\n else:\n # all other words get embedded\n embeddings = self.embedder.embed(word.lower())\n embedding = np.concatenate(\n (embeddings[0], embeddings[len(embeddings) - 1])\n )\n token.set_embedding(\n self.name, torch.tensor(embedding, dtype=torch.float)\n )\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n\nclass ELMoEmbeddings(TokenEmbeddings):\n \"\"\"Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.\n ELMo word vectors can be constructed by combining layers in different ways.\n Default is to concatene the top 3 layers in the LM.\"\"\"\n\n def __init__(\n self, model: str = \"original\", options_file: str = None, weight_file: str = None, embedding_mode: str = \"all\"\n ):\n super().__init__()\n\n try:\n import allennlp.commands.elmo\n except ModuleNotFoundError:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! 
The library \"allennlp\" is not installed!')\n log.warning(\n 'To use ELMoEmbeddings, please first install with \"pip install allennlp\"'\n )\n log.warning(\"-\" * 100)\n pass\n\n assert embedding_mode in [\"all\", \"top\", \"average\"]\n\n self.name = f\"elmo-{model}-{embedding_mode}\"\n self.static_embeddings = True\n\n if not options_file or not weight_file:\n # the default model for ELMo is the 'original' model, which is very large\n options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE\n weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE\n # alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name\n if model == \"small\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5\"\n if model == \"medium\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5\"\n if model in [\"large\", \"5.5B\"]:\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5\"\n if model == \"pt\" or model == \"portuguese\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5\"\n if model == \"pubmed\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5\"\n\n if embedding_mode == \"all\":\n self.embedding_mode_fn = lambda x: torch.cat(x, 0)\n elif embedding_mode == \"top\":\n self.embedding_mode_fn = lambda x: x[-1]\n elif embedding_mode == \"average\":\n self.embedding_mode_fn = lambda x: torch.mean(torch.stack(x), 0)\n\n # put on Cuda if available\n from flair import device\n\n if re.fullmatch(r\"cuda:[0-9]+\", str(device)):\n cuda_device = int(str(device).split(\":\")[-1])\n elif str(device) == \"cpu\":\n cuda_device = -1\n else:\n cuda_device = 0\n\n self.ee = allennlp.commands.elmo.ElmoEmbedder(\n options_file=options_file, weight_file=weight_file, cuda_device=cuda_device\n )\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n sentence_words: List[List[str]] = []\n for sentence in sentences:\n sentence_words.append([token.text for token in sentence])\n\n embeddings = 
self.ee.embed_batch(sentence_words)\n\n for i, sentence in enumerate(sentences):\n\n sentence_embeddings = embeddings[i]\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n elmo_embedding_layers = [\n torch.FloatTensor(sentence_embeddings[0, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[1, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[2, token_idx, :])\n ]\n word_embedding = self.embedding_mode_fn(elmo_embedding_layers)\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass NILCEmbeddings(WordEmbeddings):\n def __init__(self, embeddings: str, model: str = \"skip\", size: int = 100):\n \"\"\"\n Initializes portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).\n Constructor downloads required files if not there.\n :param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'\n :param model: one of: 'skip' or 'cbow'. This is not applicable to glove.\n :param size: one of: 50, 100, 300, 600 or 1000.\n \"\"\"\n\n base_path = \"http://143.107.183.175:22980/download.php?file=embeddings/\"\n\n cache_dir = Path(\"embeddings\") / embeddings.lower()\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\":\n cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif embeddings.lower() in [\"fasttext\", \"wang2vec\", \"word2vec\"]:\n cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n log.info(\"Reading embeddings from %s\" % embeddings)\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n open_inside_zip(str(embeddings), cache_dir=cache_dir)\n )\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super(TokenEmbeddings, self).__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def __str__(self):\n return self.name\n\n\ndef replace_with_language_code(string: str):\n string = string.replace(\"arabic-\", \"ar-\")\n string = string.replace(\"basque-\", \"eu-\")\n string = string.replace(\"bulgarian-\", \"bg-\")\n string = string.replace(\"croatian-\", \"hr-\")\n string = string.replace(\"czech-\", \"cs-\")\n string = string.replace(\"danish-\", \"da-\")\n string = string.replace(\"dutch-\", \"nl-\")\n string = string.replace(\"farsi-\", \"fa-\")\n string = string.replace(\"persian-\", \"fa-\")\n string = string.replace(\"finnish-\", \"fi-\")\n string = string.replace(\"french-\", \"fr-\")\n string = string.replace(\"german-\", \"de-\")\n string = string.replace(\"hebrew-\", \"he-\")\n string = string.replace(\"hindi-\", \"hi-\")\n string = string.replace(\"indonesian-\", \"id-\")\n string = string.replace(\"italian-\", \"it-\")\n string = string.replace(\"japanese-\", \"ja-\")\n string = string.replace(\"norwegian-\", \"no\")\n string = string.replace(\"polish-\", \"pl-\")\n string = string.replace(\"portuguese-\", \"pt-\")\n string = 
string.replace(\"slovenian-\", \"sl-\")\n string = string.replace(\"spanish-\", \"es-\")\n string = string.replace(\"swedish-\", \"sv-\")\n return string\n" ]
[ [ "torch.ones", "torch.enable_grad", "torch.nn.LSTM", "torch.cat", "torch.zeros", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Embedding", "torch.tensor", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.stack", "numpy.zeros" ] ]
bkoch4142/pytorch-sequence-models
[ "ba61e79066b220bd2f34d787d89bad7c87d4004e" ]
[ "src/models/sentiment_clf.py" ]
[ "import torch\nimport torch.nn as nn\nimport sys\nfrom models.rnn import RNN\nfrom models.lstm import LSTM\nfrom models.gru import GRU\n\n\nclass SentimentClassifier(nn.Module):\n def __init__(self, vocab_sz, n_hidden, rnn_type='RNN'):\n super(SentimentClassifier, self).__init__()\n\n self.embedding = nn.Embedding(vocab_sz, n_hidden)\n\n assert rnn_type in ['RNN', 'LSTM', 'GRU'], \"Unsupported rnn_type\"\n self.rnn = getattr(sys.modules[__name__], rnn_type)(n_hidden, n_hidden)\n\n self.dropout = nn.Dropout(0.2)\n\n self.linear = nn.Linear(n_hidden*2, 1)\n\n def forward(self, x):\n x = self.embedding(x)\n x = self.rnn(x)\n\n x = self.dropout(x)\n\n # Using the avg and max pool of all RNN outputs\n avg_pool = torch.mean(x, dim=1)\n max_pool, _ = torch.max(x, 1)\n\n # We concatenate them (hidden size before the linear layer is multiplied by 2)\n out = torch.cat((avg_pool, max_pool), dim=1)\n out = self.linear(out)\n\n # We dont apply sigmoid to output since nn.BCEWithLogitsLoss\n # combines a Sigmoid layer and the BCELoss\n return torch.squeeze(out, dim=1)\n" ]
[ [ "torch.mean", "torch.nn.Dropout", "torch.max", "torch.cat", "torch.nn.Embedding", "torch.nn.Linear", "torch.squeeze" ] ]
MGibsonint/nncf_pytorch
[ "1c1ee370460d2e4531c2bf353c7b89ccc659fa38", "1c1ee370460d2e4531c2bf353c7b89ccc659fa38" ]
[ "nncf/quantization/init_precision.py", "nncf/sparsity/magnitude/algo.py" ]
[ "import itertools\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import List, Dict, Union\n\nimport os\nimport torch\nfrom bisect import bisect_left\nfrom operator import itemgetter\nfrom torch import Tensor, nn\nfrom torch.nn.modules.loss import _Loss\n\nfrom nncf.debug import is_debug\nfrom nncf.dynamic_graph.context import no_nncf_trace\nfrom nncf.nncf_logger import logger as nncf_logger\nfrom nncf.nncf_network import NNCFNetwork, CompressionModuleType\nfrom nncf.quantization.layers import QUANTIZATION_MODULES, QuantizersSwitcher, BaseQuantizer\nfrom .hessian_trace import HessianTraceEstimator\nfrom .hw_precision_constraints import HWPrecisionConstraints\nfrom .quantizer_id import QuantizerId\nfrom ..dynamic_graph.graph import NNCFGraph\nfrom ..dynamic_graph.transform_graph import is_nncf_module\nfrom ..layers import NNCFConv2d\nfrom ..structures import QuantizationPrecisionInitArgs\nfrom ..utils import in_scope_list, get_all_modules_by_type\n\n\nclass ManualPrecisionInitializer:\n def __init__(self, algo: 'QuantizationController', config: 'NNCFConfig',\n init_args: QuantizationPrecisionInitArgs = None):\n self._algo = algo\n self._model = self._algo._model # type: NNCFNetwork\n all_quantizers = algo.all_quantizations\n self._bitwidth_per_scope = config.get('bitwidth_per_scope', {}) # type: List[List]\n self._hw_precision_constraints = algo._hw_precision_constraints\n self.original_precisions = {q_id: quantizer.num_bits for q_id, quantizer in all_quantizers.items()}\n self._quantizers_handler = WeightQuantizersHandler(self._model, all_quantizers,\n self._hw_precision_constraints)\n\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n self._ordered_weight_quantizations = self._quantizers_handler.get_ordered_weight_quantizers_per_id()\n\n self._all_quantizers_per_scope = get_all_modules_by_type(\n self._model.get_compression_modules_by_type(CompressionModuleType.ACTIVATION_QUANTIZER), quantization_types)\n self._all_quantizers_per_scope.update(get_all_modules_by_type(\n self._model.get_compression_modules_by_type(CompressionModuleType.FUNCTION_QUANTIZER), quantization_types))\n self._all_quantizers_per_scope.update(self._quantizers_handler.get_all_ordered_weight_quantizers_per_scope())\n\n def apply_init(self):\n for pair in self._bitwidth_per_scope:\n if len(pair) != 2:\n raise ValueError('Invalid format of bitwidth per scope: [int, str] is expected')\n bitwidth = pair[0]\n scope_name = pair[1]\n is_matched = False\n for scope, quantizer in self._all_quantizers_per_scope.items():\n if in_scope_list(str(scope), scope_name):\n quantizer.num_bits = bitwidth\n is_matched = True\n if not is_matched:\n raise ValueError(\n 'Invalid scope name `{}`, failed to assign bitwidth {} to it'.format(scope_name, bitwidth))\n\n\nclass PerturbationObserver:\n def __init__(self, device):\n super().__init__()\n self.device = device\n self.perturbation = None\n self.numels = None\n\n def calc_perturbation(self, module, inputs: torch.Tensor, output: torch.Tensor):\n input_ = inputs[0] if isinstance(inputs, tuple) else inputs\n with no_nncf_trace():\n self.perturbation = torch.norm(input_ - output, p=2) ** 2\n self.numels = input_.size().numel()\n self.input_norm = torch.norm(input_, p=2) ** 2\n\n def reset(self):\n self.perturbation = None\n self.numels = None\n\n def get_observation(self):\n return self.perturbation\n\n def get_numels(self):\n return self.numels\n\n def get_input_norm(self):\n return 
self.input_norm\n\n\nclass Perturbations:\n def __init__(self):\n self._perturbations = {} # type: Dict[int, Dict[int, Tensor]]\n\n def add(self, layer_id: int, bitwidth: int, perturbation: Tensor):\n if layer_id in self._perturbations:\n self._perturbations[layer_id].update({bitwidth: perturbation})\n else:\n self._perturbations[layer_id] = {bitwidth: perturbation}\n\n def get(self, layer_id: int, bitwidth: int) -> Tensor:\n layer_perturbations = self._perturbations[layer_id]\n return layer_perturbations[bitwidth]\n\n def get_all(self) -> Dict[int, Dict[int, Tensor]]:\n return self._perturbations\n\n\nclass TracesPerLayer:\n def __init__(self, traces_per_layer: Tensor):\n self._traces_per_layer = traces_per_layer\n self._traces_order = [i[0] for i in\n sorted(enumerate(traces_per_layer), reverse=False, key=lambda x: x[1])]\n\n def get(self, index: int) -> Tensor:\n return self._traces_per_layer[index]\n\n def get_order_of_traces(self) -> List[int]:\n return self._traces_order\n\n def get_all(self) -> Tensor:\n return self._traces_per_layer\n\n def __bool__(self):\n return bool(self._traces_order)\n\n\nclass HAWQPrecisionInitializer(ManualPrecisionInitializer):\n def __init__(self, algo: 'QuantizationController', config: 'NNCFConfig',\n init_args: QuantizationPrecisionInitArgs):\n super().__init__(algo, config, init_args)\n self._criterion = init_args.criterion\n self._data_loader = init_args.data_loader\n self._traces_per_layer_path = config.get('traces_per_layer_path', None)\n self._num_data_points = config.get('num_data_points', 1000)\n self._iter_number = config.get('iter_number', 500)\n self._tolerance = config.get('tolerance', 1e-5)\n self._compression_ratio = config.get('compression_ratio', 1.5)\n self._bits = self._hw_precision_constraints.get_all_unique_bits() \\\n if self._hw_precision_constraints else config.get('bits', [4, 8])\n self._init_device = init_args.device\n self.flops_counter = CompressionRatioCalculator(self._model, self._quantizers_handler)\n\n def apply_init(self):\n original_device = next(self._model.parameters()).device\n self._model.to(self._init_device)\n\n traces_per_layer = self._calc_traces(self._criterion, self._iter_number, self._tolerance)\n if not traces_per_layer:\n raise RuntimeError('Failed to calculate hessian traces!')\n\n traces_order = traces_per_layer.get_order_of_traces()\n num_weights = len(self._ordered_weight_quantizations)\n bits_configurations = self.get_configs_constrained_by_order(self._bits, num_weights)\n\n ordered_weight_quantization_ids = list(self._ordered_weight_quantizations.keys())\n bits_configurations = self._filter_configs_by_precision_constraints(bits_configurations,\n self._hw_precision_constraints,\n ordered_weight_quantization_ids,\n traces_order)\n if not bits_configurations:\n raise RuntimeError('All bits configurations are incompatible with HW Config!')\n\n skipped_quantizers = self._quantizers_handler.get_skipped_weight_quantizers_per_id()\n min_ratio, max_ratio = self.flops_counter.ratio_limits(self._bits, traces_order, self._hw_precision_constraints,\n skipped_quantizers)\n if not min_ratio <= self._compression_ratio <= max_ratio:\n raise AttributeError('Invalid compression ratio={}. 
Should be between within range [{:.2f}, {:.2f}]'.format(\n self._compression_ratio, min_ratio, max_ratio))\n\n perturbations, weight_observers = self.calc_quantization_noise()\n\n configuration_metric = self.calc_hawq_metric_per_configuration(bits_configurations, perturbations,\n traces_per_layer, self._init_device)\n\n flops_bits_per_config = self.get_flops_bits_per_config(bits_configurations, traces_order)\n config_index = self.choose_configuration(configuration_metric, flops_bits_per_config)\n chosen_config_per_layer = bits_configurations[config_index]\n chosen_config_per_layer = self.get_ordered_config(chosen_config_per_layer, traces_order)\n nncf_logger.info('Chosen HAWQ configuration with ratio={:.2f}, bitwidth per weightable layer={}'.format(\n flops_bits_per_config[config_index], chosen_config_per_layer))\n nncf_logger.debug('Order of the weightable layers in the HAWQ configuration={}'.format(traces_order))\n\n self.set_chosen_config(chosen_config_per_layer)\n\n if is_debug():\n hawq_debugger = HAWQDebugger(bits_configurations, perturbations,\n weight_observers, traces_per_layer, self._bits)\n hawq_debugger.dump_metric_MB(configuration_metric)\n hawq_debugger.dump_metric_flops(configuration_metric, flops_bits_per_config, config_index)\n hawq_debugger.dump_avg_traces()\n hawq_debugger.dump_density_of_quantization_noise()\n hawq_debugger.dump_perturbations_ratio()\n hawq_debugger.dump_bitwidth_graph(self._algo, self._model)\n\n self._model.rebuild_graph()\n str_bw = [str(element) for element in self.get_bitwidth_per_scope()]\n nncf_logger.info('\\n'.join(['\\n\\\"bitwidth_per_scope\\\": [', ',\\n'.join(str_bw), ']']))\n\n self._model.to(original_device)\n\n ordered_metric_per_layer = self.get_metric_per_layer(chosen_config_per_layer, perturbations, traces_per_layer)\n return ordered_metric_per_layer\n\n def get_flops_bits_per_config(self, bits_configurations, traces_order):\n skipped = self._quantizers_handler.get_skipped_weight_quantizers_per_id()\n flops_bits_per_config = []\n for bits_config in bits_configurations:\n bits_config = self.get_ordered_config(bits_config, traces_order)\n flops_bits_per_config.append(self.flops_counter.ratio_for_bits_configuration(bits_config, skipped))\n return flops_bits_per_config\n\n def get_bitwidth_per_scope(self) -> List[List[Union[int, str]]]:\n sorted_quantizers = OrderedDict(sorted(self._all_quantizers_per_scope.items(), key=lambda x: str(x[0])))\n full_bitwidth_per_scope = []\n for scope, quantizer in sorted_quantizers.items():\n quantizer_id = self._quantizers_handler.get_id(quantizer)\n if quantizer.num_bits != self.original_precisions[quantizer_id]:\n full_bitwidth_per_scope.append([quantizer.num_bits, str(scope)])\n return full_bitwidth_per_scope\n\n @staticmethod\n def get_ordered_config(bit_configuration: List[int], order: List[int]) -> List[int]:\n ordered_config = [0] * len(bit_configuration)\n for i, bitwidth in enumerate(bit_configuration):\n ordered_config[order[i]] = bitwidth\n return ordered_config\n\n @staticmethod\n def disable_all_gradients_except_weights_of_quantized_modules(\n quantizers_switcher: QuantizersSwitcher,\n quantized_weight_modules_registry: Dict[str, torch.nn.Module],\n model: nn.Module,\n scopes_of_skipped_weight_quantizers: List[str] = None) -> List[str]:\n \"\"\"\n Disables gradients of all parameters, except for layers that have quantizers for weights, which wasn't skipped\n because of single precision constraints.\n :param quantizers_switcher: object that is responsible for enabling and disabling 
quantizers\n :param quantized_weight_modules_registry: modules with quantized weights per scope\n :param model: model to access all parameters\n :param scopes_of_skipped_weight_quantizers: list of string scopes of layers that have a single precision\n constraint and which weights should be skipped from bitwidth initialization\n :return: list of names of the parameters that were originally disabled\n \"\"\"\n disabled_gradients = []\n\n # Some quantizers can be disabled in a staged scenario on creation of staged scheduler\n # Need to save originally disabled quantizers for restoring their state after initialization\n quantizers_switcher.disable_quantizers()\n\n # remember gradients of quantized modules that were enabled\n gradients_to_enable = []\n for scope, quantized_module in quantized_weight_modules_registry.items():\n is_skipped = bool(scopes_of_skipped_weight_quantizers) and (scope in scopes_of_skipped_weight_quantizers)\n for param_name, param in quantized_module.named_parameters():\n if param.requires_grad:\n # disable gradients for skipped module for optimization of Hessian Trace search\n if is_skipped:\n disabled_gradients.append(param_name)\n param.requires_grad = False\n else:\n gradients_to_enable.append(param_name)\n\n # disable all gradients, except already disabled\n for param_name, param in model.named_parameters():\n if not param.requires_grad:\n disabled_gradients.append(param_name)\n else:\n param.requires_grad = False\n\n # enable gradients of quantized modules that were disabled\n for quantized_module in quantized_weight_modules_registry.values():\n for param_name, param in quantized_module.named_parameters():\n if param_name in gradients_to_enable and not 'bias' in param_name:\n param.requires_grad = True\n return disabled_gradients\n\n def _calc_traces(self, criterion: _Loss, iter_number: int, tolerance: float) -> TracesPerLayer:\n if self._traces_per_layer_path:\n return TracesPerLayer(torch.load(self._traces_per_layer_path).to(self._init_device))\n\n quantizers_switcher = QuantizersSwitcher(list(self._all_quantizers_per_scope.values()))\n disabled_gradients = self.disable_all_gradients_except_weights_of_quantized_modules(\n quantizers_switcher,\n self._algo.quantized_weight_modules_registry,\n self._model,\n self._quantizers_handler.get_scope_of_skipped_weight_quantizers())\n\n trace_estimator = HessianTraceEstimator(self._model, criterion, self._init_device, self._data_loader,\n self._num_data_points)\n avg_traces = trace_estimator.get_average_traces(max_iter=iter_number, tolerance=tolerance)\n\n self.restore_disabled_gradients(quantizers_switcher, self._model, disabled_gradients)\n\n return TracesPerLayer(avg_traces)\n\n @staticmethod\n def restore_disabled_gradients(quantizers_switcher: QuantizersSwitcher,\n model: nn.Module, disabled_gradients: List[str]):\n \"\"\"\n Enables gradients of all parameters back, except for ones that were originally disabled\n :param quantizers_switcher: object that is responsible for enabling and disabling quantizers\n :param model: model to access all parameters\n :param disabled_gradients: list of names of the parameters that were originally disabled\n \"\"\"\n for param_name, param in model.named_parameters():\n if param_name not in disabled_gradients:\n param.requires_grad = True\n quantizers_switcher.enable_quantizers()\n\n @staticmethod\n def get_configs_constrained_by_order(bits_: List[int], num_layers: int) -> List[List[int]]:\n bits = sorted(bits_)\n m = len(bits)\n L = num_layers\n bit_configs = []\n for j in range(1, m + 
1):\n for combo_bits in itertools.combinations(bits, j):\n for combo_partitions in itertools.combinations(list(range(1, L)), j - 1):\n bit_config = []\n prev_p = 0\n for (p, b) in zip(combo_partitions + (L,), combo_bits):\n bit_config += [b] * (p - prev_p)\n prev_p = p\n bit_configs.append(bit_config)\n return bit_configs\n\n @staticmethod\n def _filter_configs_by_precision_constraints(bits_configurations: List[List[int]],\n hw_precision_constraints: HWPrecisionConstraints,\n ordered_weight_ids: List[QuantizerId],\n traces_order: List[int]) -> List[List[int]]:\n if not hw_precision_constraints:\n return bits_configurations\n\n filtered_bits_configurations = []\n for bits_configuration in bits_configurations:\n is_all_bitwidth_compatible = True\n for i, bitwidth in enumerate(bits_configuration):\n weight_id = ordered_weight_ids[traces_order[i]]\n bits_constraints = hw_precision_constraints.get(weight_id)\n if bitwidth not in bits_constraints:\n is_all_bitwidth_compatible = False\n break\n if is_all_bitwidth_compatible:\n filtered_bits_configurations.append(bits_configuration)\n return filtered_bits_configurations\n\n def calc_quantization_noise(self) -> [Perturbations, List[PerturbationObserver]]:\n hook_handles = []\n observers = []\n for module in self._ordered_weight_quantizations.values():\n observer = PerturbationObserver(self._init_device)\n hook_handles.append(module.register_forward_hook(observer.calc_perturbation))\n observers.append(observer)\n\n perturbations = Perturbations()\n for b in self._bits:\n for wi in self._ordered_weight_quantizations.values():\n wi.num_bits = b\n\n self._model.do_dummy_forward(force_eval=True)\n\n for i, observer in enumerate(observers):\n perturbations.add(layer_id=i, bitwidth=b, perturbation=observer.get_observation().to(self._init_device))\n\n for handle in hook_handles:\n handle.remove()\n return perturbations, observers\n\n @staticmethod\n def calc_hawq_metric_per_configuration(bits_configurations: List[List[int]], perturbations: Perturbations,\n traces_per_layer: TracesPerLayer, device) -> List[Tensor]:\n configuration_metric = []\n for bits_config in bits_configurations:\n hawq_metric = torch.Tensor([0]).to(device)\n for i, layer_bits in enumerate(bits_config):\n order = traces_per_layer.get_order_of_traces()[i]\n hawq_metric += traces_per_layer.get(order) * perturbations.get(layer_id=order,\n bitwidth=layer_bits)\n configuration_metric.append(hawq_metric)\n return configuration_metric\n\n def choose_configuration(self, configuration_metric: List[Tensor], flops_bits_per_config: List[float]) -> int:\n num_configs = len(configuration_metric)\n\n sorted_flops_order = [x[0] for x in sorted(enumerate(flops_bits_per_config), reverse=False, key=lambda x: x[1])]\n sorted_flops_bits_per_config = sorted(flops_bits_per_config)\n\n boundary_index = bisect_left(sorted_flops_bits_per_config, self._compression_ratio)\n indexes_to_check = [sorted_flops_order[i] for i in range(boundary_index, num_configs)]\n best_metric = min(list(itemgetter(*indexes_to_check)(configuration_metric)))\n best_config_index = configuration_metric.index(best_metric)\n return best_config_index\n\n def set_chosen_config(self, weight_bits_per_layer: List[int]):\n for wq, bits in zip(self._ordered_weight_quantizations.values(), weight_bits_per_layer):\n wq.num_bits = bits\n pairs = self._algo.get_weights_activation_quantizers_pairs()\n for pair in pairs:\n wqs, aq = pair\n aq.num_bits = max([wq.num_bits for wq in wqs])\n\n def get_metric_per_layer(self, chosen_config_per_layer: 
List[int], perturbations: Perturbations,\n traces_per_layer: TracesPerLayer):\n metric_per_layer = []\n for i, layer_bits in enumerate(chosen_config_per_layer):\n metric_per_layer.append(traces_per_layer.get(i) * perturbations.get(i, layer_bits))\n ordered_metric_per_layer = [i[0] for i in\n sorted(enumerate(metric_per_layer), reverse=True, key=lambda x: x[1])]\n return ordered_metric_per_layer\n\n\nclass WeightQuantizersHandler:\n \"\"\"\n Defines weight quantizers for precision initialization in the order of execution.\n \"\"\"\n\n def __init__(self, model, all_quantizers: Dict[QuantizerId, BaseQuantizer], constraints: HWPrecisionConstraints):\n self._quantizer_address_to_id_mapping = {id(quantizer): q_id for q_id, quantizer in all_quantizers.items()}\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n weight_module_dict = model.get_nncf_wrapped_model()\n self._ordered_weight_quantizers_per_scope = get_all_modules_by_type(weight_module_dict, quantization_types)\n ordered_weight_quantization_list = []\n self._scopes_of_skipped_weight_quantizers = []\n self._skipped_weight_quantizers = {}\n for scope, quantizer in self._ordered_weight_quantizers_per_scope.items():\n address = id(quantizer)\n if quantizer.is_weights:\n quantizer_id = self._quantizer_address_to_id_mapping[address]\n # no need to init quantizer with single precision constraint\n if len(constraints.get(quantizer_id)) != 1:\n ordered_weight_quantization_list.append((quantizer_id, quantizer))\n else:\n self._scopes_of_skipped_weight_quantizers.append(str(scope))\n self._skipped_weight_quantizers[quantizer_id] = quantizer\n self._ordered_weight_quantizations = OrderedDict(ordered_weight_quantization_list)\n\n def get_scope_of_skipped_weight_quantizers(self) -> List['Scope']:\n return self._scopes_of_skipped_weight_quantizers\n\n def get_all_ordered_weight_quantizers_per_scope(self) -> Dict['Scope', BaseQuantizer]:\n return self._ordered_weight_quantizers_per_scope\n\n def get_ordered_weight_quantizers_per_id(self) -> Dict[QuantizerId, BaseQuantizer]:\n return self._ordered_weight_quantizations\n\n def get_id(self, quantizer: BaseQuantizer) -> Dict[int, QuantizerId]:\n address = id(quantizer)\n return self._quantizer_address_to_id_mapping[address]\n\n def get_skipped_weight_quantizers_per_id(self) -> Dict[QuantizerId, BaseQuantizer]:\n return self._skipped_weight_quantizers\n\n\nclass CompressionRatioCalculator:\n \"\"\"\n Calculates compression ratio - ratio between bits complexity of fully INT8 model and mixed-precision lower-bit one.\n Bit complexity of the model is a sum of bit complexities for each quantized layer, which are a multiplication of\n FLOPS for the layer by number of bits for its quantization. 
The compression ratio can be used for estimation of\n performance boost for quantized model.\n \"\"\"\n DEFAULT_NUMBER_OF_BITS = 8\n\n def __init__(self, model, quantizers_handler: WeightQuantizersHandler):\n flops_count_per_module_name = model.get_flops_per_module()\n\n self._ordered_weight_quantizations = quantizers_handler.get_ordered_weight_quantizers_per_id()\n\n self.ops_per_quantizer_id = {}\n for name, module in model.named_modules():\n curr_ops = flops_count_per_module_name.get(name, 0)\n if is_nncf_module(module):\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n all_quantizers_in_module = get_all_modules_by_type(module, quantization_types)\n for quantizer in all_quantizers_in_module.values():\n if quantizer.is_weights:\n quantizer_id = quantizers_handler.get_id(quantizer)\n self.ops_per_quantizer_id[quantizer_id] = curr_ops\n\n self.total_ops_count = sum(v for v in self.ops_per_quantizer_id.values()) * self.DEFAULT_NUMBER_OF_BITS\n\n def ratio_for_bits_configuration(self, bits_config: List[int],\n skipped: Dict[QuantizerId, BaseQuantizer] = None) -> float:\n \"\"\"\n Calculates compression ratio for a given bits configuration\n\n Args:\n bits_config: list of bits for each weight quantization\n skipped: quantizers that were skipped from bitwidth initialization, since their bitwidth is determined\n unambiguously based on constraints of the HW config\n\n Returns:\n compression ratio of mixed-precision model by relation to fully INT8\n \"\"\"\n quantizer_ops = 0\n for num_bits, (quantizer_id, quantizer) in zip(bits_config, self._ordered_weight_quantizations.items()):\n quantizer_ops += num_bits * self.ops_per_quantizer_id[quantizer_id]\n if skipped:\n for quantizer_id, quantizer in skipped.items():\n quantizer_ops += quantizer.num_bits * self.ops_per_quantizer_id[quantizer_id]\n\n return self.total_ops_count / quantizer_ops\n\n def ratio_limits(self, bits: List[int], order: List[int] = None, constraints: HWPrecisionConstraints = None,\n skipped: Dict[QuantizerId, BaseQuantizer] = None) -> (float, float):\n \"\"\"\n Calculates minimum and maximum compression ratio.\n\n Args:\n bits: list of all available bits for weight quantization\n order: defines the order in which bits are assigned for quantizers\n constraints: precision constraints defined by HW config\n skipped: quantizers that were skipped from bitwidth initialization, since their bitwidth is determined\n unambiguously based on constraints of the HW config\n\n Returns:\n minimum and maximum compression ratio\n \"\"\"\n config_len = len(self._ordered_weight_quantizations)\n min_config = [min(bits)] * config_len\n max_config = [max(bits)] * config_len\n if not order:\n order = list(range(config_len))\n if constraints:\n for i, quantizer_id in enumerate(self._ordered_weight_quantizations):\n bit_constraints = constraints.get(quantizer_id)\n if bit_constraints:\n min_config[order[i]] = min(bit_constraints)\n max_config[order[i]] = max(bit_constraints)\n\n min_config = HAWQPrecisionInitializer.get_ordered_config(min_config, order)\n max_config = HAWQPrecisionInitializer.get_ordered_config(max_config, order)\n\n max_ratio = self.ratio_for_bits_configuration(min_config, skipped)\n min_ratio = self.ratio_for_bits_configuration(max_config, skipped)\n return min_ratio, max_ratio\n\n\nclass HAWQDebugger:\n def __init__(self, bits_configurations: List[List[int]],\n perturbations: Perturbations,\n weight_observers: List[PerturbationObserver],\n traces_per_layer: TracesPerLayer, 
bits: List[int]):\n self._bits_configurations = bits_configurations\n self._num_weights = len(weight_observers)\n self._perturbations = perturbations\n\n from nncf.debug import DEBUG_LOG_DIR\n self._dump_dir = Path(DEBUG_LOG_DIR) / Path(\"hawq_dumps\")\n self._dump_dir.mkdir(parents=True, exist_ok=True)\n\n self._traces_order = traces_per_layer.get_order_of_traces()\n self._traces_per_layer = traces_per_layer.get_all()\n\n num_of_weights = []\n norm_of_weights = []\n for i in range(self._num_weights):\n order = self._traces_order[i]\n num_of_weights.append(weight_observers[order].get_numels())\n norm_of_weights.append(weight_observers[order].get_input_norm())\n self._num_weights_per_layer = torch.Tensor(num_of_weights)\n self._norm_weights_per_layer = torch.Tensor(norm_of_weights)\n\n bits_in_megabyte = 2 ** 23\n self._model_sizes = []\n for bits_config in self._bits_configurations:\n size = torch.sum(torch.Tensor(bits_config) * self._num_weights_per_layer).item() / bits_in_megabyte\n self._model_sizes.append(size)\n self._bits = bits\n\n @staticmethod\n def get_all_quantizers_per_full_scope(model):\n all_quantizations = OrderedDict()\n for class_type in QUANTIZATION_MODULES.registry_dict.values():\n quantization_type = class_type.__name__\n all_quantizations.update(\n get_all_modules_by_type(\n model.get_compression_modules_by_type(CompressionModuleType.ACTIVATION_QUANTIZER),\n quantization_type))\n all_quantizations.update(\n get_all_modules_by_type(\n model.get_compression_modules_by_type(CompressionModuleType.FUNCTION_QUANTIZER),\n quantization_type))\n all_quantizations.update(get_all_modules_by_type(model.get_nncf_wrapped_model(), quantization_type))\n all_quantizations = OrderedDict(sorted(all_quantizations.items(), key=lambda x: str(x[0])))\n return all_quantizations\n\n @staticmethod\n def get_bitwidth_graph(algo_ctrl, model, all_quantizers_per_full_scope) -> NNCFGraph:\n nncf_graph = model.get_graph()\n for node_key in nncf_graph.get_all_node_keys():\n node = nncf_graph.get_nx_node_by_key(node_key)\n node_id = node[NNCFGraph.ID_NODE_ATTR]\n color = ''\n if node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR]:\n operator_name = node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].operator_name\n scope = node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic.scope_in_model\n module = model.get_module_by_scope(scope)\n if isinstance(module, NNCFConv2d):\n color = 'blue'\n if module.groups == module.in_channels:\n operator_name = 'DW_Conv2d'\n color = 'purple'\n\n node['label'] = '_#'.join([operator_name, str(node_id)])\n if color:\n node['color'] = color\n\n non_weight_quantizers = algo_ctrl.non_weight_quantizers\n bits_color_map = {4: 'red', 8: 'green', 6: 'orange'}\n for quantizer_id, quantizer_info in non_weight_quantizers.items():\n affected_iap_ctx_list = quantizer_info.affected_ia_op_exec_contexts\n\n for activation_iap_ctx in affected_iap_ctx_list:\n post_hooked_nx_node_key = nncf_graph.get_node_id_by_iap_context(activation_iap_ctx)\n post_hooked_module_node = nncf_graph.get_nx_node_by_key(post_hooked_nx_node_key)\n operator_name = post_hooked_module_node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].operator_name\n node_id = post_hooked_module_node[NNCFGraph.ID_NODE_ATTR]\n post_hooked_module_node['label'] = '_#'.join([operator_name, str(node_id)])\n\n for next_nx_node_key in nncf_graph.get_successors(post_hooked_nx_node_key):\n activation_fq_node = nncf_graph.get_nx_node_by_key(next_nx_node_key)\n bits = non_weight_quantizers[quantizer_id].quantizer_module_ref.num_bits\n\n activation_fq_node['color'] = 
bits_color_map[bits]\n node_id = activation_fq_node[NNCFGraph.ID_NODE_ATTR]\n activation_fq_node['label'] = '{}_bit__AFQ_#{}'.format(bits, str(node_id))\n\n for scope, quantizer in all_quantizers_per_full_scope.items():\n if quantizer.is_weights:\n node = nncf_graph.find_node_in_nx_graph_by_scope(scope)\n if not node:\n raise AttributeError('Failed to get node by scope={}'.format(str(scope)))\n if node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR]:\n bits = quantizer.num_bits\n node_id = node[NNCFGraph.ID_NODE_ATTR]\n node['label'] = '{}_bit__WFQ_#{}'.format(bits, str(node_id))\n node['color'] = bits_color_map[bits]\n return nncf_graph\n\n def dump_avg_traces(self):\n import matplotlib.pyplot as plt\n dump_file = os.path.join(self._dump_dir, 'avg_traces_per_layer')\n torch.save(self._traces_per_layer, dump_file)\n fig = plt.figure()\n fig.suptitle('Average Hessian Trace')\n ax = fig.add_subplot(2, 1, 1)\n ax.set_yscale('log')\n ax.set_xlabel('weight quantizers')\n ax.set_ylabel('average hessian trace')\n ax.plot(self._traces_per_layer.cpu().numpy())\n plt.savefig(dump_file)\n\n def dump_metric_MB(self, configuration_metric: List[Tensor]):\n import matplotlib.pyplot as plt\n list_to_plot = [cm.item() for cm in configuration_metric]\n fig = plt.figure()\n fig.suptitle('Pareto Frontier')\n ax = fig.add_subplot(2, 1, 1)\n ax.set_yscale('log')\n ax.set_xlabel('Model Size (MB)')\n ax.set_ylabel('Metric value (total perturbation)')\n ax.scatter(self._model_sizes, list_to_plot, s=20, facecolors='none', edgecolors='r')\n cm = torch.Tensor(configuration_metric)\n cm_m = cm.median().item()\n configuration_index = configuration_metric.index(cm_m)\n ms_m = self._model_sizes[configuration_index]\n ax.scatter(ms_m, cm_m, s=30, facecolors='none', edgecolors='b', label='median from all metrics')\n ax.legend()\n plt.savefig(os.path.join(self._dump_dir, 'Pareto_Frontier'))\n nncf_logger.info(\n 'Distribution of HAWQ metrics: min_value={:.3f}, max_value={:.3f}, median_value={:.3f}, '\n 'median_index={}, total_number={}'.format(cm.min().item(), cm.max().item(), cm_m,\n configuration_index,\n len(configuration_metric)))\n\n def dump_metric_flops(self, configuration_metric: List[Tensor], flops_per_config: List[float],\n choosen_config_index: int):\n import matplotlib.pyplot as plt\n list_to_plot = [cm.item() for cm in configuration_metric]\n fig = plt.figure()\n fig.suptitle('Pareto Frontier')\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel('Compression ratio: total INT8 FLOPS_BITS / total MIXED INT FLOPS_BITS')\n ax.set_ylabel('Metric value (total perturbation)')\n ax.scatter(flops_per_config, list_to_plot, s=10, alpha=0.3) # s=20, facecolors='none', edgecolors='r')\n flops_per_config = [torch.Tensor([v]) for v in flops_per_config]\n cm = torch.Tensor(flops_per_config)\n cm_m = cm.median().item()\n configuration_index = flops_per_config.index(cm_m)\n ms_m = configuration_metric[configuration_index].item()\n ax.scatter(cm_m, ms_m, s=30, facecolors='none', edgecolors='b', label='median from all metrics')\n cm_c = configuration_metric[choosen_config_index].item()\n fpc_c = flops_per_config[choosen_config_index].item()\n ax.scatter(fpc_c, cm_c, s=30, facecolors='none', edgecolors='r', label='chosen config')\n\n ax.legend()\n plt.savefig(os.path.join(self._dump_dir, 'Pareto_Frontier_compress_ratio'))\n\n def dump_density_of_quantization_noise(self):\n noise_per_config = [] # type: List[Tensor]\n for bits_config in self._bits_configurations:\n qnoise = 0\n for i in range(self._num_weights):\n layer_bits = bits_config[i]\n 
order = self._traces_order[i]\n qnoise += self._perturbations.get(layer_id=order, bitwidth=layer_bits)\n noise_per_config.append(qnoise)\n\n list_to_plot = [cm.item() for cm in noise_per_config]\n import matplotlib.pyplot as plt\n fig = plt.figure()\n fig.suptitle('Density of quantization noise')\n ax = fig.add_subplot(2, 1, 1)\n ax.set_yscale('log')\n ax.set_xlabel('Blocks')\n ax.set_ylabel('Noise value')\n ax.scatter(self._model_sizes, list_to_plot, s=20, alpha=0.3)\n ax.legend()\n plt.savefig(os.path.join(self._dump_dir, 'Density_of_quantization_noise'))\n\n def dump_perturbations_ratio(self):\n import matplotlib.pyplot as plt\n fig = plt.figure()\n fig.suptitle('Quantization noise vs Average Trace')\n ax = fig.add_subplot(2, 1, 1)\n ax.set_xlabel('Blocks')\n ax.set_yscale('log')\n b = max(self._bits)\n perturb = [p[b] for p in self._perturbations.get_all().values()]\n ax.plot(\n [p / m / n for p, m, n in zip(perturb, self._num_weights_per_layer, self._norm_weights_per_layer)],\n label='normalized {}-bit noise'.format(b))\n ax.plot(perturb, label='{}-bit noise'.format(b))\n ax.plot(self._traces_per_layer.cpu().numpy(), label='trace')\n ax.plot([n * p for n, p in zip(self._traces_per_layer, perturb)], label='trace * noise')\n ax.legend()\n plt.savefig(os.path.join(self._dump_dir, 'Quantization_noise_vs_Average_Trace'))\n\n def dump_bitwidth_graph(self, algo_ctrl: 'QuantizationController', model: 'NNCFNetwork'):\n all_quantizers_per_full_scope = self.get_all_quantizers_per_full_scope(model)\n graph = self.get_bitwidth_graph(algo_ctrl, model, all_quantizers_per_full_scope)\n graph.dump_graph(self._dump_dir / Path('bitwidth_graph.dot'))\n\n\nclass PrecisionInitializerFactory:\n @staticmethod\n def create(init_type: str):\n if init_type == \"manual\":\n return ManualPrecisionInitializer\n if init_type == \"hawq\":\n return HAWQPrecisionInitializer\n raise NotImplementedError\n", "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom typing import List\n\nimport torch\n\nfrom nncf.algo_selector import COMPRESSION_ALGORITHMS\nfrom nncf.compression_method_api import CompressionAlgorithmController, CompressionLevel\nfrom nncf.nncf_network import NNCFNetwork\nfrom nncf.sparsity.base_algo import BaseSparsityAlgoBuilder, BaseSparsityAlgoController, SparseModuleInfo\nfrom nncf.sparsity.layers import BinaryMask\nfrom nncf.sparsity.magnitude.functions import WEIGHT_IMPORTANCE_FUNCTIONS, calc_magnitude_binary_mask\nfrom nncf.sparsity.schedulers import SPARSITY_SCHEDULERS\n\n\n@COMPRESSION_ALGORITHMS.register('magnitude_sparsity')\nclass MagnitudeSparsityBuilder(BaseSparsityAlgoBuilder):\n def create_weight_sparsifying_operation(self, module):\n return BinaryMask(module.weight.size())\n\n def build_controller(self, target_model: NNCFNetwork) -> CompressionAlgorithmController:\n params = self.config.get(\"params\", {})\n return MagnitudeSparsityController(target_model, self._sparsified_module_info,\n self.config,\n params.get('weight_importance', 
'normed_abs'))\n\n\nclass MagnitudeSparsityController(BaseSparsityAlgoController):\n def __init__(self, target_model: NNCFNetwork,\n sparsified_module_info: List[SparseModuleInfo],\n config, weight_importance: str):\n super().__init__(target_model, sparsified_module_info)\n self.config = config\n params = self.config.get(\"params\", {})\n self.weight_importance = WEIGHT_IMPORTANCE_FUNCTIONS.get(weight_importance)\n scheduler_cls = SPARSITY_SCHEDULERS.get(params.get(\"schedule\", \"polynomial\"))\n self._scheduler = scheduler_cls(self, params)\n\n def statistics(self):\n stats = super().statistics()\n stats['sparsity_threshold'] = self._select_threshold(self.sparsity_rate_for_sparsified_modules)\n return stats\n\n def freeze(self):\n pass\n\n def set_sparsity_level(self, sparsity_level):\n if sparsity_level >= 1 or sparsity_level < 0:\n raise AttributeError(\n 'Sparsity level should be within interval [0,1), actual value to set is: {}'.format(sparsity_level))\n self._set_masks_for_threshold(self._select_threshold(sparsity_level))\n self.run_batchnorm_adaptation(self.config)\n\n def _select_threshold(self, sparsity_level):\n all_weights = self._collect_all_weights()\n if not all_weights:\n return 0.0\n all_weights_tensor, _ = torch.cat(all_weights).sort()\n threshold = all_weights_tensor[int((all_weights_tensor.size(0) - 1) * sparsity_level)].item()\n return threshold\n\n def _set_masks_for_threshold(self, threshold_val):\n for layer in self.sparsified_module_info:\n layer.operand.binary_mask = calc_magnitude_binary_mask(layer.module.weight,\n self.weight_importance,\n threshold_val)\n\n def _collect_all_weights(self):\n all_weights = []\n for minfo in self.sparsified_module_info:\n all_weights.append(self.weight_importance(minfo.module.weight).view(-1))\n return all_weights\n\n def create_weight_sparsifying_operation(self, module):\n return BinaryMask(module.weight.size())\n\n def compression_level(self) -> CompressionLevel:\n return self.scheduler.compression_level()\n" ]
[ [ "torch.norm", "torch.Tensor", "torch.load", "matplotlib.pyplot.savefig", "torch.save", "matplotlib.pyplot.figure" ], [ "torch.cat" ] ]
AWehrhahn/CATS
[ "40b9f21ffccda8f70f9d1a9d7335102083847ce3", "40b9f21ffccda8f70f9d1a9d7335102083847ce3" ]
[ "cats/least_squares/least_squares.py", "cats/data_modules/marcs.py" ]
[ "\"\"\"Generic interface for least-square minimization.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom warnings import warn\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.sparse import issparse, csr_matrix\nfrom scipy.sparse.linalg import LinearOperator\nfrom scipy.optimize import _minpack, OptimizeResult\nfrom scipy.optimize._numdiff import approx_derivative, group_columns\n\nfrom scipy.optimize._lsq.dogbox import dogbox\nfrom scipy.optimize._lsq.common import EPS, in_bounds, make_strictly_feasible\n\nfrom scipy.optimize._lsq.least_squares import (\n TERMINATION_MESSAGES,\n FROM_MINPACK_TO_COMMON,\n call_minpack,\n prepare_bounds,\n check_tolerance,\n check_x_scale,\n check_jac_sparsity,\n huber,\n soft_l1,\n cauchy,\n arctan,\n)\n\nfrom .trf import trf\n\n\n# Loss functions.\n\n\ndef linear(z, rho, cost_only):\n rho[0] = 0.5 * z\n if cost_only:\n return\n rho[1] = z ** 0.5\n rho[2] = 1\n\n\ndef linear_regression(x):\n return x ** 2\n\n\ndef delta_regression(x):\n x0 = (x[0] - x[1],)\n xi = 2 * x[1:-1] - x[:-2] - x[2:]\n xn = (x[-1] - x[-2],)\n reg = np.concatenate((x0, xi, xn))\n return reg ** 2\n\n\nIMPLEMENTED_LOSSES = dict(\n linear=linear, huber=huber, soft_l1=soft_l1, cauchy=cauchy, arctan=arctan\n)\n\n\nIMPLEMENTED_REGULARIZATIONS = {\n None: lambda x: 0,\n \"linear\": linear_regression,\n \"delta\": delta_regression,\n}\n\n\ndef construct_loss_function(m, n, loss, regularization, f_scale, r_scale):\n if loss == \"linear\" and regularization is None:\n return None\n\n if not callable(loss):\n loss = IMPLEMENTED_LOSSES[loss]\n rho = np.empty((3, m))\n\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n loss(z, rho, cost_only=cost_only)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n\n else:\n\n def loss_function(f, cost_only=False):\n z = (f / f_scale) ** 2\n rho = loss(z)\n if cost_only:\n return 0.5 * f_scale ** 2 * np.sum(rho[0])\n rho[0] *= f_scale ** 2\n rho[2] /= f_scale ** 2\n return rho\n\n if regularization is not None:\n if not callable(regularization):\n regularization = IMPLEMENTED_REGULARIZATIONS[regularization]\n\n def regr_function(f, x, cost_only=False):\n rho = loss_function(f, cost_only=cost_only)\n reg = regularization(x)\n if cost_only:\n return rho + r_scale ** 2 * np.sum(reg)\n rho[0] += r_scale ** 2 * np.sum(reg) / m\n return rho\n\n else:\n regr_function = lambda f, x, cost_only: loss_function(f, cost_only=cost_only)\n\n return regr_function\n\n\ndef least_squares(\n fun,\n x0,\n jac=\"2-point\",\n bounds=(-np.inf, np.inf),\n method=\"trf\",\n ftol=1e-8,\n xtol=1e-8,\n gtol=1e-8,\n x_scale=1.0,\n loss=\"linear\",\n regularization=None,\n f_scale=1.0,\n r_scale=1.0,\n diff_step=None,\n tr_solver=None,\n tr_options={},\n jac_sparsity=None,\n max_nfev=None,\n verbose=0,\n args=(),\n kwargs={},\n):\n \"\"\"Solve a nonlinear least-squares problem with bounds on the variables.\n\n Given the residuals f(x) (an m-dimensional real function of n real\n variables) and the loss function rho(s) (a scalar function), `least_squares`\n finds a local minimum of the cost function F(x)::\n\n minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)\n subject to lb <= x <= ub\n\n The purpose of the loss function rho(s) is to reduce the influence of\n outliers on the solution.\n\n Parameters\n ----------\n fun : callable\n Function which computes the vector of residuals, with the signature\n ``fun(x, *args, **kwargs)``, i.e., the 
minimization proceeds with\n respect to its first argument. The argument ``x`` passed to this\n function is an ndarray of shape (n,) (never a scalar, even for n=1).\n It must return a 1-d array_like of shape (m,) or a scalar. If the\n argument ``x`` is complex or the function ``fun`` returns complex\n residuals, it must be wrapped in a real function of real arguments,\n as shown at the end of the Examples section.\n x0 : array_like with shape (n,) or float\n Initial guess on independent variables. If float, it will be treated\n as a 1-d array with one element.\n jac : {'2-point', '3-point', 'cs', callable}, optional\n Method of computing the Jacobian matrix (an m-by-n matrix, where\n element (i, j) is the partial derivative of f[i] with respect to\n x[j]). The keywords select a finite difference scheme for numerical\n estimation. The scheme '3-point' is more accurate, but requires\n twice as many operations as '2-point' (default). The scheme 'cs'\n uses complex steps, and while potentially the most accurate, it is\n applicable only when `fun` correctly handles complex inputs and\n can be analytically continued to the complex plane. Method 'lm'\n always uses the '2-point' scheme. If callable, it is used as\n ``jac(x, *args, **kwargs)`` and should return a good approximation\n (or the exact value) for the Jacobian as an array_like (np.atleast_2d\n is applied), a sparse matrix or a `scipy.sparse.linalg.LinearOperator`.\n bounds : 2-tuple of array_like, optional\n Lower and upper bounds on independent variables. Defaults to no bounds.\n Each array must match the size of `x0` or be a scalar, in the latter\n case a bound will be the same for all variables. Use ``np.inf`` with\n an appropriate sign to disable bounds on all or some variables.\n method : {'trf', 'dogbox', 'lm'}, optional\n Algorithm to perform minimization.\n\n * 'trf' : Trust Region Reflective algorithm, particularly suitable\n for large sparse problems with bounds. Generally robust method.\n * 'dogbox' : dogleg algorithm with rectangular trust regions,\n typical use case is small problems with bounds. Not recommended\n for problems with rank-deficient Jacobian.\n * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.\n Doesn't handle bounds and sparse Jacobians. Usually the most\n efficient method for small unconstrained problems.\n\n Default is 'trf'. See Notes for more information.\n ftol : float or None, optional\n Tolerance for termination by the change of the cost function. Default\n is 1e-8. The optimization process is stopped when ``dF < ftol * F``,\n and there was an adequate agreement between a local quadratic model and\n the true model in the last step. If None, the termination by this\n condition is disabled.\n xtol : float or None, optional\n Tolerance for termination by the change of the independent variables.\n Default is 1e-8. The exact condition depends on the `method` used:\n\n * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``\n * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is\n a trust-region radius and ``xs`` is the value of ``x``\n scaled according to `x_scale` parameter (see below).\n\n If None, the termination by this condition is disabled.\n gtol : float or None, optional\n Tolerance for termination by the norm of the gradient. 
Default is 1e-8.\n The exact condition depends on a `method` used:\n\n * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where\n ``g_scaled`` is the value of the gradient scaled to account for\n the presence of the bounds [STIR]_.\n * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where\n ``g_free`` is the gradient with respect to the variables which\n are not in the optimal state on the boundary.\n * For 'lm' : the maximum absolute value of the cosine of angles\n between columns of the Jacobian and the residual vector is less\n than `gtol`, or the residual vector is zero.\n\n If None, the termination by this condition is disabled.\n x_scale : array_like or 'jac', optional\n Characteristic scale of each variable. Setting `x_scale` is equivalent\n to reformulating the problem in scaled variables ``xs = x / x_scale``.\n An alternative view is that the size of a trust region along j-th\n dimension is proportional to ``x_scale[j]``. Improved convergence may\n be achieved by setting `x_scale` such that a step of a given size\n along any of the scaled variables has a similar effect on the cost\n function. If set to 'jac', the scale is iteratively updated using the\n inverse norms of the columns of the Jacobian matrix (as described in\n [JJMore]_).\n loss : str or callable, optional\n Determines the loss function. The following keyword values are allowed:\n\n * 'linear' (default) : ``rho(z) = z``. Gives a standard\n least-squares problem.\n * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth\n approximation of l1 (absolute value) loss. Usually a good\n choice for robust least squares.\n * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works\n similarly to 'soft_l1'.\n * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers\n influence, but may cause difficulties in optimization process.\n * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on\n a single residual, has properties similar to 'cauchy'.\n\n If callable, it must take a 1-d ndarray ``z=f**2`` and return an\n array_like with shape (3, m) where row 0 contains function values,\n row 1 contains first derivatives and row 2 contains second\n derivatives. Method 'lm' supports only 'linear' loss.\n f_scale : float, optional\n Value of soft margin between inlier and outlier residuals, default\n is 1.0. The loss function is evaluated as follows\n ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,\n and ``rho`` is determined by `loss` parameter. This parameter has\n no effect with ``loss='linear'``, but for other `loss` values it is\n of crucial importance.\n max_nfev : None or int, optional\n Maximum number of function evaluations before the termination.\n If None (default), the value is chosen automatically:\n\n * For 'trf' and 'dogbox' : 100 * n.\n * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)\n otherwise (because 'lm' counts function calls in Jacobian\n estimation).\n\n diff_step : None or array_like, optional\n Determines the relative step size for the finite difference\n approximation of the Jacobian. The actual step is computed as\n ``x * diff_step``. If None (default), then `diff_step` is taken to be\n a conventional \"optimal\" power of machine epsilon for the finite\n difference scheme used [NR]_.\n tr_solver : {None, 'exact', 'lsmr'}, optional\n Method for solving trust-region subproblems, relevant only for 'trf'\n and 'dogbox' methods.\n\n * 'exact' is suitable for not very large problems with dense\n Jacobian matrices. 
The computational complexity per iteration is\n comparable to a singular value decomposition of the Jacobian\n matrix.\n * 'lsmr' is suitable for problems with sparse and large Jacobian\n matrices. It uses the iterative procedure\n `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n least-squares problem and only requires matrix-vector product\n evaluations.\n\n If None (default) the solver is chosen based on the type of Jacobian\n returned on the first iteration.\n tr_options : dict, optional\n Keyword options passed to trust-region solver.\n\n * ``tr_solver='exact'``: `tr_options` are ignored.\n * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n Additionally ``method='trf'`` supports 'regularize' option\n (bool, default is True) which adds a regularization term to the\n normal equation, which improves convergence if the Jacobian is\n rank-deficient [Byrd]_ (eq. 3.4).\n\n jac_sparsity : {None, array_like, sparse matrix}, optional\n Defines the sparsity structure of the Jacobian matrix for finite\n difference estimation, its shape must be (m, n). If the Jacobian has\n only few non-zero elements in *each* row, providing the sparsity\n structure will greatly speed up the computations [Curtis]_. A zero\n entry means that a corresponding element in the Jacobian is identically\n zero. If provided, forces the use of 'lsmr' trust-region solver.\n If None (default) then dense differencing will be used. Has no effect\n for 'lm' method.\n verbose : {0, 1, 2}, optional\n Level of algorithm's verbosity:\n\n * 0 (default) : work silently.\n * 1 : display a termination report.\n * 2 : display progress during iterations (not supported by 'lm'\n method).\n\n args, kwargs : tuple and dict, optional\n Additional arguments passed to `fun` and `jac`. Both empty by default.\n The calling signature is ``fun(x, *args, **kwargs)`` and the same for\n `jac`.\n\n Returns\n -------\n `OptimizeResult` with the following fields defined:\n x : ndarray, shape (n,)\n Solution found.\n cost : float\n Value of the cost function at the solution.\n fun : ndarray, shape (m,)\n Vector of residuals at the solution.\n jac : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Modified Jacobian matrix at the solution, in the sense that J^T J\n is a Gauss-Newton approximation of the Hessian of the cost function.\n The type is the same as the one used by the algorithm.\n grad : ndarray, shape (m,)\n Gradient of the cost function at the solution.\n optimality : float\n First-order optimality measure. In unconstrained problems, it is always\n the uniform norm of the gradient. In constrained problems, it is the\n quantity which was compared with `gtol` during iterations.\n active_mask : ndarray of int, shape (n,)\n Each component shows whether a corresponding constraint is active\n (that is, whether a variable is at the bound):\n\n * 0 : a constraint is not active.\n * -1 : a lower bound is active.\n * 1 : an upper bound is active.\n\n Might be somewhat arbitrary for 'trf' method as it generates a sequence\n of strictly feasible iterates and `active_mask` is determined within a\n tolerance threshold.\n nfev : int\n Number of function evaluations done. Methods 'trf' and 'dogbox' do not\n count function calls for numerical Jacobian approximation, as opposed\n to 'lm' method.\n njev : int or None\n Number of Jacobian evaluations done. 
If numerical Jacobian\n approximation is used in 'lm' method, it is set to None.\n status : int\n The reason for algorithm termination:\n\n * -1 : improper input parameters status returned from MINPACK.\n * 0 : the maximum number of function evaluations is exceeded.\n * 1 : `gtol` termination condition is satisfied.\n * 2 : `ftol` termination condition is satisfied.\n * 3 : `xtol` termination condition is satisfied.\n * 4 : Both `ftol` and `xtol` termination conditions are satisfied.\n\n message : str\n Verbal description of the termination reason.\n success : bool\n True if one of the convergence criteria is satisfied (`status` > 0).\n\n See Also\n --------\n leastsq : A legacy wrapper for the MINPACK implementation of the\n Levenberg-Marquadt algorithm.\n curve_fit : Least-squares minimization applied to a curve fitting problem.\n\n Notes\n -----\n Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares\n algorithms implemented in MINPACK (lmder, lmdif). It runs the\n Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.\n The implementation is based on paper [JJMore]_, it is very robust and\n efficient with a lot of smart tricks. It should be your first choice\n for unconstrained problems. Note that it doesn't support bounds. Also\n it doesn't work when m < n.\n\n Method 'trf' (Trust Region Reflective) is motivated by the process of\n solving a system of equations, which constitute the first-order optimality\n condition for a bound-constrained minimization problem as formulated in\n [STIR]_. The algorithm iteratively solves trust-region subproblems\n augmented by a special diagonal quadratic term and with trust-region shape\n determined by the distance from the bounds and the direction of the\n gradient. This enhancements help to avoid making steps directly into bounds\n and efficiently explore the whole space of variables. To further improve\n convergence, the algorithm considers search directions reflected from the\n bounds. To obey theoretical requirements, the algorithm keeps iterates\n strictly feasible. With dense Jacobians trust-region subproblems are\n solved by an exact method very similar to the one described in [JJMore]_\n (and implemented in MINPACK). The difference from the MINPACK\n implementation is that a singular value decomposition of a Jacobian\n matrix is done once per iteration, instead of a QR decomposition and series\n of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace\n approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.\n The subspace is spanned by a scaled gradient and an approximate\n Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no\n constraints are imposed the algorithm is very similar to MINPACK and has\n generally comparable performance. The algorithm works quite robust in\n unbounded and bounded problems, thus it is chosen as a default algorithm.\n\n Method 'dogbox' operates in a trust-region framework, but considers\n rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.\n The intersection of a current trust region and initial bounds is again\n rectangular, so on each iteration a quadratic minimization problem subject\n to bound constraints is solved approximately by Powell's dogleg method\n [NumOpt]_. The required Gauss-Newton step can be computed exactly for\n dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large\n sparse Jacobians. 
The algorithm is likely to exhibit slow convergence when\n the rank of Jacobian is less than the number of variables. The algorithm\n often outperforms 'trf' in bounded problems with a small number of\n variables.\n\n Robust loss functions are implemented as described in [BA]_. The idea\n is to modify a residual vector and a Jacobian matrix on each iteration\n such that computed gradient and Gauss-Newton Hessian approximation match\n the true gradient and Hessian approximation of the cost function. Then\n the algorithm proceeds in a normal way, i.e. robust loss functions are\n implemented as a simple wrapper over standard least-squares algorithms.\n\n .. versionadded:: 0.17.0\n\n References\n ----------\n .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 21, Number 1, pp 1-23, 1999.\n .. [NR] William H. Press et. al., \"Numerical Recipes. The Art of Scientific\n Computing. 3rd edition\", Sec. 5.7.\n .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, \"Approximate\n solution of the trust region problem by minimization over\n two-dimensional subspaces\", Math. Programming, 40, pp. 247-263,\n 1988.\n .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, \"On the estimation of\n sparse Jacobian matrices\", Journal of the Institute of\n Mathematics and its Applications, 13, pp. 117-120, 1974.\n .. [JJMore] J. J. More, \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture\n Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n .. [Voglis] C. Voglis and I. E. Lagaris, \"A Rectangular Trust Region\n Dogleg Approach for Unconstrained and Bound Constrained\n Nonlinear Optimization\", WSEAS International Conference on\n Applied Mathematics, Corfu, Greece, 2004.\n .. [NumOpt] J. Nocedal and S. J. Wright, \"Numerical optimization,\n 2nd edition\", Chapter 4.\n .. [BA] B. Triggs et. al., \"Bundle Adjustment - A Modern Synthesis\",\n Proceedings of the International Workshop on Vision Algorithms:\n Theory and Practice, pp. 298-372, 1999.\n\n Examples\n --------\n In this example we find a minimum of the Rosenbrock function without bounds\n on independent variables.\n\n >>> def fun_rosenbrock(x):\n ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])\n\n Notice that we only provide the vector of the residuals. The algorithm\n constructs the cost function as a sum of squares of the residuals, which\n gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.\n\n >>> from scipy.optimize import least_squares\n >>> x0_rosenbrock = np.array([2, 2])\n >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)\n >>> res_1.x\n array([ 1., 1.])\n >>> res_1.cost\n 9.8669242910846867e-30\n >>> res_1.optimality\n 8.8928864934219529e-14\n\n We now constrain the variables, in such a way that the previous solution\n becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and\n ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter\n to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.\n\n We also provide the analytic Jacobian:\n\n >>> def jac_rosenbrock(x):\n ... return np.array([\n ... [-20 * x[0], 10],\n ... [-1, 0]])\n\n Putting this all together, we see that the new solution lies on the bound:\n\n >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,\n ... 
bounds=([-np.inf, 1.5], np.inf))\n >>> res_2.x\n array([ 1.22437075, 1.5 ])\n >>> res_2.cost\n 0.025213093946805685\n >>> res_2.optimality\n 1.5885401433157753e-07\n\n Now we solve a system of equations (i.e., the cost function should be zero\n at a minimum) for a Broyden tridiagonal vector-valued function of 100000\n variables:\n\n >>> def fun_broyden(x):\n ... f = (3 - x) * x + 1\n ... f[1:] -= x[:-1]\n ... f[:-1] -= 2 * x[1:]\n ... return f\n\n The corresponding Jacobian matrix is sparse. We tell the algorithm to\n estimate it by finite differences and provide the sparsity structure of\n Jacobian to significantly speed up this process.\n\n >>> from scipy.sparse import lil_matrix\n >>> def sparsity_broyden(n):\n ... sparsity = lil_matrix((n, n), dtype=int)\n ... i = np.arange(n)\n ... sparsity[i, i] = 1\n ... i = np.arange(1, n)\n ... sparsity[i, i - 1] = 1\n ... i = np.arange(n - 1)\n ... sparsity[i, i + 1] = 1\n ... return sparsity\n ...\n >>> n = 100000\n >>> x0_broyden = -np.ones(n)\n ...\n >>> res_3 = least_squares(fun_broyden, x0_broyden,\n ... jac_sparsity=sparsity_broyden(n))\n >>> res_3.cost\n 4.5687069299604613e-23\n >>> res_3.optimality\n 1.1650454296851518e-11\n\n Let's also solve a curve fitting problem using robust loss function to\n take care of outliers in the data. Define the model function as\n ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an\n observation and a, b, c are parameters to estimate.\n\n First, define the function which generates the data with noise and\n outliers, define the model parameters, and generate data:\n\n >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):\n ... y = a + b * np.exp(t * c)\n ...\n ... rnd = np.random.RandomState(random_state)\n ... error = noise * rnd.randn(t.size)\n ... outliers = rnd.randint(0, t.size, n_outliers)\n ... error[outliers] *= 10\n ...\n ... return y + error\n ...\n >>> a = 0.5\n >>> b = 2.0\n >>> c = -1\n >>> t_min = 0\n >>> t_max = 10\n >>> n_points = 15\n ...\n >>> t_train = np.linspace(t_min, t_max, n_points)\n >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)\n\n Define function for computing residuals and initial estimate of\n parameters.\n\n >>> def fun(x, t, y):\n ... return x[0] + x[1] * np.exp(x[2] * t) - y\n ...\n >>> x0 = np.array([1.0, 1.0, 0.0])\n\n Compute a standard least-squares solution:\n\n >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))\n\n Now compute two solutions with two different robust loss functions. The\n parameter `f_scale` is set to 0.1, meaning that inlier residuals should\n not significantly exceed 0.1 (the noise level used).\n\n >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,\n ... args=(t_train, y_train))\n >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,\n ... args=(t_train, y_train))\n\n And finally plot all the curves. We see that by selecting an appropriate\n `loss` we can get estimates close to optimal even in the presence of\n strong outliers. 
But keep in mind that generally it is recommended to try\n 'soft_l1' or 'huber' losses first (if at all necessary) as the other two\n options may cause difficulties in optimization process.\n\n >>> t_test = np.linspace(t_min, t_max, n_points * 10)\n >>> y_true = gen_data(t_test, a, b, c)\n >>> y_lsq = gen_data(t_test, *res_lsq.x)\n >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)\n >>> y_log = gen_data(t_test, *res_log.x)\n ...\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(t_train, y_train, 'o')\n >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')\n >>> plt.plot(t_test, y_lsq, label='linear loss')\n >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')\n >>> plt.plot(t_test, y_log, label='cauchy loss')\n >>> plt.xlabel(\"t\")\n >>> plt.ylabel(\"y\")\n >>> plt.legend()\n >>> plt.show()\n\n In the next example, we show how complex-valued residual functions of\n complex variables can be optimized with ``least_squares()``. Consider the\n following function:\n\n >>> def f(z):\n ... return z - (0.5 + 0.5j)\n\n We wrap it into a function of real variables that returns real residuals\n by simply handling the real and imaginary parts as independent variables:\n\n >>> def f_wrap(x):\n ... fx = f(x[0] + 1j*x[1])\n ... return np.array([fx.real, fx.imag])\n\n Thus, instead of the original m-dimensional complex function of n complex\n variables we optimize a 2m-dimensional real function of 2n real variables:\n\n >>> from scipy.optimize import least_squares\n >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))\n >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j\n >>> z\n (0.49999999999925893+0.49999999999925893j)\n\n \"\"\"\n if method not in [\"trf\", \"dogbox\", \"lm\"]:\n raise ValueError(\"`method` must be 'trf', 'dogbox' or 'lm'.\")\n\n if jac not in [\"2-point\", \"3-point\", \"cs\"] and not callable(jac):\n raise ValueError(\"`jac` must be '2-point', '3-point', 'cs' or \" \"callable.\")\n\n if tr_solver not in [None, \"exact\", \"lsmr\"]:\n raise ValueError(\"`tr_solver` must be None, 'exact' or 'lsmr'.\")\n\n if loss not in IMPLEMENTED_LOSSES and not callable(loss):\n raise ValueError(\n \"`loss` must be one of {0} or a callable.\".format(IMPLEMENTED_LOSSES.keys())\n )\n\n if regularization not in IMPLEMENTED_REGULARIZATIONS and not callable(\n regularization\n ):\n raise ValueError(\n \"`regularization` must be one of {0} or a callable.\".format(\n IMPLEMENTED_REGULARIZATIONS.keys()\n )\n )\n\n if method == \"lm\" and loss != \"linear\":\n raise ValueError(\"method='lm' supports only 'linear' loss function.\")\n\n if method == \"lm\" and regularization is not None:\n raise ValueError(\"method='lm' does not support regularization.\")\n\n if verbose not in [0, 1, 2]:\n raise ValueError(\"`verbose` must be in [0, 1, 2].\")\n\n if len(bounds) != 2:\n raise ValueError(\"`bounds` must contain 2 elements.\")\n\n if max_nfev is not None and max_nfev <= 0:\n raise ValueError(\"`max_nfev` must be None or positive integer.\")\n\n if np.iscomplexobj(x0):\n raise ValueError(\"`x0` must be real.\")\n\n x0 = np.atleast_1d(x0).astype(float)\n\n if x0.ndim > 1:\n raise ValueError(\"`x0` must have at most 1 dimension.\")\n\n lb, ub = prepare_bounds(bounds, x0.shape[0])\n\n if method == \"lm\" and not np.all((lb == -np.inf) & (ub == np.inf)):\n raise ValueError(\"Method 'lm' doesn't support bounds.\")\n\n if lb.shape != x0.shape or ub.shape != x0.shape:\n raise ValueError(\"Inconsistent shapes between bounds and `x0`.\")\n\n if np.any(lb >= ub):\n raise ValueError(\n \"Each 
lower bound must be strictly less than each \" \"upper bound.\"\n )\n\n if not in_bounds(x0, lb, ub):\n raise ValueError(\"`x0` is infeasible.\")\n\n x_scale = check_x_scale(x_scale, x0)\n\n ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)\n\n def fun_wrapped(x):\n return np.atleast_1d(fun(x, *args, **kwargs))\n\n if method == \"trf\":\n x0 = make_strictly_feasible(x0, lb, ub)\n\n f0 = fun_wrapped(x0)\n\n if f0.ndim != 1:\n raise ValueError(\n \"`fun` must return at most 1-d array_like. \"\n \"f0.shape: {0}\".format(f0.shape)\n )\n\n if not np.all(np.isfinite(f0)):\n raise ValueError(\"Residuals are not finite in the initial point.\")\n\n n = x0.size\n m = f0.size\n\n if method == \"lm\" and m < n:\n raise ValueError(\n \"Method 'lm' doesn't work when the number of \"\n \"residuals is less than the number of variables.\"\n )\n\n loss_function = construct_loss_function(\n m, n, loss, regularization, f_scale, r_scale\n )\n if callable(loss):\n rho = loss_function(f0, x0)\n if rho.shape != (3, m):\n raise ValueError(\"The return value of `loss` callable has wrong \" \"shape.\")\n initial_cost = 0.5 * np.sum(rho[0])\n elif loss_function is not None:\n initial_cost = loss_function(f0, x0, cost_only=True)\n else:\n initial_cost = 0.5 * np.dot(f0, f0)\n\n if callable(jac):\n J0 = jac(x0, *args, **kwargs)\n\n if issparse(J0):\n J0 = csr_matrix(J0)\n\n def jac_wrapped(x, _=None):\n return csr_matrix(jac(x, *args, **kwargs))\n\n elif isinstance(J0, LinearOperator):\n\n def jac_wrapped(x, _=None):\n return jac(x, *args, **kwargs)\n\n else:\n J0 = np.atleast_2d(J0)\n\n def jac_wrapped(x, _=None):\n return np.atleast_2d(jac(x, *args, **kwargs))\n\n else: # Estimate Jacobian by finite differences.\n if method == \"lm\":\n if jac_sparsity is not None:\n raise ValueError(\"method='lm' does not support \" \"`jac_sparsity`.\")\n\n if jac != \"2-point\":\n warn(\n \"jac='{0}' works equivalently to '2-point' \"\n \"for method='lm'.\".format(jac)\n )\n\n J0 = jac_wrapped = None\n else:\n if jac_sparsity is not None and tr_solver == \"exact\":\n raise ValueError(\n \"tr_solver='exact' is incompatible \" \"with `jac_sparsity`.\"\n )\n\n def jac_wrapped(x, f):\n J = approx_derivative(\n fun,\n x,\n rel_step=diff_step,\n method=jac,\n f0=f,\n bounds=bounds,\n args=args,\n kwargs=kwargs,\n sparsity=jac_sparsity,\n )\n if J.ndim != 2: # J is guaranteed not sparse.\n J = np.atleast_2d(J)\n\n return J\n\n if jac_sparsity == \"auto\":\n jac_sparsity = None\n J0 = jac_wrapped(x0, f0)\n jac_sparsity = J0 != 0\n\n jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)\n J0 = jac_wrapped(x0, f0)\n\n if J0 is not None:\n if J0.shape != (m, n):\n raise ValueError(\n \"The return value of `jac` has wrong shape: expected {0}, \"\n \"actual {1}.\".format((m, n), J0.shape)\n )\n\n if not isinstance(J0, np.ndarray):\n if method == \"lm\":\n raise ValueError(\n \"method='lm' works only with dense \" \"Jacobian matrices.\"\n )\n\n if tr_solver == \"exact\":\n raise ValueError(\n \"tr_solver='exact' works only with dense \" \"Jacobian matrices.\"\n )\n\n jac_scale = isinstance(x_scale, str) and x_scale == \"jac\"\n if isinstance(J0, LinearOperator) and jac_scale:\n raise ValueError(\n \"x_scale='jac' can't be used when `jac` \" \"returns LinearOperator.\"\n )\n\n if tr_solver is None:\n if isinstance(J0, np.ndarray):\n tr_solver = \"exact\"\n else:\n tr_solver = \"lsmr\"\n\n if method == \"lm\":\n result = call_minpack(\n fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, max_nfev, x_scale, diff_step\n )\n\n elif method == 
\"trf\":\n result = trf(\n fun_wrapped,\n jac_wrapped,\n x0,\n f0,\n J0,\n lb,\n ub,\n ftol,\n xtol,\n gtol,\n max_nfev,\n x_scale,\n loss_function,\n tr_solver,\n tr_options.copy(),\n verbose,\n )\n\n elif method == \"dogbox\":\n if tr_solver == \"lsmr\" and \"regularize\" in tr_options:\n warn(\n \"The keyword 'regularize' in `tr_options` is not relevant \"\n \"for 'dogbox' method.\"\n )\n tr_options = tr_options.copy()\n del tr_options[\"regularize\"]\n\n result = dogbox(\n fun_wrapped,\n jac_wrapped,\n x0,\n f0,\n J0,\n lb,\n ub,\n ftol,\n xtol,\n gtol,\n max_nfev,\n x_scale,\n loss_function,\n tr_solver,\n tr_options,\n verbose,\n )\n\n result.message = TERMINATION_MESSAGES[result.status]\n result.success = result.status > 0\n\n if verbose >= 1:\n print(result.message)\n print(\n \"Function evaluations {0}, initial cost {1:.4e}, final cost \"\n \"{2:.4e}, first-order optimality {3:.2e}.\".format(\n result.nfev, initial_cost, result.cost, result.optimality\n )\n )\n\n return result\n", "import logging\nfrom os.path import dirname, join\n\nimport numpy as np\nimport pandas as pd\nfrom astropy import units as u\n\nfrom .datasource import DataSource\nfrom ..spectrum import Spectrum1D\n\nlogger = logging.getLogger(__name__)\n\n\nclass MarcsStellar(DataSource):\n\n flux_units = u.erg / u.cm ** 2 / u.s / u.AA\n\n def __init__(self, star):\n super().__init__()\n self.star = star\n self.data_dir = self.config[\"data_dir\"]\n\n\n def get(self):\n # TODO Interpolate from grid\n \n teff = self.star.teff\n logg = self.star.logg\n monh = self.star.monh\n vt = self.star.vturb\n\n geom = \"p\" # or s for spherical\n mass = 0 # only has a value for spherical\n # alpha, C, N, O abundance\n a, c, n, o = 0, 0, 0, 0\n r, s = 0, 0\n\n\n fname = f\"{geom}{teff:04d}_g{logg:+1.1f}_m{mass:1.1f}_t{vt:02d}_st_z{monh:+1.2f}_a{a:+1.2f}_c{c:+1.2f}_n{n:+1.2f}_o{o:+1.2f}_r{r:+1.2f}_s{s:+1.2f}.flx\"\n flux_file = join(self.data_dir, fname)\n wl_file = join(self.data_dir, \"flx_wavelengths.vac\")\n\n flux = pd.read_csv(flux_file, header=None, names=[\"flx\"], sep=\"\\t\")\n wave = pd.read_csv(wl_file, header=None, names=[\"wave\"], sep=\"\\t\")\n flux = flux.values.ravel() << self.flux_units\n wave = wave.values.ravel() << u.AA\n\n spec = Spectrum1D(\n flux=flux,\n spectral_axis=wave,\n reference_frame=\"barycentric\",\n star=self.star,\n source=\"marcs\",\n description=f\"stellar spectrum of {self.star['name']}\",\n )\n\n return spec\n" ]
[ [ "numpy.dot", "numpy.concatenate", "scipy.optimize._lsq.least_squares.check_tolerance", "scipy.optimize._lsq.common.in_bounds", "numpy.all", "numpy.any", "numpy.iscomplexobj", "scipy.optimize._lsq.common.make_strictly_feasible", "scipy.optimize._lsq.dogbox.dogbox", "scipy.sparse.issparse", "numpy.atleast_1d", "scipy.optimize._numdiff.approx_derivative", "scipy.optimize._lsq.least_squares.prepare_bounds", "scipy.sparse.csr_matrix", "numpy.atleast_2d", "scipy.optimize._lsq.least_squares.check_jac_sparsity", "numpy.sum", "scipy.optimize._lsq.least_squares.call_minpack", "numpy.isfinite", "scipy.optimize._lsq.least_squares.check_x_scale", "numpy.empty" ], [ "pandas.read_csv" ] ]
aashrithbandaru/fmltc
[ "3b95626583d4004d06c542992cf8e35967dcada5" ]
[ "model_trainer.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"lizlooney@google.com (Liz Looney)\"\n\n# Python Standard Library\nfrom datetime import timedelta\nimport json\nimport logging\nimport os\nimport time\nimport traceback\n\n# Other Modules\nfrom google.oauth2 import service_account\nimport googleapiclient.discovery\nfrom tensorflow.python.summary.summary_iterator import summary_iterator\n\n# My Modules\nimport action\nimport blob_storage\nimport constants\nimport exceptions\nimport storage\nimport util\n\nBUCKET = ('%s' % constants.PROJECT_ID)\n\nSTARTING_MODELS = {\n #Takes too long 'ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync': 'ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync_2018_07_03',\n 'ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync': 'ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync_2018_07_18',\n #Model never detects any objects 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync': 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03',\n #'ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync': 'ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync_2018_07_03',\n #'ssd_mobilenet_v1_quantized_300x300_coco14_sync': 'ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18',\n}\n\ndef get_starting_model_names():\n names = list(STARTING_MODELS.keys())\n names.sort()\n return names\n\ndef get_normalized_input_image_tensor(starting_model_name):\n if '640x640' in starting_model_name:\n return [1, 640, 640, 3]\n elif '300x300' in starting_model_name:\n return [1, 300, 300, 3]\n else:\n message = 'Error: cannot determine normalized_input_image_tensor for %s.' % starting_model_name\n logging.critical(message)\n raise exceptions.HttpErrorInternalServerError(message)\n\ndef start_training_model(team_uuid, description, dataset_uuids_json,\n starting_model, max_running_minutes, num_training_steps, create_time_ms):\n # Call retrieve_model_list to update all models (which may have finished training) and update\n # the team_entity.\n model_entities = retrieve_model_list(team_uuid)\n\n found_starting_model = False\n for starting_model_name, starting_model_checkpoint in STARTING_MODELS.items():\n if starting_model == starting_model_name:\n found_starting_model = True\n starting_model_uuid = None\n starting_model_entity = None\n user_visible_starting_model = starting_model\n original_starting_model = starting_model\n fine_tune_checkpoint = 'gs://%s/static/training/models/%s/model.ckpt' % (\n BUCKET, starting_model_checkpoint)\n break\n if not found_starting_model:\n # starting_model is the model_uuid of one of the user's own models.\n starting_model_uuid = starting_model\n starting_model_entity = retrieve_model_entity(team_uuid, starting_model_uuid)\n if starting_model_entity['trained_checkpoint_path'] == '':\n message = 'Error: Trained checkpoint not found for model_uuid=%s.' 
% starting_model_uuid\n logging.critical(message)\n raise exceptions.HttpErrorNotFound(message)\n # user_visible_starting_model is the description of that model.\n user_visible_starting_model = starting_model_entity['description']\n original_starting_model = starting_model_entity['original_starting_model']\n fine_tune_checkpoint = starting_model_entity['trained_checkpoint_path']\n\n # storage.model_trainer_starting will raise an exception if the team doesn't have enough\n # training time left.\n model_uuid = storage.model_trainer_starting(team_uuid, max_running_minutes)\n try:\n object_detection_tar_gz = 'gs://%s/static/training/object_detection-0.1.tar.gz' % BUCKET\n slim_tar_gz = 'gs://%s/static/training/slim-0.1.tar.gz' % BUCKET\n pycocotools_tar_gz = 'gs://%s/static/training/pycocotools-2.0.tar.gz' % BUCKET\n\n dataset_uuid_list = json.loads(dataset_uuids_json)\n dataset_entities = storage.retrieve_dataset_entities(team_uuid, dataset_uuid_list)\n if len(dataset_entities) != len(dataset_uuid_list):\n message = 'Error: One or more datasets not found for dataset_uuids=%s.' % dataset_uuids_json\n logging.critical(message)\n raise exceptions.HttpErrorNotFound(message)\n\n previous_training_steps = 0\n dataset_uuids = []\n train_input_path = []\n eval_input_path = []\n train_frame_count = 0\n eval_frame_count = 0\n train_negative_frame_count = 0\n eval_negative_frame_count = 0\n train_dict_label_to_count = {}\n eval_dict_label_to_count = {}\n sorted_label_list = None\n label_map_path = None\n if starting_model_entity is not None:\n previous_training_steps += starting_model_entity['previous_training_steps']\n dataset_uuids.extend(starting_model_entity['dataset_uuids'])\n train_input_path.extend(starting_model_entity['train_input_path'])\n eval_input_path.extend(starting_model_entity['eval_input_path'])\n train_frame_count += starting_model_entity['train_frame_count']\n eval_frame_count += starting_model_entity['eval_frame_count']\n train_negative_frame_count += starting_model_entity['train_negative_frame_count']\n eval_negative_frame_count += starting_model_entity['eval_negative_frame_count']\n util.extend_dict_label_to_count(train_dict_label_to_count, starting_model_entity['train_dict_label_to_count'])\n util.extend_dict_label_to_count(eval_dict_label_to_count, starting_model_entity['eval_dict_label_to_count'])\n sorted_label_list = starting_model_entity['sorted_label_list']\n label_map_path = starting_model_entity['label_map_path']\n\n for dataset_entity in dataset_entities:\n dataset_uuids.append(dataset_entity['dataset_uuid'])\n train_input_path.append(dataset_entity['train_input_path'])\n eval_input_path.append(dataset_entity['eval_input_path'])\n train_frame_count += dataset_entity['train_frame_count']\n eval_frame_count += dataset_entity['eval_frame_count']\n train_negative_frame_count += dataset_entity['train_negative_frame_count']\n eval_negative_frame_count += dataset_entity['eval_negative_frame_count']\n util.extend_dict_label_to_count(train_dict_label_to_count, dataset_entity['train_dict_label_to_count'])\n util.extend_dict_label_to_count(eval_dict_label_to_count, dataset_entity['eval_dict_label_to_count'])\n if sorted_label_list is None:\n sorted_label_list = dataset_entity['sorted_label_list']\n label_map_path = dataset_entity['label_map_path']\n elif sorted_label_list != dataset_entity['sorted_label_list']:\n message = \"Error: The datasets contain different labels and cannot be used together.\"\n logging.critical(message)\n raise exceptions.HttpErrorBadRequest(message)\n\n # 
Create the pipeline.config file and store it in cloud storage.\n bucket = util.storage_client().get_bucket(BUCKET)\n config_template_blob_name = 'static/training/models/configs/%s.config' % original_starting_model\n quantization_delay = max(0, num_training_steps - 200)\n pipeline_config = (bucket.blob(config_template_blob_name).download_as_string().decode('utf-8')\n .replace('TO_BE_CONFIGURED/num_classes', str(len(sorted_label_list)))\n .replace('TO_BE_CONFIGURED/fine_tune_checkpoint', fine_tune_checkpoint)\n .replace('TO_BE_CONFIGURED/train_input_path', json.dumps(train_input_path))\n .replace('TO_BE_CONFIGURED/label_map_path', label_map_path)\n .replace('TO_BE_CONFIGURED/eval_input_path', json.dumps(eval_input_path))\n .replace('TO_BE_CONFIGURED/num_examples', str(eval_frame_count))\n .replace('TO_BE_CONFIGURED/num_training_steps', str(num_training_steps))\n .replace('TO_BE_CONFIGURED/quantization_delay', str(quantization_delay))\n )\n pipeline_config_path = blob_storage.store_pipeline_config(team_uuid, model_uuid, pipeline_config)\n\n model_dir = blob_storage.get_model_folder_path(team_uuid, model_uuid)\n job_dir = model_dir\n checkpoint_dir = model_dir\n\n ml = __get_ml_service()\n parent = __get_parent()\n train_job_id = __get_train_job_id(model_uuid)\n scheduling = {\n 'maxRunningTime': '%ds' % (max_running_minutes * 60),\n }\n train_training_input = {\n 'scaleTier': 'BASIC_TPU',\n 'packageUris': [\n object_detection_tar_gz,\n slim_tar_gz,\n pycocotools_tar_gz,\n ],\n 'pythonModule': 'object_detection.model_tpu_main',\n 'args': [\n '--model_dir', model_dir,\n '--pipeline_config_path', pipeline_config_path,\n '--num_train_steps', str(num_training_steps),\n\n # Note(lizloone) I commented out the tpu_zone argument after jobs were failing on\n # July 10, 2020. I found documentation at\n # https://cloud.google.com/ai-platform/training/docs/using-tpus#connecting_to_the_tpu_grpc_server\n # that says \"However, you must make one important change when you use\n # TPUClusterResolver for code that runs on AI Platform Training: Do not provide any\n # arguments when you construct the TPUClusterResolver instance. When the tpu, zone,\n # and project keyword arguments are all set to their default value of None, AI\n # Platform Training automatically provides the cluster resolver with the necessary\n # connection details through environment variables.\"\n #'--tpu_zone', 'us-central1',\n ],\n # TODO(lizlooney): Specify hyperparameters.\n #'hyperparameters': {\n # object (HyperparameterSpec)\n #},\n 'region': 'us-central1', # Don't hardcode?\n 'jobDir': job_dir,\n 'runtimeVersion': '1.15',\n 'pythonVersion': '3.7',\n 'scheduling': scheduling,\n }\n train_job = {\n 'jobId': train_job_id,\n 'trainingInput': train_training_input,\n }\n train_job_response = ml.projects().jobs().create(parent=parent, body=train_job).execute()\n except:\n util.log('model_trainer.start_training_model - creating training job - except %s' %\n traceback.format_exc().replace('\\n', ' ... 
'))\n # storage.failed_to_start_training will adjust the team's remaining training time.\n storage.model_trainer_failed_to_start(team_uuid, model_uuid, max_running_minutes)\n raise\n\n try:\n if eval_frame_count > 0:\n eval_job_id = __get_eval_job_id(model_uuid)\n eval_training_input = {\n 'scaleTier': 'BASIC_GPU',\n 'packageUris': [\n object_detection_tar_gz,\n slim_tar_gz,\n pycocotools_tar_gz,\n ],\n 'pythonModule': 'object_detection.model_main',\n 'args': [\n '--model_dir', model_dir,\n '--pipeline_config_path', pipeline_config_path,\n '--checkpoint_dir', checkpoint_dir,\n ],\n 'region': 'us-central1',\n 'jobDir': job_dir,\n 'runtimeVersion': '1.15',\n 'pythonVersion': '3.7',\n }\n eval_job = {\n 'jobId': eval_job_id,\n 'trainingInput': eval_training_input,\n }\n eval_job_response = ml.projects().jobs().create(parent=parent, body=eval_job).execute()\n else:\n eval_job_response = None\n except:\n util.log('model_trainer.start_training_model - creating eval job - except %s' %\n traceback.format_exc().replace('\\n', ' ... '))\n # storage.model_trainer_failed_to_start will adjust the team's remaining training time.\n storage.model_trainer_failed_to_start(team_uuid, model_uuid, max_running_minutes)\n # Cancel the training job.\n ml.projects().jobs().cancel(name=__get_train_job_name(model_uuid)).execute()\n raise\n model_entity = storage.model_trainer_started(team_uuid, model_uuid, description,\n dataset_uuids, create_time_ms, max_running_minutes, num_training_steps,\n previous_training_steps, starting_model, user_visible_starting_model,\n original_starting_model, fine_tune_checkpoint,\n sorted_label_list, label_map_path, train_input_path, eval_input_path,\n train_frame_count, eval_frame_count, train_negative_frame_count, eval_negative_frame_count,\n train_dict_label_to_count, eval_dict_label_to_count,\n train_job_response, eval_job_response)\n return model_entity\n\ndef retrieve_model_list(team_uuid):\n model_entities = storage.retrieve_model_list(team_uuid)\n ml = None\n for model_entity in model_entities:\n model_entity, ml = update_model_entity(model_entity, ml)\n return model_entities\n\ndef retrieve_model_entity(team_uuid, model_uuid):\n model_entity = storage.retrieve_model_entity(team_uuid, model_uuid)\n model_entity, _ = update_model_entity(model_entity)\n return model_entity\n\ndef update_model_entity(model_entity, ml=None):\n # If the train and eval jobs weren't done last time we checked, check now.\n if is_not_done(model_entity):\n if ml is None:\n ml = __get_ml_service()\n train_job_name = __get_train_job_name(model_entity['model_uuid'])\n train_job_response = ml.projects().jobs().get(name=train_job_name).execute()\n if model_entity['eval_job']:\n eval_job_name = __get_eval_job_name(model_entity['model_uuid'])\n eval_job_response = ml.projects().jobs().get(name=eval_job_name).execute()\n # If the train job has failed or been cancelled, cancel the eval job is it's still alive.\n if __is_dead_or_dying(train_job_response['state']) and __is_alive(eval_job_response['state']):\n ml.projects().jobs().cancel(name=eval_job_name).execute()\n eval_job_response = ml.projects().jobs().get(name=eval_job_name).execute()\n else:\n eval_job_response = None\n model_entity = storage.update_model_entity(\n model_entity['team_uuid'], model_entity['model_uuid'], train_job_response, eval_job_response)\n return model_entity, ml\n\ndef is_not_done(model_entity):\n return (\n __is_not_done(model_entity['train_job_state']) or\n __is_not_done(model_entity['eval_job_state']))\n\ndef is_done(model_entity):\n 
return (\n __is_done(model_entity['train_job_state']) and\n __is_done(model_entity['eval_job_state']))\n\ndef cancel_training_model(team_uuid, model_uuid):\n model_entity = retrieve_model_entity(team_uuid, model_uuid)\n ml = __get_ml_service()\n if __is_alive(model_entity['train_job_state']):\n try:\n train_job_name = __get_train_job_name(model_uuid)\n ml.projects().jobs().cancel(name=train_job_name).execute()\n except:\n util.log('model_trainer.cancel_training_model - canceling training job - except %s' %\n traceback.format_exc().replace('\\n', ' ... '))\n if model_entity['eval_job']:\n if __is_alive(model_entity['eval_job_state']):\n try:\n eval_job_name = __get_eval_job_name(model_uuid)\n ml.projects().jobs().cancel(name=eval_job_name).execute()\n except:\n util.log('model_trainer.cancel_training_model - canceling eval job - except %s' %\n traceback.format_exc().replace('\\n', ' ... '))\n return storage.cancel_training_requested(team_uuid, model_uuid)\n\ndef __get_ml_service():\n scopes = ['https://www.googleapis.com/auth/cloud-platform']\n credentials = service_account.Credentials.from_service_account_file('key.json', scopes=scopes)\n return googleapiclient.discovery.build(\n serviceName='ml', version='v1', credentials=credentials, cache_discovery=False)\n\ndef __get_parent():\n # TODO(lizlooney): Is the project id here supposed to be our Google Cloud Project ID?\n return 'projects/%s' % constants.PROJECT_ID\n\ndef __get_train_job_id(model_uuid):\n return 'train_%s' % model_uuid\n\ndef __get_eval_job_id(model_uuid):\n return 'eval_%s' % model_uuid\n\ndef __get_train_job_name(model_uuid):\n return '%s/jobs/%s' % (__get_parent(), __get_train_job_id(model_uuid))\n\ndef __get_eval_job_name(model_uuid):\n return '%s/jobs/%s' % (__get_parent(), __get_eval_job_id(model_uuid))\n\ndef __is_alive(state):\n return (state == 'QUEUED' or\n state == 'PREPARING' or\n state == 'RUNNING')\n\ndef __is_dead_or_dying(state):\n return (state == 'FAILED' or\n state == 'CANCELLING' or\n state == 'CANCELLED')\n\ndef __is_not_done(state):\n return (state != '' and\n state != 'SUCCEEDED' and\n state != 'FAILED' and\n state != 'CANCELLED')\n\ndef __is_done(state):\n return not __is_not_done(state)\n\n\ndef make_action_parameters(team_uuid, model_uuid):\n action_parameters = action.create_action_parameters(action.ACTION_NAME_EXTRACT_SUMMARY_IMAGES)\n action_parameters['team_uuid'] = team_uuid\n action_parameters['model_uuid'] = model_uuid\n return action_parameters\n\ndef extract_summary_images(action_parameters):\n team_uuid = action_parameters['team_uuid']\n model_uuid = action_parameters['model_uuid']\n\n previous_training_updated = None\n previous_eval_updated = None\n\n while True:\n model_entity = retrieve_model_entity(team_uuid, model_uuid)\n\n training_folder, training_event_file_path, training_updated = blob_storage.get_training_event_file_path(\n team_uuid, model_uuid)\n if training_event_file_path is not None and training_updated != previous_training_updated:\n __extract_summary_images_for_event_file(team_uuid, model_uuid,\n training_folder, training_event_file_path, action_parameters)\n previous_training_updated = training_updated\n\n eval_folder, eval_event_file_path, eval_updated = blob_storage.get_eval_event_file_path(\n team_uuid, model_uuid)\n if eval_event_file_path is not None and eval_updated != previous_eval_updated:\n __extract_summary_images_for_event_file(team_uuid, model_uuid,\n eval_folder, eval_event_file_path, action_parameters)\n previous_eval_updated = eval_updated\n\n if 
is_done(model_entity):\n return\n\n if action.remaining_timedelta(action_parameters) > timedelta(minutes=2):\n time.sleep(60)\n action.retrigger_if_necessary(action_parameters)\n\n\ndef __extract_summary_images_for_event_file(team_uuid, model_uuid, folder, event_file_path,\n action_parameters):\n for event in summary_iterator(event_file_path):\n action.retrigger_if_necessary(action_parameters)\n for value in event.summary.value:\n if value.HasField('image'):\n blob_storage.store_event_summary_image(team_uuid, model_uuid,\n folder, event.step, value.tag, value.image.encoded_image_string)\n\n\ndef retrieve_training_summaries(team_uuid, model_uuid, retrieve_scalars, retrieve_images):\n training_folder, training_event_file_path, training_updated = blob_storage.get_training_event_file_path(\n team_uuid, model_uuid)\n if training_event_file_path is None:\n training_sorted_tags = []\n training_sorted_steps = []\n training_summaries = []\n else:\n training_sorted_tags, training_sorted_steps, training_summaries = __retrieve_summaries_for_event_file(\n team_uuid, model_uuid, training_folder, training_event_file_path, retrieve_scalars, retrieve_images)\n return training_updated, training_sorted_tags, training_sorted_steps, training_summaries\n\n\ndef retrieve_eval_summaries(team_uuid, model_uuid, retrieve_scalars, retrieve_images):\n eval_folder, eval_event_file_path, eval_updated = blob_storage.get_eval_event_file_path(\n team_uuid, model_uuid)\n if eval_event_file_path is None:\n eval_sorted_tags = []\n eval_sorted_steps = []\n eval_summaries = []\n else:\n eval_sorted_tags, eval_sorted_steps, eval_summaries = __retrieve_summaries_for_event_file(\n team_uuid, model_uuid, eval_folder, eval_event_file_path, retrieve_scalars, retrieve_images)\n return eval_updated, eval_sorted_tags, eval_sorted_steps, eval_summaries\n\n\ndef __retrieve_summaries_for_event_file(team_uuid, model_uuid, folder, event_file_path, retrieve_scalars, retrieve_images):\n steps_set = set()\n tags_set = set()\n summaries = []\n for event in summary_iterator(event_file_path):\n values = {}\n for value in event.summary.value:\n if retrieve_scalars and value.HasField('simple_value'):\n tags_set.add(value.tag)\n values[value.tag] = value.simple_value\n elif retrieve_images and value.HasField('image'):\n exists, image_url = blob_storage.get_event_summary_image_download_url(team_uuid, model_uuid,\n folder, event.step, value.tag, value.image.encoded_image_string)\n if exists:\n tags_set.add(value.tag)\n values[value.tag] = {\n 'width': value.image.width,\n 'height': value.image.height,\n 'image_url': image_url,\n }\n if len(values) > 0:\n steps_set.add(event.step)\n summary = {\n 'step': event.step,\n }\n summary['values'] = values\n summaries.append(summary)\n return sorted(tags_set), sorted(steps_set), summaries\n" ]
[ [ "tensorflow.python.summary.summary_iterator.summary_iterator" ] ]
mspayam/MeTooIran
[ "63c2ae2dbb06ef2d6836840e66af21a025668491" ]
[ "MeTooIran.py" ]
[ "import tweepy\nfrom tweepy import OAuthHandler\nimport pandas as pd\n\n\naccess_token = ''\naccess_token_secret = ''\nAPI_key = ''\nAPI_key_secret = ''\n\n\nauth = tweepy.OAuthHandler(API_key, API_key_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\ntweets = []\n\ncount = 1\n\n\nfor tweet in tweepy.Cursor(api.search_full_archive, label= \"use your own label\", fromDate=\"200608081010\", query=\"Me_Too_Iran\").items(50000):\n \n print(count)\n count += 1\n\n try: \n data = [tweet.created_at, tweet.id, tweet.text,\ntweet.user._json['screen_name'], tweet.user._json['name'], tweet.user._json['created_at'], tweet.entities['urls']]\n data = tuple(data)\n tweets.append(data)\n \n\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n\n except StopIteration:\n break\n\ndf = pd.DataFrame(tweets, columns = ['created_at','tweet_id', 'tweet_text', 'screen_name', 'name', 'account_creation_date', 'urls'])\n\ndf.to_csv(path_or_buf = 'Yourpath.csv', index=False)" ]
[ [ "pandas.DataFrame" ] ]
RichardoMrMu/facial-emotion-recognition
[ "d8abd1bcf685eaeb55f844b21e2fda5ebfa25a00" ]
[ "code/models/swin_transformer.py" ]
[ "# -*- coding:utf-8 -*-\n# @Time : 2021/8/1 16:53\n# @Author : Richardo Mu\n# @FILE : swin_transformer.PY\n# @Software : PyCharm\n\n\n\"\"\" Swin Transformer\nA PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`\n - https://arxiv.org/pdf/2103.14030\nCode/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below\n\"\"\"\n# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu\n# --------------------------------------------------------\nimport logging\nimport math\nfrom copy import deepcopy\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as checkpoint\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg\nfrom timm.models.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_\nfrom timm.models.registry import register_model\nfrom timm.models.vision_transformer import checkpoint_filter_fn, _init_vit_weights\n\n_logger = logging.getLogger(__name__)\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'patch_embed.proj', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # patch models (my experiments)\n 'swin_base_patch4_window12_384': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',\n input_size=(3, 384, 384), crop_pct=1.0),\n\n 'swin_base_patch4_window7_224': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',\n ),\n\n 'swin_large_patch4_window12_384': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',\n input_size=(3, 384, 384), crop_pct=1.0),\n\n 'swin_large_patch4_window7_224': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',\n ),\n\n 'swin_small_patch4_window7_224': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',\n ),\n\n 'swin_tiny_patch4_window7_224': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',\n ),\n\n 'swin_base_patch4_window12_384_in22k': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',\n input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),\n\n 'swin_base_patch4_window7_224_in22k': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',\n num_classes=21841),\n\n 'swin_large_patch4_window12_384_in22k': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',\n input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),\n\n 'swin_large_patch4_window7_224_in22k': _cfg(\n url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',\n num_classes=21841),\n\n}\n\n\ndef window_partition(x, 
window_size: int):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows\n\n\ndef window_reverse(windows, window_size: int, H: int, W: int):\n \"\"\"\n Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n\nclass WindowAttention(nn.Module):\n r\"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n It supports both of shifted and non-shifted window.\n Args:\n dim (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n proj_drop (float, optional): Dropout ratio of output. Default: 0.0\n \"\"\"\n\n def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n super().__init__()\n self.dim = dim\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask: Optional[torch.Tensor] = None):\n \"\"\"\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n \"\"\"\n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor 
as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass SwinTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n H, W = self.input_resolution\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n else:\n shifted_x = x\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n x = x.view(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n\nclass PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n self.norm = norm_layer(4 * dim)\n\n def forward(self, x):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) are not even.\"\n\n x = x.view(B, H, W, C)\n\n x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C\n x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C\n x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\n x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\n x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C\n\n x = self.norm(x)\n x = self.reduction(x)\n\n return x\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim\n return flops\n\n\nclass BasicLayer(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n\n # build blocks\n self.blocks = nn.ModuleList([\n SwinTransformerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer)\n for i in range(depth)])\n\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)\n else:\n self.downsample = None\n\n def forward(self, x):\n for blk in self.blocks:\n if not torch.jit.is_scripting() and self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x = blk(x)\n if self.downsample is not None:\n x = self.downsample(x)\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n\nclass SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, weight_init='', **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n self.patch_grid = self.patch_embed.grid_size\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n else:\n self.absolute_pos_embed = None\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n layers = []\n for i_layer in range(self.num_layers):\n layers += [BasicLayer(\n dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n ]\n self.layers = nn.Sequential(*layers)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')\n head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.\n if weight_init.startswith('jax'):\n for n, m in self.named_modules():\n _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)\n else:\n self.apply(_init_vit_weights)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.absolute_pos_embed is not None:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n x = self.layers(x)\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n\ndef _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\n if default_cfg is None:\n default_cfg = deepcopy(default_cfgs[variant])\n overlay_external_default_cfg(default_cfg, kwargs)\n default_num_classes = default_cfg['num_classes']\n default_img_size = default_cfg['input_size'][-2:]\n\n num_classes = kwargs.pop('num_classes', default_num_classes)\n img_size = kwargs.pop('img_size', default_img_size)\n if kwargs.get('features_only', None):\n 
raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n model = build_model_with_cfg(\n SwinTransformer, variant, pretrained,\n default_cfg=default_cfg,\n img_size=img_size,\n num_classes=num_classes,\n pretrained_filter_fn=checkpoint_filter_fn,\n **kwargs)\n\n return model\n\n\n@register_model\ndef swin_base_patch4_window12_384(pretrained=False, **kwargs):\n \"\"\" Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)\n return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_base_patch4_window7_224(pretrained=False, **kwargs):\n \"\"\" Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)\n return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_large_patch4_window12_384(pretrained=False, **kwargs):\n \"\"\" Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)\n return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_large_patch4_window7_224(pretrained=False, **kwargs):\n \"\"\" Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)\n return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_small_patch4_window7_224(pretrained=False, **kwargs):\n \"\"\" Swin-S @ 224x224, trained ImageNet-1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs)\n return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_tiny_patch4_window7_224(pretrained=False, **kwargs):\n \"\"\" Swin-T @ 224x224, trained ImageNet-1k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)\n return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs):\n \"\"\" Swin-B @ 384x384, trained ImageNet-22k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)\n return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs):\n \"\"\" Swin-B @ 224x224, trained ImageNet-22k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)\n return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs):\n \"\"\" Swin-L @ 384x384, trained ImageNet-22k\n \"\"\"\n 
model_kwargs = dict(\n patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)\n return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)\n\n\n@register_model\ndef swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs):\n \"\"\" Swin-L @ 224x224, trained ImageNet-22k\n \"\"\"\n model_kwargs = dict(\n patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)\n return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.Sequential", "torch.cat", "torch.zeros", "torch.arange", "torch.nn.Linear", "torch.nn.Identity", "torch.utils.checkpoint.checkpoint", "torch.jit.is_scripting", "torch.flatten", "torch.roll", "torch.nn.AdaptiveAvgPool1d", "torch.meshgrid" ] ]
aks2203/deep-thinking
[ "089fc5d04a0997ccdbad601b3e025f547a8b6327" ]
[ "deepthinking/models/dt_net_1d.py" ]
[ "\"\"\" dt_net_1d.py\n DeepThinking 1D convolutional neural network.\n\n Collaboratively developed\n by Avi Schwarzschild, Eitan Borgnia,\n Arpit Bansal, and Zeyad Emam.\n\n Developed for DeepThinking project\n October 2021\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom .blocks import BasicBlock1D as BasicBlock\n\n# Ignore statemenst for pylint:\n# Too many branches (R0912), Too many statements (R0915), No member (E1101),\n# Not callable (E1102), Invalid name (C0103), No exception (W0702)\n# pylint: disable=R0912, R0915, E1101, E1102, C0103, W0702, R0914\n\n\nclass DTNet1D(nn.Module):\n \"\"\"DeepThinking 1D Network model class\"\"\"\n\n def __init__(self, block, num_blocks, width, recall, group_norm=False, **kwargs):\n super().__init__()\n\n self.width = int(width)\n self.recall = recall\n self.group_norm = group_norm\n\n proj_conv = nn.Conv1d(1, width, kernel_size=3,\n stride=1, padding=1, bias=False)\n\n conv_recall = nn.Conv1d(width + 1, width, kernel_size=3,\n stride=1, padding=1, bias=False)\n\n if self.recall:\n recur_layers = [conv_recall, nn.ReLU()]\n else:\n recur_layers = []\n\n for i in range(len(num_blocks)):\n recur_layers.append(self._make_layer(block, width, num_blocks[i], stride=1))\n\n head_conv1 = nn.Conv1d(width, width, kernel_size=3,\n stride=1, padding=1, bias=False)\n head_conv2 = nn.Conv1d(width, int(width/2), kernel_size=3,\n stride=1, padding=1, bias=False)\n head_conv3 = nn.Conv1d(int(width/2), 2, kernel_size=3,\n stride=1, padding=1, bias=False)\n\n self.projection = nn.Sequential(proj_conv, nn.ReLU())\n self.recur_block = nn.Sequential(*recur_layers)\n self.head = nn.Sequential(head_conv1, nn.ReLU(),\n head_conv2, nn.ReLU(),\n head_conv3)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for strd in strides:\n layers.append(block(self.width, planes, strd, self.group_norm))\n self.width = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x, iters_to_do, interim_thought=None, **kwargs):\n initial_thought = self.projection(x)\n\n if interim_thought is None:\n interim_thought = initial_thought\n\n all_outputs = torch.zeros((x.size(0), iters_to_do, 2, x.size(2))).to(x.device)\n\n for i in range(iters_to_do):\n if self.recall:\n interim_thought = torch.cat([interim_thought, x], 1)\n\n interim_thought = self.recur_block(interim_thought)\n out = self.head(interim_thought)\n all_outputs[:, i] = out\n\n if self.training:\n return out, interim_thought\n\n return all_outputs\n\n\ndef dt_net_1d(width, **kwargs):\n return DTNet1D(BasicBlock, [2], width, recall=False)\n\n\ndef dt_net_recall_1d(width, **kwargs):\n return DTNet1D(BasicBlock, [2], width, recall=True)\n\n\ndef dt_net_gn_1d(width, **kwargs):\n return DTNet1D(BasicBlock, [2], width, recall=False, group_norm=True)\n\n\ndef dt_net_recall_gn_1d(width, **kwargs):\n return DTNet1D(BasicBlock, [2], width, recall=True, group_norm=True)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.cat", "torch.nn.Conv1d" ] ]
Anonymous-px/ID2445_DFFT
[ "89a1a482c1b9d5a664dc9e77536ac8c65dc6b614" ]
[ "mmdet/models/backbones/DFFTNet.py" ]
[ "# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu, Yutong Lin, Yixuan Wei\n# --------------------------------------------------------\n\nfrom numpy.core.numeric import cross\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nimport numpy as np\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom mmcv_custom import load_checkpoint\nfrom mmdet.utils import get_root_logger\nfrom ..builder import BACKBONES\n#from .swin_utils import CSP_DenseBlock\nfrom .CA_layer import *\nfrom .SA_layer import *\nfrom .DOT_blocks import *\n\n\ndef b16(n, activation, resolution=224):\n # Conv2d_BN(self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000)\n return torch.nn.Sequential(\n Conv2d_BN(3, n, 3, 2, 1, resolution=resolution),\n activation(),)\n #Conv2d_BN(n // 2, n, 3, 1, 1, resolution=resolution),\n #activation(),)\n #Conv2d_BN(n // 4, n // 2, 3, 2, 1, resolution=resolution // 4),\n #activation(),\n #Conv2d_BN(n // 2, n, 3, 2, 1, resolution=resolution // 8))\n\n@BACKBONES.register_module()\nclass DFFTNet(nn.Module):\n def __init__(self,\n pretrain_img_size=224,\n patch_size=4,\n embed_dim=128,\n depths=[2, 2, 18, 2],\n alldepths=[3, 3, 19, 3],\n num_heads=[4, 4, 7, 12],\n window_size=7,\n mlp_ratio=4.,\n qkv_bias=True,\n qk_scale=None,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.2,\n norm_layer=nn.LayerNorm,\n ape=False,\n patch_norm=True,\n out_indices=(0, 2, 4, 6),\n frozen_stages=-1,\n use_checkpoint=False,\n crossca_position = [1, 2, 3], \n crossca_type = \"CrossAddCa_a_n_l\"):\n super().__init__()\n\n print(\"depths:\", depths)\n print(\"num_heads\", num_heads)\n print(\"crossca_position:\", crossca_position)\n\n self.pretrain_img_size = pretrain_img_size\n self.num_layers = len(depths)\n self.embed_dim = num_heads[0] * 32\n self.ape = ape\n self.patch_norm = patch_norm\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n\n self.b16 = b16(self.embed_dim, torch.nn.Hardswish)\n self.patch_embed = PatchEmbed(\n patch_size=patch_size, in_chans=self.embed_dim, embed_dim=self.embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n if self.ape:\n pretrain_img_size = to_2tuple(pretrain_img_size)\n patch_size = to_2tuple(patch_size)\n patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]\n\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer_dim = int(num_heads[i_layer]*32)\n layer_dimout = int(num_heads[i_layer+1]*32) if (i_layer < self.num_layers - 1) else int(num_heads[i_layer]*32)\n layer = DOTBlock(\n dim=layer_dim,\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n use_checkpoint=use_checkpoint,\n alldepths=alldepths[i_layer])\n 
self.layers.append(layer)\n if i_layer in crossca_position:\n saablock = SAAModule(layer_dim, 2, torch.nn.Hardswish, resolution=224, drop_path=0.,)\n self.layers.append(saablock)\n if (i_layer < self.num_layers - 1):\n downsample = PatchMerging(dim=layer_dim, dimout = layer_dimout, norm_layer=norm_layer)\n self.layers.append(downsample)\n\n num_features = [int(num_heads[i]*32) for i in range(self.num_layers)]\n self.num_features = num_features\n self.upsample_2 = torch.nn.Upsample(scale_factor=2)\n\n self.links = nn.ModuleList()\n for i_layer in range(1, self.num_layers):\n if i_layer == 1:\n layer_dim = 128\n else:\n layer_dim = self.num_features[-1]\n saeblock = SAEBlock(layer_dim, 2, torch.nn.Hardswish, resolution=224, drop_path=0.)\n self.links.append(saeblock)\n\n self.out_norm = norm_layer(self.num_features[-1])\n\n saaconv = []\n saaconv.append(nn.Sequential(nn.Conv2d(self.num_features[0], self.num_features[1], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[1]), nn.ReLU6(inplace=True)))\n saaconv.append(nn.Sequential(nn.Conv2d(self.num_features[1], self.num_features[1], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[1]), nn.ReLU6(inplace=True)))\n saaconv.append(nn.Sequential(nn.Conv2d(self.num_features[1], self.num_features[2], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[2]), nn.ReLU6(inplace=True)))\n saaconv.append(nn.Sequential(nn.Conv2d(self.num_features[2], self.num_features[3], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[3]), nn.ReLU6(inplace=True)))\n for idx in range(4):\n layer_name = f'saaconv{idx}'\n self.add_module(layer_name, saaconv[idx]) \n\n saeconv = []\n saeconv = []\n saeconv.append(nn.Sequential(nn.Conv2d(self.num_features[1], 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU6(inplace=True)))\n saeconv.append(nn.Sequential(nn.Conv2d(self.num_features[2], 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU6(inplace=True)))\n saeconv.append(nn.Sequential(nn.Conv2d(128, self.num_features[3], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[3]), nn.ReLU6(inplace=True)))\n saeconv.append(nn.Sequential(nn.Conv2d(self.num_features[3], self.num_features[3], 3, 1, 1, bias=False), nn.BatchNorm2d(self.num_features[3]), nn.ReLU6(inplace=True)))\n for idx in range(4):\n layer_name = f'saeconv{idx}'\n self.add_module(layer_name, saeconv[idx]) \n\n self._freeze_stages()\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.patch_embed.eval()\n for param in self.patch_embed.parameters():\n param.requires_grad = False\n\n if self.frozen_stages >= 1 and self.ape:\n self.absolute_pos_embed.requires_grad = False\n\n if self.frozen_stages >= 2:\n self.pos_drop.eval()\n for i in range(0, self.frozen_stages - 1):\n m = self.layers[i]\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n def _init_weights(m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n if isinstance(pretrained, str):\n self.apply(_init_weights)\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n self.apply(_init_weights)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n x = self.b16(x)\n x = self.patch_embed(x)\n Wh, Ww = x.size(2), x.size(3)\n if self.ape:\n 
absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')\n x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C\n else:\n x = x.flatten(2).transpose(1, 2)\n x = self.pos_drop(x)\n\n dot_feature, dot_HW = [], []\n saa_feature = []\n channel = [self.num_features[1], self.num_features[1], self.num_features[2], self.num_features[3]]\n for i, layer in enumerate(self.layers):\n if isinstance(layer, DOTBlock):\n x, H, W = layer(x, Wh, Ww)\n ca_x = x\n B, _, C = x.shape\n cross_x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n conv_layer = getattr(self, f'saaconv{len(dot_feature)}')\n if len(dot_feature) < 2:\n cross_x = conv_layer(cross_x)\n dot_feature.append(cross_x.contiguous().view(B, channel[len(dot_feature)], -1).transpose(-2, -1))\n dot_HW.append([H, W])\n elif isinstance(layer, SAAModule):\n if i == len(self.layers)-1:\n last_layer = True\n x, _ = layer(dot_feature[-2:], dot_HW[-2:], last_layer=last_layer)\n saa_feature.append(x)\n ca_x = x\n else:\n link_x, x = layer(dot_feature[-2:], dot_HW[-2:])\n saa_feature.append(link_x)\n if len(dot_feature) > 1:\n cross_x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n conv_layer = getattr(self, f'saaconv{len(dot_feature)}')\n cross_x = conv_layer(cross_x)\n dot_feature[-1] =cross_x.contiguous().view(B, channel[len(dot_feature)], -1).transpose(-2, -1)\n else:\n dot_feature[-1] = link_x\n elif isinstance(layer, PatchMerging):\n x = layer(x, H, W)\n Wh, Ww = (H + 1) // 2, (W + 1) // 2\n \n saa_feature.append(dot_feature[-1])\n\n # addlink2:\n dot_feature = saa_feature\n\n channel = [128, 128, self.num_features[3], self.num_features[3]]\n for i in range(2):\n H, W = dot_HW[i]\n B, _, C = dot_feature[i].shape\n cross_x = dot_feature[i].view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n conv_layer = getattr(self, f'saeconv{i}')\n cross_x = conv_layer(cross_x)\n dot_feature[i] = cross_x.contiguous().view(B, channel[i], -1).transpose(-2, -1)\n for i in range(len(self.links)):\n H, W = dot_HW[i+1]\n B, _, C = dot_feature[i+1].shape\n layer = self.links[i]\n if i == len(self.links)-1:\n last_layer = True\n x, _ = layer(dot_feature[i:i+2], dot_HW[i:i+2], last_layer=last_layer)\n else:\n conv_layer = getattr(self, f'saeconv{i+2}')\n last_layer = False\n _, x = layer(dot_feature[i:i+2], dot_HW[i:i+2], last_layer=last_layer)\n x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n x = conv_layer(x)\n dot_feature[i+1] = x.contiguous().view(B, channel[i+2], -1).transpose(-2, -1)\n\n x = self.out_norm(x)\n x = x.view(-1, dot_HW[2][0], dot_HW[2][1], self.num_features[-1]).permute(0, 3, 1, 2).contiguous()\n return tuple([x])\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n super(DFFTNet, self).train(mode)\n self._freeze_stages()\n# \n" ]
[ [ "torch.nn.Dropout", "torch.nn.ReLU6", "torch.zeros", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Upsample", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d" ] ]
yotamfr/prot2vec
[ "eaee36f9e3929054b1c324acd053a52d0e7be2bd" ]
[ "src/python/word2vec.py" ]
[ "import os\nimport sys\nimport operator\nimport numpy as np\nimport pandas as pd\n\nfrom shutil import copyfile\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.modules.loss import _Loss\n\nfrom itertools import combinations\n\nfrom pymongo.errors import CursorNotFound\nfrom pymongo import MongoClient\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\n\nfrom gensim.matutils import unitvec\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError as err:\n plt = None\n print(err)\n\nfrom tempfile import gettempdir\n\n\nimport argparse\n\naa_sim = pd.read_csv('Data/aa_sim.csv')\naa_unlike = [np.where(aa_sim.loc[[w], :] == 0)[1] for w in range(0, 25)]\nAA = aa_sim.columns\ndictionary, reverse_dictionary = dict(zip(AA, range(25))), dict(zip(range(25), AA))\nvocabulary_size = len(AA)\nn_clstr = 8\n\nimport json\nprint(json.dumps(dictionary, indent=1))\n\nassert vocabulary_size == 25\n\nverbose = False\n\nprint(\"WARNING! Deprecated!\")\n\n\nclass BatchLoader(object):\n def __init__(self, win_size, batch_size, train):\n self.win_size = win_size\n self.batch_size = batch_size\n self.train = train\n self.batch_buffer = np.ndarray([])\n self.labels_buffer = np.ndarray([])\n self.test_set = list(collection_test.aggregate([{\"$sample\": {\"size\": size_test}}]))\n\n def __iter__(self):\n\n if self.train:\n self.stream = collection_train.aggregate([{\"$sample\": {\"size\": size_train}}])\n else:\n self.stream = (seq for seq in self.test_set)\n\n i, n = 1, size_test\n seq = self._get_sequence()\n seq_pos = 0\n\n batch_buffer, labels_buffer = self.batch_buffer, self.labels_buffer\n\n while True:\n\n batch_buffer, labels_buffer, seq_pos, batch_pos = \\\n self._get_batch(seq, batch_buffer, labels_buffer, batch_pos=0, seq_pos=seq_pos)\n\n if seq_pos == 0: # seq finished\n try:\n if verbose:\n sys.stdout.write(\"\\r{0:.0f}%\".format(100.0 * i / n))\n seq = self._get_sequence()\n i += 1\n except (CursorNotFound, StopIteration) as e:\n print(e)\n break\n\n batch_buffer, labels_buffer, seq_pos, batch_pos = \\\n self._get_batch(seq, batch_buffer, labels_buffer, batch_pos=batch_pos, seq_pos=0)\n\n else:\n yield np.random.permutation(batch_buffer), np.random.permutation(labels_buffer)\n\n def _get_batch(self, seq, batch, labels, batch_pos=0, seq_pos=0):\n return batch, labels, seq_pos, batch_pos\n\n def _get_sequence(self):\n return ''\n\n\nclass WindowBatchLoader(BatchLoader):\n def __init__(self, win_size, batch_size, train=True):\n super(WindowBatchLoader, self).__init__(win_size, batch_size, train)\n self.mask = np.array(range(2 * win_size + 1)) != win_size\n self.batch_buffer = np.ndarray(shape=(batch_size, win_size * 2), dtype=np.int32)\n self.labels_buffer = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n\n def _get_sequence(self):\n return np.array(list(map(lambda aa: dictionary[aa], next(self.stream)['sequence'])))\n\n def _get_batch(self, seq, batch, labels, batch_pos=0, seq_pos=0):\n win = self.win_size\n mask = self.mask\n if seq_pos == 0:\n seq_pos = win\n batch_size = self.batch_size\n while batch_pos < batch_size:\n if seq_pos + win >= len(seq): # seq finished before batch\n return batch, labels, 0, batch_pos\n if batch_pos == batch_size: # batch finished before seq\n break\n start = seq_pos - win\n end = seq_pos + win + 1\n context = seq[start:end]\n batch[batch_pos, :] = context[mask]\n labels[batch_pos] = [context[win]]\n batch_pos += 1\n seq_pos += 1\n\n return batch, labels, 
seq_pos, batch_pos\n\n\nclass SkipGramBatchLoader(BatchLoader):\n def __init__(self, win_size, batch_size, train=True):\n super(SkipGramBatchLoader, self).__init__(win_size, batch_size, train)\n self.batch_buffer = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n self.labels_buffer = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n\n def _get_sequence(self):\n return next(self.stream)['sequence']\n\n def _get_batch(self, seq, batch, labels, batch_pos=0, seq_pos=0):\n batch_size = self.batch_size\n while batch_pos < batch_size:\n win = np.random.choice(self.win_size) + 1\n for offset in range(-win, win + 1):\n label_pos = seq_pos + offset\n if offset == 0 or label_pos < 0:\n continue\n if label_pos >= len(seq):\n continue\n if batch_pos == batch_size: # batch finished before seq\n break\n if seq_pos == len(seq): # seq finished before batch\n return batch, labels, 0, batch_pos\n labels[batch_pos][0] = dictionary[seq[label_pos]]\n batch[batch_pos][0] = dictionary[seq[seq_pos]]\n batch_pos += 1\n seq_pos += 1\n\n return batch, labels, seq_pos, batch_pos\n\n\ndef get_negative_samples(words):\n return np.array([np.random.choice(aa_unlike[w[0]], 1) for w in words])\n\n\ndef save_checkpoint(state, is_best):\n filename_late = \"%s/w2v_%s_latest.tar\" % (ckptpath, arch)\n filename_best = \"%s/w2v_%s_best.tar\" % (ckptpath, arch)\n torch.save(state, filename_late)\n if is_best:\n copyfile(filename_late, filename_best)\n\n\ndef get_loss2(word, context, model, criterion):\n c = Variable(torch.from_numpy(context).long())\n w = Variable(torch.from_numpy(word).long())\n p = model((w, c))\n _ = Variable(torch.from_numpy(np.ones(word.shape[0])))\n return criterion(p, _)\n\n\ndef get_loss1(word, context, model, criterion):\n word_tag = get_negative_samples(word)\n c = Variable(torch.from_numpy(context).long())\n w = Variable(torch.from_numpy(word).long())\n w_tag = Variable(torch.from_numpy(word_tag).long())\n l_pos = Variable(torch.from_numpy(np.ones((word.shape[0], 1))).float())\n l_neg = Variable(torch.from_numpy(np.zeros((word.shape[0], 1))).float())\n p_pos = model((w, c))\n p_neg = 1 - model((w_tag, c))\n return criterion(p_pos, l_pos) + criterion(p_neg, l_neg)\n\n\ndef train_w2v(model, train_loader, test_loader, criterion=nn.MSELoss(), get_loss=get_loss1):\n # Hyper Parameters\n\n num_epochs = args.num_epochs\n\n # Loss and Optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)\n\n train_loss = 0\n best_loss = np.inf\n start_epoch = 0\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '%s'\" % args.resume)\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch']\n best_loss = checkpoint['best_loss']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '%s' (epoch %s)\" %\n (args.resume, checkpoint['epoch'] + 1))\n else:\n print(\"=> no checkpoint found at '%s'\" % args.resume)\n\n for epoch in range(start_epoch, num_epochs):\n\n for step, (context, word) in enumerate(train_loader):\n model.train()\n optimizer.zero_grad()\n loss = get_loss(word, context, model, criterion)\n train_loss += loss.data[0]\n loss.backward()\n optimizer.step()\n\n # loss = p_pos.log() + p_neg.log()\n\n if (step + 1) % args.steps_per_stats == 0:\n test_loss = 0\n for i, (c, w) in enumerate(test_loader):\n loss = get_loss(w, c, model, criterion)\n test_loss += loss.data[0]\n print('Epoch [%d/%d], Train 
Loss: %.5f, Test Loss: %.5f'\n % (epoch + 1, num_epochs, train_loss / args.steps_per_stats, test_loss / i))\n train_loss = 0\n\n # remember best prec@1 and save checkpoint\n is_best = best_loss > test_loss\n best_loss = min(best_loss, test_loss)\n save_checkpoint({\n 'epoch': epoch,\n 'arch': arch,\n 'state_dict': model.state_dict(),\n 'best_loss': best_loss,\n 'optimizer': optimizer.state_dict(),\n }, is_best)\n\n\ndef embeddings(w2v):\n return w2v.emb.weight.data.numpy()\n\n\nclass Word2VecAPI(object):\n def __init__(self, w2v):\n self._emb = np.copy(embeddings(w2v))\n\n @property\n def embeddings(self):\n return np.copy(self._emb)\n\n def __getitem__(self, aa):\n return self._emb[dictionary[aa]]\n\n def __contains__(self, aa):\n return aa in dictionary\n\n @property\n def vocab(self):\n keys = list(dictionary.keys())\n values = [self[aa] for aa in keys]\n return dict(zip(keys, values))\n\n def similarity(self, aa1, aa2):\n return np.dot(unitvec(self[aa1]), unitvec(self[aa2]))\n\n\nclass Word2VecImpl(nn.Module):\n\n def __init__(self, emb_size):\n super(Word2VecImpl, self).__init__()\n self.emb_w = nn.Embedding(vocabulary_size, emb_size)\n self.emb_c = nn.Embedding(vocabulary_size, emb_size)\n\n @property\n def emb(self):\n return self.emb_w\n\n def forward(self, x):\n return x\n\n\nclass LogLoss(_Loss):\n\n def forward(self, input, target):\n out = -input.log().mean()\n return out\n\n\nclass SoftMax(Word2VecImpl):\n def __init__(self, emb_size):\n super(SoftMax, self).__init__(emb_size)\n\n def forward(self, x):\n word, context = x\n batch_size = word.data.shape[0]\n vocab = np.array([list(range(vocabulary_size))\n for _ in range(batch_size)])\n vocab = Variable(torch.from_numpy(vocab).long())\n v_emb = self.emb_c(vocab)\n w_emb = self.emb_w(word).transpose(1, 2)\n c_emb = self.emb_c(context)\n nom = torch.exp(torch.bmm(c_emb, w_emb))\n dnom = torch.exp(torch.bmm(v_emb, w_emb))\n dnom = dnom.sum(1).pow(-1).unsqueeze(1)\n out = nom.bmm(dnom).view(-1)\n return out\n\n\nclass CBOW(Word2VecImpl):\n\n def __init__(self, emb_size):\n super(CBOW, self).__init__(emb_size)\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n word, context = x\n w_emb = self.emb_w(word)\n c_emb = self.emb_c(context)\n out = torch.bmm(w_emb, c_emb.transpose(1, 2))\n out = out.sum(2)\n out = self.sig(out)\n return out\n\n\nclass SkipGram(Word2VecImpl):\n def __init__(self, emb_size):\n super(SkipGram, self).__init__(emb_size)\n self.sig = nn.Sigmoid()\n\n def forward(self, x):\n word, context = x\n w_emb = self.emb_w(word)\n c_emb = self.emb_c(context)\n out = torch.bmm(w_emb, c_emb.transpose(1, 2))\n out = self.sig(out).mean(2)\n return out\n\n\ndef pca(embeddings):\n pca = PCA(n_components=2)\n return pca.fit_transform(embeddings)\n\n\ndef tsne(embeddings):\n tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n return tsne.fit_transform(embeddings)\n\n\ndef plot(low_dim_embs, fname=None):\n labels = [reverse_dictionary[i] for i in range(vocabulary_size)]\n assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'\n plt.figure(figsize=(18, 18)) # in inches\n for i, label in enumerate(labels):\n x, y = low_dim_embs[i, :]\n plt.scatter(x, y)\n plt.annotate(label,\n xy=(x, y),\n xytext=(5, 2),\n textcoords='offset points',\n ha='right', va='bottom')\n if fname:\n fpath = os.path.join(ckptpath, fname)\n print(\"Saving to %s\" % fpath)\n plt.savefig(fpath)\n plt.show()\n\n\ndef kmeans(w2v, k):\n keys = np.array(list(w2v.vocab.keys()))\n vectors = np.array([w2v[aa] for 
aa in keys])\n km = KMeans(n_clusters=k).fit(vectors)\n return keys, km.labels_\n\n\ndef clstr_stats(w2v, k):\n keys, labels = kmeans(w2v, k)\n clstr = '\\n'.join(\"cluster %s: %s\" %\n (lbl, ' '.join(keys[labels == lbl]))\n for lbl in np.unique(labels))\n cs = combinations(keys, 2)\n ds = {c: w2v.similarity(c[0], c[1]) for c in cs}\n hi_i = max(ds.items(), key=operator.itemgetter(1))[0]\n lo_i = min(ds.items(), key=operator.itemgetter(1))[0]\n av = np.mean(list(ds.values()))\n hi_s = \"highest similarity: sim(%s, %s)=%s\" % (hi_i[0], hi_i[1], ds[hi_i])\n lo_s = \"lowest similarity: sim(%s, %s)=%s\" % (lo_i[0], lo_i[1], ds[lo_i])\n av_s = \"average similarity: %s\" % av\n return '\\n'.join([clstr, hi_s, lo_s, av_s])\n\n\ndef nn_stats(w2v):\n for i in range(vocabulary_size):\n aa = reverse_dictionary[i]\n top_k = 3 # number of nearest neighbors\n nearest = sorted(list(range(vocabulary_size)),\n key=lambda o: -w2v.similarity(aa, reverse_dictionary[o]))\n log_str = 'Nearest to %s:' % aa\n for k in range(1, top_k+1):\n close_word = reverse_dictionary[nearest[k]]\n log_str = '%s %s,' % (log_str, close_word)\n print(log_str)\n\n\ndef add_arguments(parser):\n parser.add_argument(\"-w\", \"--win_size\", type=int, required=True,\n help=\"Give the length of the context window.\")\n parser.add_argument(\"-d\", \"--emb_dim\", type=int, required=True,\n help=\"Give the dimension of the embedding vector.\")\n parser.add_argument(\"-b\", \"--batch_size\", type=int, default=64,\n help=\"Give the size of bach to use when training.\")\n parser.add_argument(\"-e\", \"--num_epochs\", type=int, default=5,\n help=\"Give the number of epochs to use when training.\")\n parser.add_argument(\"--mongo_url\", type=str, default='mongodb://localhost:27017/',\n help=\"Supply the URL of MongoDB\")\n parser.add_argument(\"-a\", \"--arch\", type=str, choices=['cbow', 'sg', 'sf'],\n default=\"cbow\", help=\"Choose what type of model to use.\")\n parser.add_argument(\"-o\", \"--out_dir\", type=str, required=False,\n default=gettempdir(), help=\"Specify the output directory.\")\n parser.add_argument(\"-v\", '--verbose', action='store_true', default=False,\n help=\"Run in verbose mode.\")\n parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument(\"--steps_per_stats\", type=int, default=1000,\n help=\"How many training steps to do per stats logging, save.\")\n parser.add_argument(\"--size_train\", type=int, default=50000,\n help=\"The number of sequences sampled to create the test set.\")\n parser.add_argument(\"--size_test\", type=int, default=1000,\n help=\"The number of sequences sampled to create the train set.\")\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n add_arguments(parser)\n args = parser.parse_args()\n\n client = MongoClient(args.mongo_url)\n db = client['prot2vec']\n collection_train = db['uniprot']\n collection_test = db['sprot']\n\n arch = args.arch\n\n ckptpath = args.out_dir\n if not os.path.exists(ckptpath):\n os.makedirs(ckptpath)\n\n size_train = args.size_train\n size_test = args.size_test\n\n if arch == 'cbow':\n w2v = CBOW(args.emb_dim)\n train_loader = WindowBatchLoader(args.win_size, args.batch_size // 2)\n test_loader = WindowBatchLoader(args.win_size, args.batch_size // 2, False)\n train_w2v(w2v, train_loader, test_loader)\n\n elif arch == 'sg':\n w2v = SkipGram(args.emb_dim)\n train_loader = SkipGramBatchLoader(args.win_size, args.batch_size // 2)\n test_loader = 
SkipGramBatchLoader(args.win_size, args.batch_size // 2, False)\n train_w2v(w2v, train_loader, test_loader)\n\n elif arch == 'sf':\n w2v = SoftMax(args.emb_dim)\n train_loader = SkipGramBatchLoader(args.win_size, args.batch_size // 2)\n test_loader = SkipGramBatchLoader(args.win_size, args.batch_size // 2, False)\n train_w2v(w2v, train_loader, test_loader, LogLoss(), get_loss=get_loss2)\n\n else:\n print(\"Unknown model\")\n exit(1)\n\n if args.verbose:\n api = Word2VecAPI(w2v)\n print(clstr_stats(api, n_clstr))\n print(nn_stats(api))\n if args.verbose and plt:\n plot(tsne(api.embeddings), 'w2v_%s_tsne.png' % arch)\n plot(pca(api.embeddings), 'w2v_%s_pca.png' % arch)\n" ]
[ [ "sklearn.cluster.KMeans", "torch.load", "numpy.ndarray", "torch.nn.Embedding", "sklearn.manifold.TSNE", "numpy.where", "torch.save", "pandas.read_csv", "numpy.unique", "torch.from_numpy", "torch.nn.Sigmoid", "numpy.copy", "torch.bmm", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.random.choice", "matplotlib.pyplot.annotate", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.show", "sklearn.decomposition.PCA", "matplotlib.pyplot.scatter", "numpy.ones", "numpy.random.permutation", "torch.nn.MSELoss" ] ]
xjtuchenchao888/xjtuchenchao888.github.io
[ "71dd380be3be17b22874d9add511b436852e9d67" ]
[ "backend/tf_inference.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom backend.config import id2name\n\nPATH_TO_CKPT = 'models/ssdlite_mobilenet_v2.pb'\n\ndef load_model():\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n with detection_graph.as_default():\n sess = tf.Session(graph=detection_graph)\n return sess, detection_graph\n\n\n\ndef inference(sess, detection_graph, img_arr, conf_thresh=0.5):\n # with detection_graph.as_default():\n # with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n image_np_expanded = np.expand_dims(img_arr, axis=0)\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n height, width, _ = img_arr.shape\n results = []\n for idx, class_id in enumerate(classes[0]):\n conf = scores[0, idx]\n if conf > conf_thresh:\n bbox = boxes[0, idx]\n ymin, xmin, ymax, xmax = bbox[0] * height, bbox[1] * width, bbox[2] * height, bbox[3] * width\n \n results.append({\"name\": id2name[class_id],\n \"conf\": str(conf),\n \"bbox\": [int(xmin), int(ymin), int(xmax), int(ymax)]\n })\n\n return {\"results\":results}" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "tensorflow.Session", "tensorflow.GraphDef" ] ]
Annusha/unsup_temp_embed
[ "2fd98b4d70d6180cb9f4a5adc107c8a24dd256bb" ]
[ "ute/models/training_embed.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"Implementation of training and testing functions for embedding.\"\"\"\n\n__all__ = ['training', 'load_model']\n__author__ = 'Anna Kukleva'\n__date__ = 'August 2018'\n\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom os.path import join\nimport time\nimport numpy as np\nimport random\n\nfrom ute.utils.arg_pars import opt\nfrom ute.utils.logging_setup import logger\nfrom ute.utils.util_functions import Averaging, adjust_lr\nfrom ute.utils.util_functions import dir_check\n\n\ndef training(train_loader, epochs, save, **kwargs):\n \"\"\"Training pipeline for embedding.\n\n Args:\n train_loader: iterator within dataset\n epochs: how much training epochs to perform\n n_subact: number of subactions in current complex activity\n mnist: if training with mnist dataset (just to test everything how well\n it works)\n Returns:\n trained pytorch model\n \"\"\"\n logger.debug('create model')\n\n # make everything deterministic -> seed setup\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n np.random.seed(opt.seed)\n random.seed(opt.seed)\n torch.backends.cudnn.deterministic = True\n\n model = kwargs['model']\n loss = kwargs['loss']\n optimizer = kwargs['optimizer']\n\n cudnn.benchmark = True\n\n batch_time = Averaging()\n data_time = Averaging()\n losses = Averaging()\n\n adjustable_lr = opt.lr\n\n logger.debug('epochs: %s', epochs)\n for epoch in range(epochs):\n # model.cuda()\n model.to(opt.device)\n model.train()\n\n logger.debug('Epoch # %d' % epoch)\n if opt.lr_adj:\n # if epoch in [int(epochs * 0.3), int(epochs * 0.7)]:\n # if epoch in [int(epochs * 0.5)]:\n if epoch % 30 == 0 and epoch > 0:\n adjustable_lr = adjust_lr(optimizer, adjustable_lr)\n logger.debug('lr: %f' % adjustable_lr)\n end = time.time()\n for i, (features, labels) in enumerate(train_loader):\n data_time.update(time.time() - end)\n features = features.float()\n labels = labels.float().to(opt.device)\n if opt.device == 'cuda':\n features = features.cuda(non_blocking=True)\n # features = features.float().cuda(non_blocking=True)\n # labels = labels.float().cuda()\n output = model(features)\n loss_values = loss(output.squeeze(), labels.squeeze())\n losses.update(loss_values.item(), features.size(0))\n\n optimizer.zero_grad()\n loss_values.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % 100 == 0 and i:\n logger.debug('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses))\n logger.debug('loss: %f' % losses.avg)\n losses.reset()\n\n opt.resume_str = join(opt.dataset_root, 'models',\n '%s.pth.tar' % opt.log_str)\n if save:\n save_dict = {'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n if opt.global_pipe:\n dir_check(join(opt.dataset_root, 'models', 'global'))\n opt.resume_str = join(opt.dataset_root, 'models', 'global',\n '%s.pth.tar' % opt.log_str)\n else:\n dir_check(join(opt.dataset_root, 'models'))\n torch.save(save_dict, opt.resume_str)\n return model\n\n\ndef load_model():\n if opt.loaded_model_name:\n if opt.global_pipe:\n resume_str = opt.loaded_model_name\n else:\n resume_str = opt.loaded_model_name % opt.subaction\n # resume_str = opt.resume_str\n else:\n resume_str = opt.log_str + '.pth.tar'\n # opt.loaded_model_name = resume_str\n if opt.device == 
'cpu':\n checkpoint = torch.load(join(opt.dataset_root, 'models',\n '%s' % resume_str),\n map_location='cpu')\n else:\n checkpoint = torch.load(join(opt.dataset_root, 'models',\n '%s' % resume_str))\n checkpoint = checkpoint['state_dict']\n logger.debug('loaded model: ' + '%s' % resume_str)\n return checkpoint\n\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.cuda.manual_seed", "torch.save" ] ]
JustaTinyDot/BiTr-Unet
[ "52c1a68a9fd1cc7968e43d3f89ef700bcd71d60d" ]
[ "evaluation/evaluation.py" ]
[ "#Modified from the following:\n# -*- coding: utf-8 -*-\n# Implementation of Wang et al 2017: Automatic Brain Tumor Segmentation using Cascaded Anisotropic Convolutional Neural Networks. https://arxiv.org/abs/1709.00382\n\n# Author: Guotai Wang\n# Copyright (c) 2017-2018 University College London, United Kingdom. All rights reserved.\n# http://cmictig.cs.ucl.ac.uk\n#\n# Distributed under the BSD-3 licence. Please see the file licence.txt\n# This software is not certified for clinical use.\n\n# Partially adopted from https://github.com/Issam28/Brain-tumor-segmentation/blob/master/evaluation_metrics.py\n\nfrom __future__ import absolute_import, print_function\nimport os\nimport sys\nsys.path.append('./')\nimport numpy as np\nfrom data_process import load_3d_volume_as_array, binary_dice3d\nfrom scipy import ndimage\n\ndef sensitivity(seg,ground): \n #computs false negative rate\n num=np.sum(np.multiply(ground, seg))\n denom=np.sum(ground)\n if denom==0:\n return 1\n else:\n return num/denom\n\ndef specificity(seg,ground): \n #computes false positive rate\n num=np.sum(np.multiply(ground==0, seg ==0))\n denom=np.sum(ground==0)\n if denom==0:\n return 1\n else:\n return num/denom\n\ndef sensitivity_whole(seg,ground):\n return sensitivity(seg>0,ground>0)\n\ndef sensitivity_en(seg,ground):\n return sensitivity(seg==4,ground==4)\n\ndef sensitivity_core(seg,ground):\n seg_=np.copy(seg)\n ground_=np.copy(ground)\n seg_[seg_==2]=0\n ground_[ground_==2]=0\n return sensitivity(seg_>0,ground_>0)\n\ndef specificity_whole(seg,ground):\n return specificity(seg>0,ground>0)\n\ndef specificity_en(seg,ground):\n return specificity(seg==4,ground==4)\n\ndef specificity_core(seg,ground):\n seg_=np.copy(seg)\n ground_=np.copy(ground)\n seg_[seg_==2]=0\n ground_[ground_==2]=0\n return specificity(seg_>0,ground_>0)\n\ndef border_map(binary_img,neigh):\n \"\"\"\n Creates the border for a 3D image\n \"\"\"\n binary_map = np.asarray(binary_img, dtype=np.uint8)\n neigh = neigh\n west = ndimage.shift(binary_map, [-1, 0,0], order=0)\n east = ndimage.shift(binary_map, [1, 0,0], order=0)\n north = ndimage.shift(binary_map, [0, 1,0], order=0)\n south = ndimage.shift(binary_map, [0, -1,0], order=0)\n top = ndimage.shift(binary_map, [0, 0, 1], order=0)\n bottom = ndimage.shift(binary_map, [0, 0, -1], order=0)\n cumulative = west + east + north + south + top + bottom\n border = ((cumulative < 6) * binary_map) == 1\n return border\n\n\ndef border_distance(ref,seg):\n \"\"\"\n This functions determines the map of distance from the borders of the\n segmentation and the reference and the border maps themselves\n \"\"\"\n neigh=8\n border_ref = border_map(ref,neigh)\n border_seg = border_map(seg,neigh)\n oppose_ref = 1 - ref\n oppose_seg = 1 - seg\n # euclidean distance transform\n distance_ref = ndimage.distance_transform_edt(oppose_ref)\n distance_seg = ndimage.distance_transform_edt(oppose_seg)\n distance_border_seg = border_ref * distance_seg\n distance_border_ref = border_seg * distance_ref\n return distance_border_ref, distance_border_seg#, border_ref, border_seg\n\ndef Hausdorff_distance(ref,seg):\n \"\"\"\n This functions calculates the average symmetric distance and the\n hausdorff distance between a segmentation and a reference image\n :return: hausdorff distance and average symmetric distance\n \"\"\"\n ref_border_dist, seg_border_dist = border_distance(ref,seg)\n hausdorff_distance = np.max(\n [np.max(ref_border_dist), np.max(seg_border_dist)])\n return hausdorff_distance\n\n\n\ndef get_ground_truth_names(g_folder, 
patient_names_file):\n with open(patient_names_file) as f:\n content = f.readlines()\n patient_names = [x.strip() for x in content]\n full_gt_names = []\n for patient_name in patient_names:\n patient_dir = os.path.join(g_folder, patient_name)\n img_names = os.listdir(patient_dir)\n gt_name = None\n for img_name in img_names:\n if 'seg.' in img_name:\n gt_name = img_name\n break\n gt_name = os.path.join(patient_dir, gt_name)\n full_gt_names.append(gt_name)\n return full_gt_names\n\ndef get_segmentation_names(seg_folder, patient_names_file):\n with open(patient_names_file) as f:\n content = f.readlines()\n patient_names = [x.strip() for x in content]\n full_seg_names = []\n for patient_name in patient_names:\n seg_name = os.path.join(seg_folder, patient_name + '.nii.gz')\n full_seg_names.append(seg_name)\n return full_seg_names\n\ndef dice_of_brats_data_set(gt_names, seg_names, type_idx):\n assert(len(gt_names) == len(seg_names))\n dice_all_data = []\n for i in range(len(gt_names)):\n g_volume = load_3d_volume_as_array(gt_names[i])\n s_volume = load_3d_volume_as_array(seg_names[i])\n dice_one_volume = []\n if(type_idx ==0): #ET\n s_volume[s_volume == 2] = 0\n s_volume[s_volume == 1] = 0 \n g_volume[g_volume == 2] = 0\n g_volume[g_volume == 1] = 0\n temp_dice = binary_dice3d(s_volume > 0, g_volume > 0)\n dice_one_volume = [temp_dice]\n elif(type_idx == 1): # WT\n temp_dice = binary_dice3d(s_volume > 0, g_volume > 0)\n dice_one_volume = [temp_dice]\n elif(type_idx == 2): # TC\n s_volume[s_volume == 2] = 0 \n g_volume[g_volume == 2] = 0\n temp_dice = binary_dice3d(s_volume > 0, g_volume > 0)\n dice_one_volume = [temp_dice]\n \n \n else:\n for label in [1, 2, 4]: # dice of each class\n temp_dice = binary_dice3d(s_volume == label, g_volume == label)\n dice_one_volume.append(temp_dice)\n dice_all_data.append(dice_one_volume)\n return dice_all_data\n\ndef sensitivity_of_brats_data_set(gt_names, seg_names, type_idx):\n assert(len(gt_names) == len(seg_names))\n dice_all_data = []\n for i in range(len(gt_names)):\n g_volume = load_3d_volume_as_array(gt_names[i])\n s_volume = load_3d_volume_as_array(seg_names[i])\n sensi_one_volume = []\n if(type_idx ==0): #ET\n temp_sensi = sensitivity_en(s_volume,g_volume)\n sensi_one_volume = [temp_sensi]\n elif(type_idx == 1): # WT\n temp_sensi = sensitivity_whole(s_volume,g_volume)\n sensi_one_volume = [temp_sensi]\n elif(type_idx == 2): # TC\n temp_sensi = sensitivity_core(s_volume,g_volume)\n sensi_one_volume = [temp_sensi]\n\n return sensi_one_volume\n\ndef specificity_of_brats_data_set(gt_names, seg_names, type_idx):\n assert(len(gt_names) == len(seg_names))\n dice_all_data = []\n for i in range(len(gt_names)):\n g_volume = load_3d_volume_as_array(gt_names[i])\n s_volume = load_3d_volume_as_array(seg_names[i])\n speci_one_volume = []\n if(type_idx ==0): #ET\n temp_speci = specificity_en(s_volume,g_volume)\n speci_one_volume = [temp_speci]\n elif(type_idx == 1): # WT\n temp_speci = specificity_whole(s_volume,g_volume)\n speci_one_volume = [temp_speci]\n elif(type_idx == 2): # TC\n temp_speci = specificity_core(s_volume,g_volume)\n speci_one_volume = [temp_speci]\n\n return speci_one_volume\n\ndef hd_of_brats_data_set(gt_names, seg_names, type_idx):\n assert(len(gt_names) == len(seg_names))\n dice_all_data = []\n for i in range(len(gt_names)):\n g_volume = load_3d_volume_as_array(gt_names[i])\n s_volume = load_3d_volume_as_array(seg_names[i])\n hd_one_volume = []\n if(type_idx ==0): #ET\n temp_hd = Hausdorff_distance(g_volume,s_volume)\n hd_one_volume = 
temp_hd\n elif(type_idx == 1): # WT\n temp_hd = Hausdorff_distance(g_volume,s_volume)\n hd_one_volume = temp_hd\n elif(type_idx == 2): # TC\n temp_hd = Hausdorff_distance(g_volume,s_volume)\n hd_one_volume = temp_hd\n\n return hd_one_volume\n\n\nif __name__ == '__main__':\n \n \n \n s_folder = '/scratch/qj2022/TransBTS-main-2/output/submission/TransBTS20212021-07-30'\n g_folder = '/scratch/qj2022/TransBTS-main-2/data/BraTS2021_ValidationData'\n patient_names_file = '/scratch/qj2022/TransBTS-main-2/data/BraTS2021_ValidationData/valid.txt'\n\n test_types = ['ET','WT', 'TC']\n gt_names = get_ground_truth_names(g_folder, patient_names_file)\n seg_names = get_segmentation_names(s_folder, patient_names_file)\n for type_idx in range(3):\n dice = dice_of_brats_data_set(gt_names, seg_names, type_idx)\n dice = np.asarray(dice)\n dice_mean = dice.mean(axis = 0)\n dice_std = dice.std(axis = 0)\n test_type = test_types[type_idx]\n np.savetxt(s_folder + '/dice_{0:}.txt'.format(test_type), dice)\n np.savetxt(s_folder + '/dice_{0:}_mean.txt'.format(test_type), dice_mean)\n np.savetxt(s_folder + '/dice_{0:}_std.txt'.format(test_type), dice_std)\n\n sensitivity = sensitivity_of_brats_data_set(gt_names, seg_names, type_idx)\n np.savetxt(s_folder + '/sensitivity_{0:}.txt'.format(test_type), sensitivity)\n\n specificity = specificity_of_brats_data_set(gt_names, seg_names, type_idx)\n np.savetxt(s_folder + '/specificity_{0:}.txt'.format(test_type), specificity)\n\n hd = hd_of_brats_data_set(gt_names, seg_names, type_idx)\n np.savetxt(s_folder + '/Hausdorff_distance_{0:}.txt'.format(test_type), hd)\n\n print('tissue type', test_type)\n if(test_type == 'all'):\n print('tissue label', [1, 2, 4])\n print('dice mean ', dice_mean)\n print('dice std ', dice_std)\n print('sensitivity ',sensitivity)\n print('specificity ',specificity)\n print('Hausdorff_distance ',hd)\n \n" ]
[ [ "numpy.multiply", "numpy.asarray", "scipy.ndimage.distance_transform_edt", "numpy.max", "numpy.copy", "scipy.ndimage.shift", "numpy.sum" ] ]
devYaoYH/hackillinois_2020
[ "36fdcd2e4848d2b7f513ee729dc124dbdbeb3125" ]
[ "example_histogram.py" ]
[ "import h5py\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\ncwd = os.getcwd()\n\n#Open the data file\nfilepath = cwd + '\\\\demo.hdf'\nf = h5py.File(filepath, 'r')\n\n#Show all channels available in file\nchanIDs = f['DYNAMIC DATA']\n\nprint(\"Channels available in this data file\")\nprint(list(chanIDs.keys()))\n\n#Plot a sample dataset\nChannelName = 'ch_0'\ndset = chanIDs[ChannelName]['MEASURED']\nplt.hist(dset, bins='auto') # arguments are passed to np.histogram\nplt.title(\"Histogram ch0 (without noise reduction)\")\nplt.xlabel(\"Datapoint #\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nprint(\"Max of dataset: \" + str(max(dset)))\nprint(\"Min of dataset: \" + str(min(dset)))\n\n# Determined that initial 0 values are noise because of .....???\n# They may not be noise. They may be noise. You may need to make your own determination of what is and isn't noise\n# How do you determine what's noise? Great question! The answer is.....\nplt.hist(dset[6:-1], bins='auto') # arguments are passed to np.histogram\nplt.title(\"Histogram ch0 (with noise reduction)\")\nplt.xlabel(\"Datapoint #\")\nplt.ylabel(\"Frequency\")\nplt.show()\n\nprint(\"Max of dataset (wo noise): \" + str(max(dset[6:-1])))\nprint(\"Min of dataset (wo noise): \" + str(min(dset[6:-1])))\n\n#Close the file\nf.close()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
claudiodtbarros/GEM
[ "82ba349e18bb700a9380db2827ad3beb45ff1731" ]
[ "gem/embedding/lle.py" ]
[ "disp_avlbl = True\nimport os\nif 'DISPLAY' not in os.environ:\n disp_avlbl = False\n import matplotlib\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport scipy.io as sio\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as lg\nfrom sklearn.preprocessing import normalize\nfrom time import time\nimport pdb\n\nimport sys\nsys.path.append('./')\nsys.path.append(os.path.realpath(__file__))\n\nfrom .static_graph_embedding import StaticGraphEmbedding\nfrom gem.utils import graph_util, plot_util\nfrom gem.evaluation import visualize_embedding as viz\n\n\nclass LocallyLinearEmbedding(StaticGraphEmbedding):\n\n def __init__(self, *hyper_dict, **kwargs):\n ''' Initialize the LocallyLinearEmbedding class\n\n Args:\n d: dimension of the embedding\n '''\n hyper_params = {\n 'method_name': 'lle_svd'\n }\n hyper_params.update(kwargs)\n for key in hyper_params.keys():\n self.__setattr__('_%s' % key, hyper_params[key])\n for dictionary in hyper_dict:\n for key in dictionary:\n self.__setattr__('_%s' % key, dictionary[key])\n\n def get_method_name(self):\n return self._method_name\n\n def get_method_summary(self):\n return '%s_%d' % (self._method_name, self._d)\n\n def learn_embedding(self, graph=None, edge_f=None,\n is_weighted=False, no_python=False):\n if not graph and not edge_f:\n raise Exception('graph/edge_f needed')\n if not graph:\n graph = graph_util.loadGraphFromEdgeListTxt(edge_f)\n graph = graph.to_undirected()\n t1 = time()\n A = nx.to_scipy_sparse_matrix(graph)\n normalize(A, norm='l1', axis=1, copy=False)\n I_n = sp.eye(graph.number_of_nodes())\n I_min_A = I_n - A\n u, s, vt = lg.svds(I_min_A, k=self._d + 1, which='SM')\n t2 = time()\n self._X = vt.T\n self._X = self._X[:, 1:]\n return self._X.real, (t2 - t1)\n\n def get_embedding(self):\n return self._X\n\n def get_edge_weight(self, i, j):\n return np.exp(\n -np.power(np.linalg.norm(self._X[i, :] - self._X[j, :]), 2)\n )\n\n def get_reconstructed_adj(self, X=None, node_l=None):\n if X is not None:\n node_num = X.shape[0]\n self._X = X\n else:\n node_num = self._node_num\n adj_mtx_r = np.zeros((node_num, node_num))\n for v_i in range(node_num):\n for v_j in range(node_num):\n if v_i == v_j:\n continue\n adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)\n return adj_mtx_r\n\n\nif __name__ == '__main__':\n # load Zachary's Karate graph\n edge_f = 'data/karate.edgelist'\n G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)\n G = G.to_directed()\n res_pre = 'results/testKarate'\n graph_util.print_graph_stats(G)\n t1 = time()\n embedding = LocallyLinearEmbedding(2)\n embedding.learn_embedding(graph=G, edge_f=None,\n is_weighted=True, no_python=True)\n print('Graph Factorization:\\n\\tTraining time: %f' % (time() - t1))\n\n viz.plot_embedding2D(embedding.get_embedding(),\n di_graph=G, node_colors=None)\n plt.show()\n" ]
[ [ "matplotlib.use", "numpy.linalg.norm", "scipy.sparse.linalg.svds", "sklearn.preprocessing.normalize", "matplotlib.pyplot.show", "numpy.zeros" ] ]
llealgt/PySyft
[ "76c91adde068ed930cff7ca9249ab06e08210e97" ]
[ "syft/core/frameworks/torch/__init__.py" ]
[ "from .hook import TorchHook\nfrom .tensor import _SyftTensor, _LocalTensor, _PointerTensor\nfrom .tensor import _FixedPrecisionTensor, _TorchTensor, _PlusIsMinusTensor, _GeneralizedPointerTensor\nfrom .tensor import _SPDZTensor, _SNNTensor\n\n__all__ = ['TorchHook', '_SyftTensor', '_LocalTensor',\n '_PointerTensor', '_FixedPrecisionTensor', '_TorchTensor',\n '_PlusIsMinusTensor', '_GeneralizedPointerTensor', '_SPDZTensor',\n '_SNNTensor']\n\nimport torch\n\n# this is a list of all module functions in the torch module\ntorch.torch_funcs = dir(torch)\n\n# this is a list of all module functions in torch.nn.functional\ntorch.torch_functional_funcs = dir(torch.nn.functional)\n\n# Gathers all the functions from above\ntorch.torch_modules = {\n 'torch': torch.torch_funcs,\n 'torch.nn.functional': torch.torch_functional_funcs\n}\n\n# this is the list of torch tensor types that we will override for remote execution\ntorch.tensor_types = [torch.FloatTensor,\n torch.DoubleTensor,\n torch.HalfTensor,\n torch.ByteTensor,\n torch.CharTensor,\n torch.ShortTensor,\n torch.IntTensor,\n torch.LongTensor]\n\ntorch.var_types = [torch.autograd.variable.Variable, torch.nn.Parameter]\n\n# a list of all classes in which we will override their methods for remote execution\ntorch.tensorvar_types = torch.tensor_types + \\\n [torch.autograd.variable.Variable]\n\ntorch.tensorvar_types_strs = [x.__name__ for x in torch.tensorvar_types]\n\ntorch.tensorvar_methods = list(\n set(\n [method\n for tensorvar in torch.tensorvar_types\n for method in dir(tensorvar)]\n )\n)\ntorch.tensorvar_methods.append('get_shape')\ntorch.tensorvar_methods.append(\"share\")\ntorch.tensorvar_methods.append(\"fix_precision\")\ntorch.tensorvar_methods.append(\"decode\")\n\n# Torch functions we don't want to override\ntorch.torch_exclude = ['save', 'load', 'typename', 'is_tensor', 'manual_seed']\n\ntorch.guard = {\n 'syft.core.frameworks.torch.tensor.Variable': torch.autograd.Variable,\n 'syft.core.frameworks.torch.tensor._PointerTensor': _PointerTensor,\n 'syft.core.frameworks.torch.tensor._SyftTensor': _SyftTensor,\n 'syft.core.frameworks.torch.tensor._LocalTensor': _LocalTensor,\n 'syft.core.frameworks.torch.tensor._FixedPrecisionTensor': _FixedPrecisionTensor,\n 'syft.core.frameworks.torch.tensor._GeneralizedPointerTensor': _GeneralizedPointerTensor,\n 'syft.core.frameworks.torch.tensor._SNNTensor': _SNNTensor,\n 'syft._PlusIsMinusTensor': _PlusIsMinusTensor,\n 'syft._SPDZTensor': _SPDZTensor,\n 'syft._SNNTensor': _SNNTensor,\n 'syft._FixedPrecisionTensor': _FixedPrecisionTensor,\n 'syft.core.frameworks.torch.tensor.FloatTensor': torch.FloatTensor,\n 'syft.core.frameworks.torch.tensor.DoubleTensor': torch.DoubleTensor,\n 'syft.core.frameworks.torch.tensor.HalfTensor': torch.HalfTensor,\n 'syft.core.frameworks.torch.tensor.ByteTensor': torch.ByteTensor,\n 'syft.core.frameworks.torch.tensor.CharTensor': torch.CharTensor,\n 'syft.core.frameworks.torch.tensor.ShortTensor': torch.ShortTensor,\n 'syft.core.frameworks.torch.tensor.IntTensor': torch.IntTensor,\n 'syft.core.frameworks.torch.tensor.LongTensor': torch.LongTensor,\n 'syft.Variable': torch.autograd.Variable,\n 'syft.FloatTensor': torch.FloatTensor,\n 'syft.DoubleTensor': torch.DoubleTensor,\n 'syft.HalfTensor': torch.HalfTensor,\n 'syft.ByteTensor': torch.ByteTensor,\n 'syft.CharTensor': torch.CharTensor,\n 'syft.ShortTensor': torch.ShortTensor,\n 'syft.IntTensor': torch.IntTensor,\n 'syft.LongTensor': torch.LongTensor,\n 'syft.Parameter': torch.nn.Parameter\n}\n\n\ndef 
_command_guard(command, allowed):\n if isinstance(allowed, dict):\n allowed_names = []\n for module_name, func_names in allowed.items():\n for func_name in func_names:\n allowed_names.append(module_name + '.' + func_name)\n allowed = allowed_names\n if command not in allowed:\n raise RuntimeError(\n 'Command \"{}\" is not a supported Torch operation.'.format(command))\n return command\n\ntorch._command_guard = _command_guard\n\n\ndef _is_command_valid_guard(command, allowed):\n try:\n torch._command_guard(command, allowed)\n except RuntimeError:\n return False\n return True\n\ntorch._is_command_valid_guard = _is_command_valid_guard\n" ]
[ [ "torch._command_guard", "torch.tensorvar_methods.append" ] ]
k59047318/ML-final-project
[ "6adcc6fd830279d368dcc506f476aff873a36678" ]
[ "code/build_vocabulary.py" ]
[ "from PIL import Image\nimport numpy as np\nfrom cyvlfeat.sift.dsift import dsift\nfrom cyvlfeat.kmeans import kmeans\nfrom time import time\n\nimport pdb\n\n#This function will sample SIFT descriptors from the training images,\n#cluster them with kmeans, and then return the cluster centers.\n\ndef build_vocabulary(image_paths, vocab_size):\n ##################################################################################\n # TODO: #\n # Load images from the training set. To save computation time, you don't #\n # necessarily need to sample from all images, although it would be better #\n # to do so. You can randomly sample the descriptors from each image to save #\n # memory and speed up the clustering. Or you can simply call vl_dsift with #\n # a large step size here. #\n # #\n # For each loaded image, get some SIFT features. You don't have to get as #\n # many SIFT features as you will in get_bags_of_sift.py, because you're only #\n # trying to get a representative sample here. #\n # #\n # Once you have tens of thousands of SIFT features from many training #\n # images, cluster them with kmeans. The resulting centroids are now your #\n # visual word vocabulary. #\n ##################################################################################\n ##################################################################################\n # NOTE: Some useful functions #\n # This function will sample SIFT descriptors from the training images, #\n # cluster them with kmeans, and then return the cluster centers. #\n # #\n # Function : dsift() #\n # SIFT_features is a N x 128 matrix of SIFT features #\n # There are step, bin size, and smoothing parameters you can #\n # manipulate for dsift(). We recommend debugging with the 'fast' #\n # parameter. This approximate version of SIFT is about 20 times faster to #\n # compute. Also, be sure not to use the default value of step size. It will #\n # be very slow and you'll see relatively little performance gain from #\n # extremely dense sampling. You are welcome to use your own SIFT feature. #\n # #\n # Function : kmeans(X, K) #\n # X is a M x d matrix of sampled SIFT features, where M is the number of #\n # features sampled. M should be pretty large! #\n # K is the number of clusters desired (vocab_size) #\n # centers is a d x K matrix of cluster centroids. #\n # #\n # NOTE: #\n # e.g. 1. dsift(img, step=[?,?], fast=True) #\n # 2. kmeans( ? , vocab_size) # \n # #\n # ################################################################################\n '''\n Input : \n image_paths : a list of training image path\n vocal size : number of clusters desired\n Output :\n Clusters centers of Kmeans\n '''\n\n bag_of_features = []\n print(\"Extract SIFT features\")\n for path in image_paths:\n img = np.asarray(Image.open(path),dtype='float32')\n frames, descriptors = dsift(img, step=[10,10], fast=True)\n bag_of_features.append(descriptors)\n bag_of_features = np.concatenate(bag_of_features, axis=0).astype('float32')\n start_time = time()\n vocab = kmeans(bag_of_features, vocab_size, initialization=\"PLUSPLUS\") \n end_time = time()\n print(\"It takes %.2f s to build vocabulary.\"%(end_time-start_time)) \n \n ##################################################################################\n # END OF YOUR CODE #\n ##################################################################################\n return vocab\n\n" ]
[ [ "numpy.concatenate" ] ]
CookiePPP/hifi-gan
[ "688af111556b39d5f105870a1f292190396fb6b2" ]
[ "train.py" ]
[ "import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport itertools\nimport os\nimport time\nimport argparse\nimport json\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DistributedSampler, DataLoader\nimport torch.multiprocessing as mp\nfrom torch.distributed import init_process_group\nfrom torch.nn.parallel import DistributedDataParallel\nfrom env import AttrDict, build_env\nfrom meldataset import MelDataset, mel_spectrogram, get_dataset_filelist\nfrom models import Generator, MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss,\\\n discriminator_loss\nfrom utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint\n\ntorch.backends.cudnn.benchmark = True\n\n\ndef train(rank, a, h):\n if h.num_gpus > 1:\n init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],\n world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)\n\n torch.cuda.manual_seed(h.seed)\n device = torch.device('cuda:{:d}'.format(rank))\n\n generator = Generator(h).to(device)\n mpd = MultiPeriodDiscriminator().to(device)\n msd = MultiScaleDiscriminator().to(device)\n\n if rank == 0:\n print(generator)\n os.makedirs(a.checkpoint_path, exist_ok=True)\n print(\"checkpoints directory : \", a.checkpoint_path)\n\n if os.path.isdir(a.checkpoint_path):\n cp_g = scan_checkpoint(a.checkpoint_path, 'g_')\n cp_do = scan_checkpoint(a.checkpoint_path, 'do_')\n\n steps = 0\n if cp_g is None or cp_do is None:\n state_dict_do = None\n last_epoch = -1\n else:\n state_dict_g = load_checkpoint(cp_g, device)\n state_dict_do = load_checkpoint(cp_do, device)\n generator.load_state_dict(state_dict_g['generator'])\n mpd.load_state_dict(state_dict_do['mpd'])\n msd.load_state_dict(state_dict_do['msd'])\n steps = state_dict_do['steps'] + 1\n last_epoch = state_dict_do['epoch']\n\n if h.num_gpus > 1:\n generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)\n mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)\n msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)\n\n optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])\n optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),\n h.learning_rate, betas=[h.adam_b1, h.adam_b2])\n\n if state_dict_do is not None:\n optim_g.load_state_dict(state_dict_do['optim_g'])\n optim_d.load_state_dict(state_dict_do['optim_d'])\n\n scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)\n scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)\n\n training_filelist, validation_filelist = get_dataset_filelist(a)\n\n trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,\n h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,\n shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,\n fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)\n\n train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None\n\n train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,\n sampler=train_sampler,\n batch_size=h.batch_size,\n pin_memory=True,\n drop_last=True)\n\n if rank == 0:\n validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,\n h.hop_size, h.win_size, h.sampling_rate, h.fmin, 
h.fmax, False, False, n_cache_reuse=0,\n fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,\n base_mels_path=a.input_mels_dir)\n validation_loader = DataLoader(validset, num_workers=1, shuffle=False,\n sampler=None,\n batch_size=1,\n pin_memory=True,\n drop_last=True)\n\n sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))\n\n generator.train()\n mpd.train()\n msd.train()\n for epoch in range(max(0, last_epoch), a.training_epochs):\n if rank == 0:\n start = time.time()\n print(\"Epoch: {}\".format(epoch+1))\n\n if h.num_gpus > 1:\n train_sampler.set_epoch(epoch)\n\n for i, batch in enumerate(train_loader):\n if rank == 0:\n start_b = time.time()\n x, y, _, mel_spec = batch\n x = torch.autograd.Variable(x.to(device, non_blocking=True))\n y = torch.autograd.Variable(y.to(device, non_blocking=True))\n mel_spec = torch.autograd.Variable(mel_spec.to(device, non_blocking=True))\n y = y.unsqueeze(1)\n\n y_g_hat = generator(x)\n y_mel = mel_spec\n y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,\n h.fmin, h.fmax_for_loss)\n\n optim_d.zero_grad()\n\n # MPD\n y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())\n loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)\n\n # MSD\n y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())\n loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)\n\n loss_disc_all = loss_disc_s + loss_disc_f\n\n loss_disc_all.backward()\n optim_d.step()\n\n # Generator\n optim_g.zero_grad()\n\n # L1 Mel-Spectrogram Loss\n loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45\n\n y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)\n y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)\n loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)\n loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)\n loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)\n loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)\n loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel\n\n loss_gen_all.backward()\n optim_g.step()\n\n if rank == 0:\n # STDOUT logging\n if steps % a.stdout_interval == 0:\n with torch.no_grad():\n mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()\n\n print('Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. 
Error : {:4.3f}, s/b : {:4.3f}'.\n format(steps, loss_gen_all, mel_error, time.time() - start_b))\n\n # checkpointing\n if steps % a.checkpoint_interval == 0 and steps != 0:\n checkpoint_path = \"{}/g_{:08d}\".format(a.checkpoint_path, steps)\n save_checkpoint(checkpoint_path,\n {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})\n checkpoint_path = \"{}/do_{:08d}\".format(a.checkpoint_path, steps)\n save_checkpoint(checkpoint_path, \n {'mpd': (mpd.module if h.num_gpus > 1\n else mpd).state_dict(),\n 'msd': (msd.module if h.num_gpus > 1\n else msd).state_dict(),\n 'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(), 'steps': steps,\n 'epoch': epoch})\n\n # Tensorboard summary logging\n if steps % a.summary_interval == 0:\n sw.add_scalar(\"training/gen_loss_total\", loss_gen_all, steps)\n sw.add_scalar(\"training/mel_spec_error\", mel_error, steps)\n\n # Validation\n if steps % a.validation_interval == 0: # and steps != 0:\n generator.eval()\n torch.cuda.empty_cache()\n with torch.no_grad():\n for i, batch in enumerate(validation_loader):\n x, y, _, _ = batch\n y_g_hat = generator(x.to(device))\n\n if steps == 0:\n sw.add_audio('gt/y_{}'.format(i), y[0], steps, h.sampling_rate)\n sw.add_figure('gt/y_spec_{}'.format(i), plot_spectrogram(x[0]), steps)\n\n sw.add_audio('generated/y_hat_{}'.format(i), y_g_hat[0], steps, h.sampling_rate)\n y_hat_spec = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels,\n h.sampling_rate, h.hop_size, h.win_size,\n h.fmin, h.fmax)\n sw.add_figure('generated/y_hat_spec_{}'.format(i),\n plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps)\n if i == 4:\n break\n generator.train()\n\n steps += 1\n\n scheduler_g.step()\n scheduler_d.step()\n \n if rank == 0:\n print('Time taken for epoch {} is {} sec\\n'.format(epoch + 1, int(time.time() - start)))\n\n\ndef main():\n print('Initializing Training Process..')\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--group_name', default=None)\n parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs')\n parser.add_argument('--input_mels_dir', default='ft_dataset')\n parser.add_argument('--input_training_file', default='LJSpeech-1.1/training.txt')\n parser.add_argument('--input_validation_file', default='LJSpeech-1.1/validation.txt')\n parser.add_argument('--checkpoint_path', default='cp_hifigan')\n parser.add_argument('--config', default='')\n parser.add_argument('--training_epochs', default=3100, type=int)\n parser.add_argument('--stdout_interval', default=5, type=int)\n parser.add_argument('--checkpoint_interval', default=5000, type=int)\n parser.add_argument('--summary_interval', default=100, type=int)\n parser.add_argument('--validation_interval', default=1000, type=int)\n parser.add_argument('--fine_tuning', default=False, type=bool)\n\n a = parser.parse_args()\n\n with open(a.config) as f:\n data = f.read()\n\n json_config = json.loads(data)\n h = AttrDict(json_config)\n build_env(a.config, 'config.json', a.checkpoint_path)\n\n torch.manual_seed(h.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(h.seed)\n h.num_gpus = torch.cuda.device_count()\n h.batch_size = int(h.batch_size / h.num_gpus)\n print('Batch size per GPU :', h.batch_size)\n else:\n pass\n\n if h.num_gpus > 1:\n mp.spawn(train, nprocs=h.num_gpus, args=(a, h,))\n else:\n train(0, a, h)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.data.DistributedSampler", "torch.distributed.init_process_group", "torch.cuda.manual_seed", "torch.multiprocessing.spawn", "torch.nn.functional.l1_loss", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.optim.lr_scheduler.ExponentialLR", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel" ] ]
robjordan/gps-accuracy
[ "fbf5a094c96dcbb692065a928c95d3702d0152b0" ]
[ "gps-accuracy.py" ]
[ "import argparse\nimport gpxpy\nimport gpxpy.gpx\nfrom pyproj import Proj\nimport numpy as np\nfrom scipy.spatial import cKDTree\nimport itertools\nimport math\nimport statistics as st\nfrom datetime import datetime, timezone, MINYEAR\n\n# assume England - change this if you live elsewhere\nUTMZ = '30U'\nmyProj = Proj(\"+proj=utm +zone=\"+UTMZ+\", +north, +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description='Measure GPS accuracy by comparing recorded track points with a planned route.',\n prog='gps-accuracy')\n parser.add_argument(\n '-d', \n '--debug', \n action='store_true', \n help='generate debug output, including a GPX file visualising tracking errors')\n parser.add_argument('route', help='filename of route (GPX track format)')\n parser.add_argument('track', help='filename of track (GPX track format)')\n args = parser.parse_args()\n return args\n\n\ndef gpx_to_utm(filename, prefix=None):\n \"\"\"Return arrays X and Y, which are UTM coordinates of points in the GPX\"\"\"\n # convert points to XY in Universal Transverse Mercator - assume England\n coords = []\n prev = None\n distance = 0\n end_time = None\n start_time = None\n intervals = []\n\n if prefix is not None:\n print(\"{}.filename\\t{}\".format(prefix, filename))\n t = gpxpy.parse(open(filename))\n\n for track in t.tracks:\n for segment in track.segments:\n for point in segment.points:\n if point != prev: # prevent identical successive coordinates\n coords.append(myProj(point.longitude, point.latitude))\n if prev is not None and prefix is not None:\n intervals.append(point.time - prev.time)\n prev = point\n\n distance = distance + track.length_2d()\n track_start, end_time = track.get_time_bounds()\n if not start_time:\n start_time = track_start\n\n if prefix is not None:\n print(\"{}.num_points\\t{}\".format(prefix, len(coords)))\n print(\"{0}.distance\\t{1: .0f}\".format(prefix, distance))\n print(\"{}.start\\t{}\".format(prefix, start_time))\n print(\"{}.end\\t{}\".format(prefix, end_time))\n print(\"{}.intervals.mean\\t{}\".format(\n prefix,\n np.mean(intervals)))\n print(\"{}.intervals.max\\t{}\".format(\n prefix, np.max(intervals)))\n return coords\n\n\ndef print_error_stats(errors):\n \"\"\"Print a variety of stats characterising the track errors.\"\"\"\n print(\"errors.mean\\t{0: .2f}\".format(np.mean(errors)))\n print(\"errors.median\\t{0: .2f}\".format(np.median(errors)))\n print(\"errors.95th_percentile\\t{0: .2f}\".format(\n np.percentile(errors, 95)))\n\n\ndef utm_to_gpx(p):\n \"\"\"Convert a single UTM cooordinate, passed as tuple, to lat, lon\"\"\"\n lon, lat = myProj(p[0], p[1], inverse=True)\n return lat, lon\n\n\ndef distance(a, b):\n \"\"\"Calculate euclidian distance between 2 points which are (x, y) tuples\"\"\"\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)\n\n\ndef intersection(t, r1, r2):\n \"\"\"Given two route points, r1, r2, find the perpendicular intersection from track point t\"\"\"\n # Calculate the gradient and intercept of the route vector.\n try: # there's a risk of divide by zero errors\n r_grad = (r1[1] - r2[1]) / (r1[0] - r2[0])\n r_intercept = r1[1] - r_grad * r1[0]\n # gradient of the perpendicular error bar is -1/m\n e_grad = -1.0 / r_grad\n e_intercept = t[1] - e_grad * t[0]\n # solve the two equations of form y = mx + c to find the\n # intersection\n multiplier = - r_grad / e_grad\n y = (r_intercept + multiplier * e_intercept) / (multiplier + 1)\n x = (y - r_intercept) / r_grad\n except ZeroDivisionError:\n # R1 and 
R2 must be either due N-S or due E-W\n if r1[0] == r2[0]:\n # due N-S. Intersection point is X from R1 & R2, Y from T\n x = r1[0]\n y = t[1]\n elif r1[1] == r2[1]:\n # due E-W. Intersection is X from T and Y from R1 & R2\n x = t[0]\n y = r1[1]\n\n return is_on_line((x, y), r1, r2), x, y, distance((x, y), t)\n\n\ndef is_on_line(crosspt, r1, r2):\n # In fact it only checks if crosspt is in the bounding box defined\n # by R1 and R2 but since we know this is a solution of the two\n # linear equations, if it's in the box, it's also on the line.\n flag = False\n if crosspt[0] > min(r1[0], r2[0]) and crosspt[0] < max(r1[0], r2[0]):\n if crosspt[1] > min(r1[1], r2[1]) and crosspt[1] < max(r1[1], r2[1]):\n flag = True\n dbg_print(\"cross: {}, r1: {}, r2: {}, valid: {}\".format(crosspt, r1, r2, flag))\n return flag\n\n\ndef dbg_print(*prargs):\n if args.debug:\n print(prargs)\n\n\n# For debug / test purposes, create a GPX file that visualises the\n# track and the error bar for each track point\nclass VisGpx:\n\n def __init__(self):\n self.gpx = gpxpy.gpx.GPX()\n self.gpx_track = gpxpy.gpx.GPXTrack()\n self.gpx.tracks.append(self.gpx_track)\n self.gpx_segment = gpxpy.gpx.GPXTrackSegment()\n self.gpx_track.segments.append(self.gpx_segment)\n\n def append(self, t, e_point):\n # Add three points to the track: T, the calculated error, and T again.\n e_point_lat, e_point_lon = utm_to_gpx(e_point)\n t_lat, t_lon = utm_to_gpx(t)\n self.gpx_segment.points.append(\n gpxpy.gpx.GPXTrackPoint(t_lat, t_lon))\n self.gpx_segment.points.append(\n gpxpy.gpx.GPXTrackPoint(e_point_lat, e_point_lon))\n self.gpx_segment.points.append(\n gpxpy.gpx.GPXTrackPoint(t_lat, t_lon))\n\n def finish(self):\n open(\"__VisGPX.gpx\", \"w+\").write(self.gpx.to_xml())\n\n\n# MAIN #\nvis = VisGpx()\nargs = get_args()\ndbg_print(args)\ndbg_print(args.debug)\ndbg_print(args.route)\ndbg_print(args.track)\n\n# load each GPX file as a series of track points with location as lat, lon\ntrack = gpx_to_utm(args.track, \"track\")\nroute = gpx_to_utm(args.route)\nerrors = []\n\ndbg_print(\"track:\", len(track), \"x\", len(track[0]), \"track[0]:\", track[0])\ndbg_print(\"route:\", len(route), \"x\", len(route[0]), \"route[0]:\", route[0])\n\n# Our task is to find the nearest adjacent pair of points in the route\n# for each point in the track, so set up a KD tree of route points and\n# query the nearest neighbour or each point in the current track\ndistances, indexes = cKDTree(route).query(track)\n\n# For each track point, we now know the index of the nearest route\n# point, and its distance, d.\nfor (t, d, i) in zip(track, distances, indexes):\n nearest = route[i]\n\n # Two cases:\n # 1. Closest distance from track point T to route is directly to\n # point 'nearest'\n # 2. Closest distance from track point T to route is to a point on a\n # line between successive nearby route points. It's indeterminate\n # now many route points to check, but in practice we seem to\n # correctly find the shortest distance by considering (i-2, i-1),\n # (i-1, i), (i, i+1), (i+1, i+2)\n\n # Set up for case 1.\n dbg_print(\"Considering t={}, {}\".format(t, utm_to_gpx(t)))\n dbg_print(\n \"Nearest point i={}, {}, {}\".format(i, route[i], utm_to_gpx(route[i])))\n shortest_d = d\n closest_x = nearest[0]\n closest_y = nearest[1]\n dbg_print(\n \"Route point i={}, location={}, d={}\".format(i, nearest, shortest_d))\n\n # Check for case 2. 
We can find the potential closest point by\n # expressing each line R1-R2 as an equation in the form y = mx + c,\n # then describing another line through T, perpendicular to R1-R2\n # (which will have gradient -1/m), and solving the two equations to\n # find the point of intersection.\n for r1 in range(max(0, i-2), min(i+2, len(route)-1)):\n valid, x, y, d = intersection(t, route[r1], route[r1+1])\n dbg_print(\n \"Route segment r1={}, r2={}, intersect={}, d={}, valid={}\".format(\n r1, r1+1, (x, y), d, valid))\n if valid and d < shortest_d:\n dbg_print(\"new closest\")\n closest_x = x\n closest_y = y\n shortest_d = d\n\n errors.append(shortest_d)\n if args.debug:\n vis.append(t, (closest_x, closest_y))\n\nif args.debug:\n vis.finish()\n\n# Print a summary of the results of our analysis\nprint_error_stats(errors)\n" ]
[ [ "numpy.median", "numpy.percentile", "numpy.max", "numpy.mean", "scipy.spatial.cKDTree" ] ]
nairoukh-code/Python_Projects
[ "9a0e2adb6e352b301ed9e542be9c9f1cd16b95b0" ]
[ "NLP/project.py" ]
[ "from nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom nltk.corpus import words\nfrom sklearn.model_selection import KFold\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\n\n# open and clean true and fake tweets and split them into train and test data\ntrue_tweets = open(\"genuine_tweets.csv\", errors='ignore')\ntweets_collection = []\nfor tweet in true_tweets:\n if len(tweet) > 0:\n tweet_split = tweet.split(\",\")\n if len(tweet_split) > 2:\n if tweet_split[1] != \"“just posted a photo”\":\n tweets_collection.append([tweet_split[1], \"true\"])\nfake_tweets = open(\"fake_tweets.csv\", errors='ignore')\nfake_tweets_collection = []\nfor tweet in fake_tweets:\n if len(tweet) > 0:\n tweet_split = tweet.split(\",\")\n if len(tweet_split) > 2:\n if tweet_split[2] != \"“just posted a photo”\":\n tweets_collection.append([tweet_split[2], \"fake\"])\n\ntweet_train, tweet_test = train_test_split(tweets_collection, train_size=0.8)\n\n# extract feature vector\n\ntrain_tweet_features = []\ntest_tweet_features = []\nstopWords = set(stopwords.words('english'))\n\nfor tweet in tweet_train:\n stopword_occur = 0\n capital_occur = 0\n number_occur = 0\n eng_word_occur = 0\n mention = 0\n for word in tweet[0]:\n if word in stopWords:\n stopword_occur += 1\n if word.isupper():\n capital_occur += 1\n if word.isdecimal():\n number_occur += 1\n\n if word[0] == \"@\":\n mention += 1\n train_tweet_features.append([len(tweet[0]), stopword_occur, capital_occur, number_occur, mention, tweet[1]])\n\nfor tweet in tweet_test:\n stopword_occur = 0\n capital_occur = 0\n number_occur = 0\n eng_word_occur = 0\n mention = 0\n for word in tweet[0]:\n if word in stopWords:\n stopword_occur += 1\n if word.isupper():\n capital_occur += 1\n if word.isdecimal():\n number_occur += 1\n\n if word[0] == \"@\":\n mention += 1\n test_tweet_features.append([len(tweet[0]), stopword_occur, capital_occur, number_occur, mention, tweet[1]])\n\n# place \"Data & Test_Data\" in pandas DataFrame\n\ncol_names = [\"length of tweet\", \"stopwords occur\", \"capital occur\", \"number_occur\", \"mention\", \"label\"]\ntrain_data = pd.DataFrame(train_tweet_features, columns=col_names)\ntest_data = pd.DataFrame(test_tweet_features, columns=col_names)\ntrain_data.to_csv(\"train_data\")\ntest_data.to_csv(\"test_data\")\n\n#combine and shuffle data for k-folds\nTweets=pd.concat([train_data,test_data])\n\nTweets = Tweets.sample(frac = 1)\nTweets=Tweets.reset_index(drop=True)\n\n# Get Features\nfeatures=[]\n\nfor i in range(len(Tweets.columns)-1):\n features.append(Tweets.columns[i])\n\n#Split into folds and preform Evaluation\nFolds = KFold(n_splits=5)\ni=1\nfor train,test in Folds.split(Tweets):\n print(\"Fold %d\"%i)\n X=Tweets.iloc[train][features]\n Y=Tweets.iloc[train]['label']\n Xtest=Tweets.iloc[test][features]\n Ytest=Tweets.iloc[test]['label']\n model=DecisionTreeClassifier()\n model.fit(X,Y)\n Ypred=model.predict(Xtest)\n print(\"accuracy_Score %2fPERCENT\"%(accuracy_score(Ytest,Ypred)))\n i=i+1" ]
[ [ "pandas.concat", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.model_selection.KFold", "sklearn.tree.DecisionTreeClassifier", "sklearn.metrics.accuracy_score" ] ]
Ahmedjjj/MiDaS
[ "915e88ecad177f04fb84b2f3cdf6892b8c603b07" ]
[ "midas/midas_net.py" ]
[ "\"\"\"MidashNet: Network for monocular depth estimation trained by mixing several datasets.\nThis file contains code that is adapted from\nhttps://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom .base_model import BaseModel\nfrom .blocks import FeatureFusionBlock, Interpolate, _make_encoder\n\n\nclass MidasNet(BaseModel):\n \"\"\"Network for monocular depth estimation.\n \"\"\"\n\n def __init__(self, path=None, features=256, non_negative=True):\n \"\"\"Init.\n\n Args:\n path (str, optional): Path to saved model. Defaults to None.\n features (int, optional): Number of features. Defaults to 256.\n backbone (str, optional): Backbone network for encoder. Defaults to resnet50\n \"\"\"\n print(\"Loading weights: \", path)\n\n super(MidasNet, self).__init__()\n\n use_pretrained = False if path is None else True\n\n self.pretrained, self.scratch = _make_encoder(\n backbone=\"resnext101_wsl\", features=features, use_pretrained=use_pretrained)\n\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n\n self.scratch.output_conv = nn.Sequential(\n nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),\n Interpolate(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(True),\n nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True) if non_negative else nn.Identity(),\n )\n\n if path:\n self.load(path)\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n x (tensor): input data (image)\n\n Returns:\n tensor: depth\n \"\"\"\n\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n\n out = self.scratch.output_conv(path_1)\n\n return torch.squeeze(out, dim=1)\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Identity", "torch.squeeze" ] ]
pnarvor/nephelae_base
[ "d5f1abeae0b0473b895b4735f182ddae0516a1bd" ]
[ "tests/mapping/map_resolution01.py" ]
[ "#! /usr/bin/python3\n\n# changing process priority (linux only)\nimport os\n# os.nice(-19) # probably a bit harsh (requires sudo)\n\nimport sys\nsys.path.append('../../')\nimport numpy as np\nimport numpy.fft as npfft\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport time\nfrom PIL import Image\n\nfrom nephelae_mesonh import MesonhDataset, MesonhMap\nfrom nephelae.types import Bounds\n\nfrom nephelae.mapping import GprPredictor\nfrom nephelae.mapping import StdMap\nfrom nephelae.mapping import ValueMap\nfrom nephelae.mapping import WindKernel\nfrom nephelae.mapping import WindMapConstant\nfrom nephelae.database import NephelaeDataServer\n\n# from nephelae.mapping import compute_com\n\n# mesonhPath = '/local/fseguin/nephelae_data/REFHR.1.ARMCu.4D.nc'\nmesonhPath = '/home/pnarvor/work/nephelae/data/MesoNH-2019-02/REFHR.1.ARMCu.4D.nc'\ndataset = MesonhDataset(mesonhPath)\n\nrct = MesonhMap(\"RCT Map\", dataset, 'RCT')\n# ut = MesonhMap(\"UT Map\", dataset, 'UT')\n# vt = MesonhMap(\"VT Map\", dataset, 'VT')\n\n# Kernel\nv0 = np.array([8.5, 0.9])\nprocessVariance = 1.0e-8\nnoiseStddev = 0.1 * np.sqrt(processVariance)\n# kernel0 = WindKernel(lengthScales, processVariance, noiseStddev**2, v0)\n# lengthScales = [70.0, 60.0, 60.0, 60.0]\nlengthScales = [70.0, 80.0, 80.0, 60.0]\nkernel0 = WindKernel(lengthScales, processVariance, noiseStddev**2, WindMapConstant('Wind',v0))\n\ndtfile = 'output/wind_data04.neph'\ndtbase = NephelaeDataServer.load(dtfile)\n\ngpr = GprPredictor(dtbase, ['RCT'], kernel0)\nmap_gpr = ValueMap('RCT_gpr', gpr)\nstd_gpr = StdMap('RCT_gpr', gpr)\n\nt0 = 200.0\nz0 = 1100.0\nb = [Bounds(0.0, 715), Bounds(12.5, 6387.5), Bounds(1837.5, 2712.5), Bounds(12.5, 3987)]\n\nmesonhSlice = rct[t0, b[1].min:b[1].max, b[2].min:b[2].max, z0]\nr2 = map_gpr.resolution()[1] / 2.0\ngprSlice = map_gpr[t0, b[1].min+r2:b[1].max-r2, b[2].min+r2:b[2].max-r2, z0]\n\n# gprCenter0 = compute_com(gprSlice)\n# gprSlice.data[gprSlice.data < 0] = 0.0\n# gprCenter1 = compute_com(gprSlice)\n\ngprSliceResampled = np.array(Image.fromarray(gprSlice.data.T).resize(mesonhSlice.shape, Image.BICUBIC)).T\n\nmask = np.zeros(mesonhSlice.data.shape)\nmask[gprSliceResampled > 1.0e-10] = 1.0\nmesonhSlice.data = mesonhSlice.data * mask\ngprSliceResampled = gprSliceResampled * mask\n\n\neqm = np.sum(((mesonhSlice.data.T - gprSliceResampled.T)**2)[:]) / np.sum(mask[:])\nmesonhVar = np.sum((mesonhSlice.data**2)[:]) / np.sum(mask[:])\neqmr = eqm / mesonhVar\n\n\nfig, axes = plt.subplots(4,1, sharex=True, sharey=True)\nb = mesonhSlice.bounds\nprint(b)\n\naxes[0].imshow(mesonhSlice.data.T, origin='lower', extent=[b[0].min, b[0].max, b[1].min, b[1].max])\naxes[1].imshow(gprSlice.data.T, origin='lower', extent=[b[0].min, b[0].max, b[1].min, b[1].max])\n\n# axes[1].plot(gprCenter0[0], gprCenter0[1], '.r')\n# axes[1].plot(gprCenter1[0], gprCenter1[1], '.b')\n\naxes[2].imshow(gprSliceResampled.T, origin='lower', extent=[b[0].min, b[0].max, b[1].min, b[1].max])\naxes[3].imshow((mesonhSlice.data.T - gprSliceResampled.T)**2 / np.max(mesonhSlice.data.ravel())**2, origin='lower', extent=[b[0].min, b[0].max, b[1].min, b[1].max])\n\nplt.show(block=False)\n\n\n\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.subplots", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show" ] ]
neurokernel/neurodriver
[ "9dcafdeddfbcde928e3c688d9240cdc1da40aa1b" ]
[ "neurokernel/LPU/InputProcessors/StepInputProcessor.py" ]
[ "import numpy as np\n\nfrom .BaseInputProcessor import BaseInputProcessor\n\n\nclass StepInputProcessor(BaseInputProcessor):\n\n def __init__(self, variable, uids, val, start, stop,\n input_file = None, input_interval = 1,\n sensory_file = None, sensory_interval = 1):\n super(StepInputProcessor, self).__init__([(variable, uids)],\n mode = 1,\n memory_mode = 'gpu',\n input_file = input_file,\n input_interval = input_interval,\n sensory_file = sensory_file,\n sensory_interval = sensory_interval)\n self.start = start\n self.stop = stop\n self.var = variable\n self.num = len(uids)\n self.started = False\n self.stopped = False\n\n if np.isscalar(val):\n self.val = np.full((self.num,), val)\n else:\n assert len(val) == self.num, \\\n f\"Step Input specified with {self.num} uids but got input value of length {len(val)}\"\n self.val = val.copy()\n\n\n def update_input(self):\n if self.stopped:\n self.variables[self.var]['input'].fill(0)\n else:\n if self.started:\n self.variables[self.var]['input'].set(self.val)\n # if self.LPU_obj.time == self.start:\n # self.variables[self.var]['input'].fill(self.val) # * np.ones(self.num, self.dtypes[self.var])\n # else:\n # self.variables[self.var]['input'].fill(0)\n\n def is_input_available(self):\n if not self.started:\n if self.LPU_obj.time >= self.start:\n self.started = True\n return True\n else:\n if not self.stopped:\n if self.LPU_obj.time >= self.stop:\n self.stopped = True\n return True\n return False\n\n def post_run(self):\n super(StepInputProcessor, self).post_run()\n" ]
[ [ "numpy.isscalar", "numpy.full" ] ]
giocard/yeast_segmentation
[ "0b1b2d8d2c71ff1d0959b286851245ffee868c6f" ]
[ "mrcnn/my_inference.py" ]
[ "import os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n\nseed = 123\nfrom keras import backend as K\n\nimport numpy as np\n\nnp.random.seed(seed)\nimport tensorflow as tf\n\ntf.set_random_seed(seed)\n\nimport random\n\nrandom.seed(seed)\n\nfrom PIL import Image\nimport skimage.transform\nfrom skimage import img_as_ubyte\n\nfrom . import model as modellib\nimport pandas as pd\nimport os\n\nfrom . import my_functions as f\n\nimport time\n\n#######################################################################################\n## SET UP CONFIGURATION\nfrom . import config\n\nclass BowlConfig(config.Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"Inference\"\n\n IMAGE_RESIZE_MODE = \"pad64\" ## tried to modfied but I am using other git clone\n ## No augmentation\n ZOOM = False\n ASPECT_RATIO = 1\n MIN_ENLARGE = 1\n IMAGE_MIN_SCALE = False ## Not using this\n\n IMAGE_MIN_DIM = 512 # We scale small images up so that smallest side is 512\n IMAGE_MAX_DIM = False\n\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n DETECTION_MAX_INSTANCES = 512\n DETECTION_NMS_THRESHOLD = 0.2\n DETECTION_MIN_CONFIDENCE = 0.9\n\n LEARNING_RATE = 0.001\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + nuclei\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 600\n\n USE_MINI_MASK = True\n\n#######################################################################################\n\n'''Run images through the pre-trained neural network.\n\nArguments:\ntest_path: Path where the images are stored (preprocess these using preprocess_images.py)\ninputfile: Path of the comma-delimited file of images names.\noutputfile: Path to write the comma-delimited run-length file to.\nrescale: Set to True if rescale images before processing (saves time)\nscale_factor: Multiplier to downsample images by\nmax_detected_objects: maximum number of detections allowed\nverbose: Verbose or not (true/false)'''\ndef predict_images(test_path, sample_submission, outputfilename, rescale = False, scale_factor = 2, max_detected_objects = None, verbose = True):\n inference_config = BowlConfig()\n if max_detected_objects: inference_config.DETECTION_MAX_INSTANCES = max_detected_objects\n ROOT_DIR = os.getcwd()\n MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n output = open(outputfilename, \"w\")\n output.truncate()\n output.write(\"ImageId, EncodedPixels\\n\")\n output.close()\n\n sample_submission = pd.read_csv(sample_submission)\n\n if test_path[-1] != \"/\":\n test_path = test_path + \"/\"\n \n dirname = os.path.dirname(__file__)\n model_path = os.path.join(dirname, '../weights/deepretina_final.h5')\n\n if verbose:\n print(\"Loading weights from \", model_path)\n\n start_time = time.time()\n\n # Recreate the model in inference mode\n model = modellib.MaskRCNN(mode=\"inference\",\n config=inference_config,\n model_dir=MODEL_DIR)\n model.load_weights(model_path, by_name=True)\n\n n_images = len(sample_submission.ImageId)\n for i in np.arange(n_images):\n start_time = time.time()\n image_id = 
sample_submission.ImageId[i]\n if verbose:\n print('Start detect', i, ' ', image_id)\n ##Set seeds for each image, just in case..\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n ## Load the image\n image_path = os.path.join(test_path, image_id, 'images', image_id + '.png')\n original_image = np.array(Image.open(image_path))\n\n if rescale:\n height = original_image.shape[0]\n width = original_image.shape[1]\n original_image = skimage.transform.resize(original_image, output_shape=(height // scale_factor,\n width // scale_factor),\n preserve_range=True)\n\n ####################################################################\n ## This is needed for the stage 2 image that has only one channel\n if len(original_image.shape) < 3:\n original_image = img_as_ubyte(original_image)\n original_image = np.expand_dims(original_image, 2)\n original_image = original_image[:, :, [0, 0, 0]] # flip r and b\n ####################################################################\n original_image = original_image[:, :, :3]\n\n ## Make prediction for that image\n results = model.detect([original_image], verbose=0)\n\n ## Proccess prediction into rle\n pred_masks = results[0]['masks']\n scores_masks = results[0]['scores']\n class_ids = results[0]['class_ids']\n\n if len(class_ids): ## Some objects are detected\n ImageId_batch, EncodedPixels_batch, _ = f.numpy2encoding(pred_masks, image_id, scores=scores_masks,\n dilation=True)\n f.write2csv(outputfilename, ImageId_batch, EncodedPixels_batch)\n else:\n pass\n\n if verbose:\n print(\"Completed in\", time.time() - start_time)\n\n" ]
[ [ "pandas.read_csv", "numpy.expand_dims", "numpy.random.seed", "numpy.arange", "tensorflow.set_random_seed" ] ]
jselvam11/numpyro
[ "42ed07f5b0c761b5fa0c951e4fe64cdd6b5d0723" ]
[ "test/contrib/test_funsor.py" ]
[ "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom jax import random\nimport jax.numpy as jnp\n\nfrom funsor import Tensor, bint, reals\nimport numpyro\nfrom numpyro.contrib.control_flow import scan\nfrom numpyro.contrib.funsor import config_enumerate, enum, markov, to_data, to_funsor\nfrom numpyro.contrib.funsor.enum_messenger import NamedMessenger\nfrom numpyro.contrib.funsor.enum_messenger import plate as enum_plate\nfrom numpyro.contrib.funsor.infer_util import log_density\nfrom numpyro.contrib.indexing import Vindex\nimport numpyro.distributions as dist\nfrom numpyro.infer import MCMC, NUTS\n\n\ndef test_gaussian_mixture_model():\n K, N = 3, 1000\n\n def gmm(data):\n mix_proportions = numpyro.sample(\"phi\", dist.Dirichlet(jnp.ones(K)))\n with numpyro.plate(\"num_clusters\", K, dim=-1):\n cluster_means = numpyro.sample(\"cluster_means\", dist.Normal(jnp.arange(K), 1.))\n with numpyro.plate(\"data\", data.shape[0], dim=-1):\n assignments = numpyro.sample(\"assignments\", dist.Categorical(mix_proportions))\n numpyro.sample(\"obs\", dist.Normal(cluster_means[assignments], 1.), obs=data)\n\n true_cluster_means = jnp.array([1., 5., 10.])\n true_mix_proportions = jnp.array([0.1, 0.3, 0.6])\n cluster_assignments = dist.Categorical(true_mix_proportions).sample(random.PRNGKey(0), (N,))\n data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(random.PRNGKey(1))\n\n nuts_kernel = NUTS(gmm)\n mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)\n mcmc.run(random.PRNGKey(2), data)\n samples = mcmc.get_samples()\n assert_allclose(samples[\"phi\"].mean(0).sort(), true_mix_proportions, atol=0.05)\n assert_allclose(samples[\"cluster_means\"].mean(0).sort(), true_cluster_means, atol=0.2)\n\n\ndef test_bernoulli_latent_model():\n def model(data):\n y_prob = numpyro.sample(\"y_prob\", dist.Beta(1., 1.))\n with numpyro.plate(\"data\", data.shape[0]):\n y = numpyro.sample(\"y\", dist.Bernoulli(y_prob))\n z = numpyro.sample(\"z\", dist.Bernoulli(0.65 * y + 0.1))\n numpyro.sample(\"obs\", dist.Normal(2. * z, 1.), obs=data)\n\n N = 2000\n y_prob = 0.3\n y = dist.Bernoulli(y_prob).sample(random.PRNGKey(0), (N,))\n z = dist.Bernoulli(0.65 * y + 0.1).sample(random.PRNGKey(1))\n data = dist.Normal(2. 
* z, 1.0).sample(random.PRNGKey(2))\n\n nuts_kernel = NUTS(model)\n mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)\n mcmc.run(random.PRNGKey(3), data)\n samples = mcmc.get_samples()\n assert_allclose(samples[\"y_prob\"].mean(0), y_prob, atol=0.05)\n\n\ndef test_change_point():\n def model(count_data):\n n_count_data = count_data.shape[0]\n alpha = 1 / jnp.mean(count_data.astype(np.float32))\n lambda_1 = numpyro.sample('lambda_1', dist.Exponential(alpha))\n lambda_2 = numpyro.sample('lambda_2', dist.Exponential(alpha))\n # this is the same as DiscreteUniform(0, 69)\n tau = numpyro.sample('tau', dist.Categorical(logits=jnp.zeros(70)))\n idx = jnp.arange(n_count_data)\n lambda_ = jnp.where(tau > idx, lambda_1, lambda_2)\n with numpyro.plate(\"data\", n_count_data):\n numpyro.sample('obs', dist.Poisson(lambda_), obs=count_data)\n\n count_data = jnp.array([\n 13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57, 11,\n 19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13, 19, 23,\n 27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2, 15, 15, 19,\n 70, 49, 7, 53, 22, 21, 31, 19, 11, 1, 20, 12, 35, 17, 23,\n 17, 4, 2, 31, 30, 13, 27, 0, 39, 37, 5, 14, 13, 22,\n ])\n\n kernel = NUTS(model)\n mcmc = MCMC(kernel, num_warmup=500, num_samples=500)\n mcmc.run(random.PRNGKey(0), count_data)\n samples = mcmc.get_samples()\n assert_allclose(samples[\"lambda_1\"].mean(0), 18., atol=1.)\n assert_allclose(samples[\"lambda_2\"].mean(0), 22.5, atol=1.5)\n\n\ndef test_gaussian_hmm():\n dim = 4\n num_steps = 10\n\n def model(data):\n with numpyro.plate(\"states\", dim):\n transition = numpyro.sample(\"transition\", dist.Dirichlet(jnp.ones(dim)))\n emission_loc = numpyro.sample(\"emission_loc\", dist.Normal(0, 1))\n emission_scale = numpyro.sample(\"emission_scale\", dist.LogNormal(0, 1))\n\n trans_prob = numpyro.sample(\"initialize\", dist.Dirichlet(jnp.ones(dim)))\n for t, y in markov(enumerate(data)):\n x = numpyro.sample(\"x_{}\".format(t), dist.Categorical(trans_prob))\n numpyro.sample(\"y_{}\".format(t), dist.Normal(emission_loc[x], emission_scale[x]), obs=y)\n trans_prob = transition[x]\n\n def _generate_data():\n transition_probs = np.random.rand(dim, dim)\n transition_probs = transition_probs / transition_probs.sum(-1, keepdims=True)\n emissions_loc = np.arange(dim)\n emissions_scale = 1.\n state = np.random.choice(3)\n obs = [np.random.normal(emissions_loc[state], emissions_scale)]\n for _ in range(num_steps - 1):\n state = np.random.choice(dim, p=transition_probs[state])\n obs.append(np.random.normal(emissions_loc[state], emissions_scale))\n return np.stack(obs)\n\n data = _generate_data()\n nuts_kernel = NUTS(model)\n mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)\n mcmc.run(random.PRNGKey(0), data)\n\n\ndef test_iteration():\n\n def testing():\n for i in markov(range(5)):\n v1 = to_data(Tensor(jnp.ones(2), OrderedDict([(str(i), bint(2))]), 'real'))\n v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))\n fv1 = to_funsor(v1, reals())\n fv2 = to_funsor(v2, reals())\n print(i, v1.shape) # shapes should alternate\n if i % 2 == 0:\n assert v1.shape == (2,)\n else:\n assert v1.shape == (2, 1, 1)\n assert v2.shape == (2, 1)\n print(i, fv1.inputs)\n print('a', v2.shape) # shapes should stay the same\n print('a', fv2.inputs)\n\n with NamedMessenger():\n testing()\n\n\ndef test_nesting():\n\n def testing():\n\n with markov():\n v1 = to_data(Tensor(jnp.ones(2), OrderedDict([(\"1\", bint(2))]), 'real'))\n print(1, v1.shape) # shapes should alternate\n assert v1.shape == (2,)\n\n with 
markov():\n v2 = to_data(Tensor(jnp.ones(2), OrderedDict([(\"2\", bint(2))]), 'real'))\n print(2, v2.shape) # shapes should alternate\n assert v2.shape == (2, 1)\n\n with markov():\n v3 = to_data(Tensor(jnp.ones(2), OrderedDict([(\"3\", bint(2))]), 'real'))\n print(3, v3.shape) # shapes should alternate\n assert v3.shape == (2,)\n\n with markov():\n v4 = to_data(Tensor(jnp.ones(2), OrderedDict([(\"4\", bint(2))]), 'real'))\n print(4, v4.shape) # shapes should alternate\n\n assert v4.shape == (2, 1)\n\n with NamedMessenger():\n testing()\n\n\ndef test_staggered():\n\n def testing():\n for i in markov(range(12)):\n if i % 4 == 0:\n v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))\n fv2 = to_funsor(v2, reals())\n assert v2.shape == (2,)\n print('a', v2.shape)\n print('a', fv2.inputs)\n\n with NamedMessenger():\n testing()\n\n\ndef test_nested_plate():\n with enum(first_available_dim=-3):\n with enum_plate(\"a\", 5):\n with enum_plate(\"b\", 2):\n x = numpyro.sample(\"x\", dist.Normal(0, 1), rng_key=random.PRNGKey(0))\n assert x.shape == (2, 5)\n\n\n@pytest.mark.parametrize('num_steps', [1, 10, 11])\ndef test_scan_enum_one_latent(num_steps):\n data = random.normal(random.PRNGKey(0), (num_steps,))\n init_probs = jnp.array([0.6, 0.4])\n transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n locs = jnp.array([-1.0, 1.0])\n\n def model(data):\n x = None\n for i, y in markov(enumerate(data)):\n probs = init_probs if x is None else transition_probs[x]\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs))\n numpyro.sample(f\"y_{i}\", dist.Normal(locs[x], 1), obs=y)\n return x\n\n def fun_model(data):\n def transition_fn(x, y):\n probs = init_probs if x is None else transition_probs[x]\n x = numpyro.sample(\"x\", dist.Categorical(probs))\n numpyro.sample(\"y\", dist.Normal(locs[x], 1), obs=y)\n return x, None\n\n x, collections = scan(transition_fn, None, data)\n assert collections is None\n return x\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n actual_last_x = enum(config_enumerate(fun_model))(data)\n expected_last_x = enum(config_enumerate(model))(data)\n assert_allclose(actual_last_x, expected_last_x)\n\n\ndef test_scan_enum_plate():\n N, D = 10, 3\n data = random.normal(random.PRNGKey(0), (N, D))\n init_probs = jnp.array([0.6, 0.4])\n transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n locs = jnp.array([-1.0, 1.0])\n\n def model(data):\n x = None\n D_plate = numpyro.plate(\"D\", D, dim=-1)\n for i, y in markov(enumerate(data)):\n with D_plate:\n probs = init_probs if x is None else transition_probs[x]\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs))\n numpyro.sample(f\"y_{i}\", dist.Normal(locs[x], 1), obs=y)\n\n def fun_model(data):\n def transition_fn(x, y):\n probs = init_probs if x is None else transition_probs[x]\n with numpyro.plate(\"D\", D, dim=-1):\n x = numpyro.sample(\"x\", dist.Categorical(probs))\n numpyro.sample(\"y\", dist.Normal(locs[x], 1), obs=y)\n return x, None\n\n scan(transition_fn, None, data)\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n\ndef test_scan_enum_separated_plates_same_dim():\n N, D1, D2 = 10, 3, 4\n data = random.normal(random.PRNGKey(0), 
(N, D1 + D2))\n data1, data2 = data[:, :D1], data[:, D1:]\n init_probs = jnp.array([0.6, 0.4])\n transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n locs = jnp.array([-1.0, 1.0])\n\n def model(data1, data2):\n x = None\n D1_plate = numpyro.plate(\"D1\", D1, dim=-1)\n D2_plate = numpyro.plate(\"D2\", D2, dim=-1)\n for i, (y1, y2) in markov(enumerate(zip(data1, data2))):\n probs = init_probs if x is None else transition_probs[x]\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs))\n with D1_plate:\n numpyro.sample(f\"y1_{i}\", dist.Normal(locs[x], 1), obs=y1)\n with D2_plate:\n numpyro.sample(f\"y2_{i}\", dist.Normal(locs[x], 1), obs=y2)\n\n def fun_model(data1, data2):\n def transition_fn(x, y):\n y1, y2 = y\n probs = init_probs if x is None else transition_probs[x]\n x = numpyro.sample(\"x\", dist.Categorical(probs))\n with numpyro.plate(\"D1\", D1, dim=-1):\n numpyro.sample(\"y1\", dist.Normal(locs[x], 1), obs=y1)\n with numpyro.plate(\"D2\", D2, dim=-1):\n numpyro.sample(\"y2\", dist.Normal(locs[x], 1), obs=y2)\n return x, None\n\n scan(transition_fn, None, (data1, data2))\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data1, data2), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model), -2), (data1, data2), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n\ndef test_scan_enum_separated_plate_discrete():\n N, D = 10, 3\n data = random.normal(random.PRNGKey(0), (N, D))\n transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])\n\n def model(data):\n x = 0\n D_plate = numpyro.plate(\"D\", D, dim=-1)\n for i, y in markov(enumerate(data)):\n probs = transition_probs[x]\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs))\n with D_plate:\n w = numpyro.sample(f\"w_{i}\", dist.Bernoulli(0.6))\n numpyro.sample(f\"y_{i}\", dist.Normal(Vindex(locs)[x, w], 1), obs=y)\n\n def fun_model(data):\n def transition_fn(x, y):\n probs = transition_probs[x]\n x = numpyro.sample(\"x\", dist.Categorical(probs))\n with numpyro.plate(\"D\", D, dim=-1):\n w = numpyro.sample(\"w\", dist.Bernoulli(0.6))\n numpyro.sample(\"y\", dist.Normal(Vindex(locs)[x, w], 1), obs=y)\n return x, None\n\n scan(transition_fn, 0, data)\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n\ndef test_scan_enum_discrete_outside():\n data = random.normal(random.PRNGKey(0), (10,))\n probs = jnp.array([[[0.8, 0.2], [0.1, 0.9]],\n [[0.7, 0.3], [0.6, 0.4]]])\n locs = jnp.array([-1.0, 1.0])\n\n def model(data):\n w = numpyro.sample(\"w\", dist.Bernoulli(0.6))\n x = 0\n for i, y in markov(enumerate(data)):\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs[w, x]))\n numpyro.sample(f\"y_{i}\", dist.Normal(locs[x], 1), obs=y)\n\n def fun_model(data):\n w = numpyro.sample(\"w\", dist.Bernoulli(0.6))\n\n def transition_fn(x, y):\n x = numpyro.sample(\"x\", dist.Categorical(probs[w, x]))\n numpyro.sample(\"y\", dist.Normal(locs[x], 1), obs=y)\n return x, None\n\n scan(transition_fn, 0, data)\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n\ndef test_scan_enum_two_latents():\n num_steps = 11\n data = random.normal(random.PRNGKey(0), 
(num_steps,))\n probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])\n locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])\n\n def model(data):\n x = w = 0\n for i, y in markov(enumerate(data)):\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs_x[x]))\n w = numpyro.sample(f\"w_{i}\", dist.Categorical(probs_w[w]))\n numpyro.sample(f\"y_{i}\", dist.Normal(locs[w, x], 1), obs=y)\n\n def fun_model(data):\n def transition_fn(carry, y):\n x, w = carry\n x = numpyro.sample(\"x\", dist.Categorical(probs_x[x]))\n w = numpyro.sample(\"w\", dist.Categorical(probs_w[w]))\n numpyro.sample(\"y\", dist.Normal(locs[w, x], 1), obs=y)\n # also test if scan's `ys` are recorded corrected\n return (x, w), x\n\n scan(transition_fn, (0, 0), data)\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n\n\ndef test_scan_enum_scan_enum():\n num_steps = 11\n data_x = random.normal(random.PRNGKey(0), (num_steps,))\n data_w = data_x[:-1] + 1\n probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])\n probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])\n locs_x = jnp.array([-1.0, 1.0])\n locs_w = jnp.array([2.0, 3.0])\n\n def model(data_x, data_w):\n x = w = 0\n for i, y in markov(enumerate(data_x)):\n x = numpyro.sample(f\"x_{i}\", dist.Categorical(probs_x[x]))\n numpyro.sample(f\"y_x_{i}\", dist.Normal(locs_x[x], 1), obs=y)\n\n for i, y in markov(enumerate(data_w)):\n w = numpyro.sample(f\"w{i}\", dist.Categorical(probs_w[w]))\n numpyro.sample(f\"y_w_{i}\", dist.Normal(locs_w[w], 1), obs=y)\n\n def fun_model(data_x, data_w):\n def transition_fn(name, probs, locs, x, y):\n x = numpyro.sample(name, dist.Categorical(probs[x]))\n numpyro.sample(\"y_\" + name, dist.Normal(locs[x], 1), obs=y)\n return x, None\n\n scan(partial(transition_fn, \"x\", probs_x, locs_x), 0, data_x)\n scan(partial(transition_fn, \"w\", probs_w, locs_w), 0, data_w)\n\n actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data_x, data_w), {}, {})[0]\n expected_log_joint = log_density(enum(config_enumerate(model)), (data_x, data_w), {}, {})[0]\n assert_allclose(actual_log_joint, expected_log_joint)\n" ]
[ [ "numpy.random.choice", "numpy.arange", "numpy.stack", "numpy.random.normal", "numpy.random.rand", "numpy.testing.assert_allclose" ] ]
DS4A-84/DS4A_Group84_Project
[ "6f9244689156b818e4081727fef574caa038c419" ]
[ "code/imdb_intersect.py" ]
[ "import json, requests\nimport string\nimport re\nimport gzip\nimport shutil\nimport urllib.request \nimport pandas as pd\n# import imdbpy\n\n\n# Downloading IMDB dataset of names\nimdb_url = 'https://datasets.imdbws.com/name.basics.tsv.gz'\nimdb_file = requests.get(imdb_url, stream=True)\nopen('../data/namebasics.tsv.gz', 'wb').write(imdb_file.content) # https://www.tutorialspoint.com/downloading-files-from-web-using-python\n\nwith gzip.open('../data/namebasics.tsv.gz', 'rb') as f_in: # https://stackoverflow.com/questions/31028815/how-to-unzip-gz-file-using-python\n with open('../data/namebasics.tsv', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n\n# df = pd.read_csv(\"../data/the_oscar_award.csv\")\nimdb = pd.read_csv(\"../data/namebasics.tsv\", sep='\\t')\noscars = pd.read_csv(\"../data/oscar_winners_clean.csv\")\noscars[\"primaryName\"]=oscars[\"name\"]\n\ndel imdb[\"knownForTitles\"]\n\nwinners = pd.merge(oscars, imdb, how='inner', on=['primaryName'])\n\nwinners = winners.drop_duplicates(['name','category', 'film'], keep='first').reset_index() # dropping duplicates with the same name, category, year, and film. 2 people have been nominated for 2 different films\nwinners = winners.replace(r'\\\\N', 0, regex=True)\n\n\n# Age when they were nominated. NOTE: This might not have been their actual age as we don't have their birth days. A better approach would be to find a dataset with their age at nomination\nwinners[\"ceremonyAge\"]=pd.to_numeric(winners['year_ceremony'], errors='coerce')-pd.to_numeric(winners['birthYear'], errors='coerce') # Age at the time of the ceremony. NOTE: Some people were nominated after they died, how should we handle this?\nwinners.loc[ (winners.ceremonyAge>1000), 'ceremonyAge'] = 0 ## Some entries don't have a birth so making them 0\n\n# # Checking if nominees are alive\nwinners.loc[ (winners.deathYear==0) & (winners.birthYear!=0), 'alive'] = True \nwinners.loc[ (winners.deathYear!=0) & (winners.birthYear!=0), 'alive'] = False\nwinners.loc[ (winners.deathYear==0) & (winners.birthYear==0), 'alive'] = False ## if no birth and death year, then not alive\nwinners['birthYear'] = winners['birthYear'].astype(int)\n\n# # Calculating current age. subtract birthYear from currentYear then if alive=\nwinners['currentYear']=int(2021)\n\n# \nwinners[\"currentAge\"] = winners['currentYear']- winners['birthYear']\nwinners.loc[winners.alive==False, 'currentAge'] = 0\n\n# # Deleting duplicate name column\ndel(winners[\"primaryName\"])\n\n\nwinners.to_csv('../data/clean_names.csv', index=False)\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_numeric" ] ]
tingelst/game
[ "2e9acc1d3052e4135605211a622aa8613ee56949" ]
[ "python/parameterizations.py" ]
[ "import sys\nsys.path.append('../build')\nimport versor as vsr\nimport numpy as np\nnp.set_printoptions(linewidth=120)\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\n\n\ndef create_motor(d_lims=(0, 1), th_lims=(0, np.pi/2)):\n translator = (vsr.Vec(*np.random.random(3)).unit()\n * np.random.uniform(*d_lims)).trs()\n rotator = vsr.Rot(vsr.Biv(*np.random.uniform(-1, 1, 3)).unit()\n * np.random.uniform(*th_lims) * -0.5)\n motor = translator * rotator\n return motor\n\n\ndef create_points(motor, gaussian=False, radius=10, n_points=10, points_std=0.8, noise_std=0.09):\n points = []\n for i in range(n_points):\n if gaussian:\n a = vsr.Vec(*np.random.normal(0.0, points_std, 3)).null()\n else:\n a = (vsr.Vec(*np.random.uniform(-1, 1, 3)).unit()\n * np.random.uniform(0, radius)).null()\n b = a.spin(motor)\n t = vsr.Vec(*np.random.random(3)).unit() * \\\n np.random.normal(0.0, noise_std, 1)\n noise_motor = t.trs() * vsr.Rot(1, 0, 0, 0)\n bn = a.spin(noise_motor).spin(motor)\n points.append((a, b, bn))\n\n return points\n\n\ndef project(Y, M):\n YM2 = Y * M\n YM2[0] = 0.0\n YM2[26] = 0.0\n YM2[27] = 0.0\n YM2[28] = 0.0\n YM2[29] = 0.0\n YM2[30] = 0.0\n return vsr.CGA(vsr.MotRec(YM2 * M.rev()))\n\ndef CayleySelig(B):\n Rp = vsr.Mot(1.0, B[0], B[1], B[2], 0.0, 0.0, 0.0, 0.0)\n Rn = vsr.Mot(1.0, -B[0], -B[1], -B[2], 0.0, 0.0, 0.0, 0.0)\n Rninv = Rn.inv()\n eps = vsr.Mot(0,0,0,0,0,0,0,-1)\n b = vsr.Mot(0.0, B[5], -B[4], B[3], 0.0, 0.0, 0.0, 0.0)\n return Rp * Rninv + eps * Rninv * b * Rninv * 2\n\ndef main1():\n\n motor0 = create_motor()\n motor = create_motor()\n points = create_points(motor)\n\n A, B, _ = points[0]\n dM = vsr.CGA(A) * vsr.CGA(motor0).rev() * vsr.CGA(B) + \\\n vsr.CGA(A).rev() * vsr.CGA(motor0).rev() * vsr.CGA(B).rev()\n\n # print(vsr.CGA(motor0))\n proj = project(dM, vsr.CGA(motor0))\n print(proj)\n\n dM2 = vsr.CGA(motor0).rev() * vsr.CGA(A).spin(vsr.CGA(motor0)) * vsr.CGA(B) * 2.0\n print(project(dM2, vsr.CGA(motor0)))\n\ndef oexp(B):\n n = np.sqrt(1 + B[0] * B[0] + B[1] * B[1] + B[2] * B[2])\n s = B[0] * B[5] - B[1] * B[4] + B[2] * B[3]\n m = vsr.Mot(1.0, B[0], B[1], B[2], B[3], B[4], B[5], s) * (1.0 / n)\n return m\n\ndef CayleyLi(B):\n BB = B * B\n Rp = vsr.Mot(1.0, B[0], B[1], B[2], B[3], B[4], B[5], 0.0)\n R0 = vsr.Mot(1.0 - BB[0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n R4 = vsr.Mot(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, BB[7])\n Rn = R0 + R4\n Rden = R0 * R0 \n return Rp * Rp * Rn * Rden.inv()\n\ndef retr(B, M):\n # print('retr')\n return (B * M + M).retract()\n\n\n\ndef test(a,b):\n g = (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm(vsr.CGA(a)) * vsr.CGA(b) * 2.0)[0]\n h = (a[1]* b[0] - a[0] * b[1]) * 2\n print(h)\n print(g)\n i = (-a[0] * b[0] - a[1] * b[1] ) * 4.0\n j = (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm(vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm(vsr.CGA(a))) * vsr.CGA(b) * 4.0)[0]\n print(i)\n print(j)\n\ndef update(points, mot):\n\n g = np.zeros(6)\n H = np.zeros((6,6))\n\n def err(points, mot):\n err = 0.0\n for A, B, _ in points:\n err += (vsr.CGA(A.spin(mot)) * vsr.CGA(B) * -2.0)[0]\n return err\n\n g0 = 0\n for A, B, _ in points:\n MAM = A.spin(mot)\n # Looks like skew\n g[0] += (MAM[1] * B[0] - MAM[0] * B[1]) * 2\n g[1] += (MAM[2] * B[0] - MAM[0] * B[2]) * 2\n g[2] += (MAM[2] * B[1] - MAM[1] * B[2]) * 2\n g[3] += (MAM[0] - B[0]) * 2\n g[4] += (MAM[1] - B[1]) * 2\n g[5] += (MAM[2] - B[2]) * 2\n\n # g[0] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n # g[1] += 
(vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n # g[2] += (vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n # g[3] += (vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n # g[4] += (vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n # g[5] += (vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0)).comm(vsr.CGA(A.spin(mot))) * vsr.CGA(B) * 2.0)[0]\n\n H[0,0] += (MAM[0] * B[0] + MAM[1] * B[1] ) * -4.0\n H[0,1] += MAM[1] * B[2] * -4\n H[0,2] += MAM[0] * B[2] * 4\n H[0,3] += MAM[1] * 4\n H[0,4] += MAM[0] * -4\n # H[0,1] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[0,2] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[0,3] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[0,4] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[0,5] += (vsr.CGA(vsr.Mot(0,1,0,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n H[1,0] = H[0,1]\n H[2,0] = H[0,2]\n H[3,0] = H[0,3]\n H[4,0] = H[0,4]\n # H[5,0] = H[0,5]\n\n H[1,1] += (MAM[0] * B[0] + MAM[2] * B[2] ) * -4.0\n H[1,2] += MAM[0] * B[1] * -4.0\n H[1,3] += MAM[2] * 4.0\n # H[1,4] += 0.0\n H[1,5] += MAM[0] * -4.0\n # H[1,2] += (vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[1,3] += (vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[1,4] += (vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[1,5] += (vsr.CGA(vsr.Mot(0,0,1,0,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n H[2,1] = H[1,2]\n H[3,1] = H[1,3]\n H[5,1] = H[1,5]\n \n H[2,2] += (MAM[1] * B[1] + MAM[2] * B[2] ) * -4.0\n # H[2,3] += (vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n H[2,4] += MAM[2] * 4.0\n H[2,5] += MAM[1] * -4.0\n # H[2,4] += (vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[2,5] += (vsr.CGA(vsr.Mot(0,0,0,1,0,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[3,2] = H[2,3]\n H[4,2] = H[2,4]\n H[5,2] = H[2,5]\n\n # H[3,4] += (vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,1,0,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n # H[3,5] += (vsr.CGA(vsr.Mot(0,0,0,0,1,0,0,0)).comm((vsr.CGA(vsr.Mot(0,0,0,0,0,0,1,0)).comm(vsr.CGA(A.spin(mot))))) * vsr.CGA(B) * 4.0)[0]\n\n H[3,3] += -4.0\n H[4,4] += -4.0\n H[5,5] += -4.0\n\n # print(H)\n\n\n\n\n\n B = np.dot(np.linalg.pinv(H), -g)\n\n # B = g\n\n # line search\n alpha = 1.0\n beta = 0.01\n err0 = err(points, mot)\n\n # while err(points, retr(vsr.Dll(*B) * alpha, mot)) > err0 + alpha * beta * np.inner(g, B) :\n # while err(points, CayleyLi(vsr.Dll(*B) * alpha) * mot) > err0 + alpha * beta * np.inner(g, B) :\n # while err(points, CayleySelig(vsr.Dll(*B) * alpha) * mot) > err0 + alpha * beta 
* np.inner(g, B) :\n while err(points, oexp(alpha * B) * mot) > err0 + alpha * beta * np.inner(g, B) :\n # while err(points, vsr.Dll(*B * alpha).exp() * mot) > err0 + alpha * beta * np.inner(g, B) :\n # while err(points, vsr.Dll(*B * alpha).exp() * mot) > err0 :\n alpha *= 0.5\n # print(alpha)\n\n\n\n\n # B /= np.sqrt(B[0]**2 + B[1]**2 + B[2]**2)\n\n # mot = vsr.Dll(*B*alpha).exp() * mot\n\n # mot = CayleyLi(vsr.Dll(*B) * alpha) * mot\n # mot = CayleySelig(vsr.Dll(*B) * alpha) * mot\n mot = oexp(B * alpha) * mot\n\n # mot = retr(vsr.Dll(*B) * alpha, mot)\n\n return mot, err(points, mot), np.linalg.norm(g)\n\n\ndef motor_rotate_point(motor, point):\n m1, m2, m3, m4, m5, m6, m7, m8 = motor\n p1, p2, p3, p5, p4 = point\n q = np.zeros(3)\n\n R = np.zeros((5,5))\n R[0,0] = m4 * m4 - m3 * m3 - m2 * m2 + m1 * m1\n R[1,0] = -2.0 * m3 * m4 - 2.0 * m1 * m2\n R[2,0] = 2.0 * m2 * m4 - 2.0 * m1 * m3\n R[3,0] = -2.0 * m4 * m8 + 2.0 * m3 * m7 + 2.0 * m2 * m6 - 2.0 * m1 * m5\n\n R[0,1] = 2.0 * m1 * m2 - 2.0 * m3 * m4\n R[1,1] = - m4 * m4 + m3 * m3 - m2 * m2 + m1 * m1\n R[2,1] = -2.0 * m1 * m4 - 2.0 * m2 * m3\n R[3,1] = 2.0 * m3 * m8 + 2.0 * m4 * m7 - 2.0 * m1 * m6 - 2.0 * m2 * m5\n\n R[0,2] = 2.0 * m2 * m4 + 2.0 * m1 * m3\n R[1,2] = 2.0 * m1 * m4 - 2.0 * m2 * m3\n R[2,2] = m4 * m4 - m3 * m3 + m2 * m2 + m1 * m1\n R[3,2] = -2.0 * m2 * m8 - 2.0 * m1 * m7 - 2.0 * m4 * m6 - 2.0 * m3 * m5\n\n R[3,3] = m4 * m4 + m3 * m3 + m2 * m2 + m1 * m1\n \n R[0,4] = -2.0 * m4 * m8 - 2.0 * m3 * m7 - 2.0 * m2 * m6 - 2.0 * m1 * m5\n R[1,4] = 2.0 * m3 * m8 - 2.0 * m4 * m7 - 2.0 * m1 * m6 + 2.0 * m2 * m5\n R[2,4] = -2.0 * m2 * m8 - 2.0 * m1 * m7 + 2.0 * m4 * m6 + 2.0 * m3 * m5\n R[3,4] = 2.0 * m8 * m8 + 2.0 * m7 * m7 + 2.0 * m6 * m6 + 2.0 * m5 * m5\n R[4,4] = m4 * m4 + m3 * m3 + m2 * m2 + m1 * m1\n\n # print(R[:3,:3])\n\n return np.dot(R, point.reshape(5,1)).flatten()\n\n\ndef spin(m, a):\n m1, m2, m3, m4, m5, m6, m7, m8 = m\n a1, a2, a3, a5, a4 = a\n residual = np.zeros(5)\n residual[0] = ((((-(2.0 * a4 * m4 * m8)) - 2.0 * a4 * m3 * m7 - 2.0 * a4 * m2 * m6 - 2.0 * a4 * m1 * m5 + a1 * m4 * m4 + (2.0 * a3 * m2 - 2.0 * a2 * m3) * m4) - a1 * m3 * m3 + 2.0 * a3 * m1 * m3) - a1 * m2 * m2 + 2.0 * a2 * m1 * m2 + a1 * m1 * m1)\n residual[1] = (((2.0 * a4 * m3 * m8 - 2.0 * a4 * m4 * m7 - 2.0 * a4 * m1 * m6 + 2.0 * a4 * m2 * m5) - a2 * m4 * m4 + (2.0 * a3 * m1 - 2.0 * a1 * m3) * m4 + a2 * m3 * m3) - 2.0 * a3 * m2 * m3 - a2 * m2 * m2 - 2.0 * a1 * m1 * m2 + a2 * m1 * m1)\n residual[2] = ((((-(2.0 * a4 * m2 * m8)) - 2.0 * a4 * m1 * m7 +\n 2.0 * a4 * m4 * m6 + 2.0 * a4 * m3 * m5) -\n a3 * m4 * m4 + (2.0 * a1 * m2 - 2.0 * a2 * m1) * m4) -\n a3 * m3 * m3 + ((-(2.0 * a1 * m1)) - 2.0 * a2 * m2) * m3 +\n a3 * m2 * m2 + a3 * m1 * m1)\n\n return residual\n \n\ndef test_motor_rotate_point():\n motor = create_motor()\n # print(np.array(motor))\n motor = vsr.Vec(4,1,2).trs() * vsr.Rot(vsr.Biv(1,1,1).unit() * (np.pi/6))\n point = vsr.Vec(1,2,3).null()\n print(np.array(point.spin(motor)))\n # print(motor.matrix()[:3,:3])\n print(motor_rotate_point(np.array(motor).copy(), np.array(point).copy()))\n\n # print(spin(np.array(motor), np.array(point)))\n\ndef main():\n m = create_motor()\n m = vsr.Mot(1,0,0,0,0,0,0,0)\n print(m)\n motor = create_motor()\n print(motor)\n points = create_points(motor)\n errs = []\n for i in range(100):\n m, err, gnorm = update(points, m)\n if gnorm < 1e-3:\n break\n if err < 1e-6:\n break\n errs.append(err)\n print(i)\n print(m)\n # print(m.rev() * motor)\n plt.semilogy(errs)\n plt.show()\n\n\nif __name__ == '__main__':\n # 
main()\n test_motor_rotate_point()\n # test(vsr.Vec(1,2,3).null(), vsr.Vec(4,5,6).null())\n" ]
[ [ "matplotlib.pyplot.semilogy", "numpy.random.random", "numpy.sqrt", "numpy.inner", "matplotlib.use", "numpy.set_printoptions", "numpy.linalg.norm", "numpy.linalg.pinv", "numpy.random.normal", "numpy.random.uniform", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
aidkilda/understanding-drl-navigation
[ "0d637c2390a935ec1182d4f2d5165644d98d6404" ]
[ "src/internal_representation_analysis/decoder/scene_visualizer.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass SceneVisualizer(object):\n def __init__(self, env):\n\n self.env = env\n self.scene_scope = env.scene_name\n self.locations_x = [l[0] for l in env.locations]\n self.locations_y = [l[1] for l in env.locations]\n self.target_x = env.get_x(int(env.terminal_state_id))\n self.target_y = env.get_y(int(env.terminal_state_id))\n self.target_theta = env.get_rotation(int(env.terminal_state_id))\n\n def visualize_trajectory(self, list_env_id):\n plt.title(self.scene_scope + \" \" + str(self.target_x) + \", \" +\n str(self.target_y))\n self.__draw_map()\n #print(\"n loc in\", self.env.n_locations)\n #print(\"list env id\", list_env_id)\n for _, id in enumerate(list_env_id):\n print(id)\n self.__mark_pos(\n self.env.get_x(id), self.env.get_y(id),\n self.env.get_rotation(id))\n # Mark starting position\n self.__mark_pos(\n self.env.get_x(list_env_id[0]),\n self.env.get_y(list_env_id[0]),\n self.env.get_rotation(list_env_id[0]),\n color=\"green\")\n # Mark target\n self.__mark_pos(\n self.target_x, self.target_y, self.target_theta, color=\"red\")\n plt.show()\n\n def visualize_scene(self):\n self.__draw_map()\n self.__mark_target()\n self.__mark_pos(self.env.x, self.env.y, self.env.r)\n plt.show()\n\n def save_scene(self, x, y, r, save_path):\n self.__draw_map()\n self.__mark_target()\n self.__mark_pos(x, y, r)\n plt.savefig(save_path)\n plt.close()\n\n def mark_targets(self, targets, color=\"orange\"):\n for target in targets:\n self.__mark_pos(self.env.get_x(int(target)),\n self.env.get_y(int(target)),\n self.env.get_rotation(int(target)),\n color=color)\n\n def __draw_map(self):\n plt.scatter(self.locations_x, self.locations_y, color=\"black\")\n\n def __mark_target(self):\n plt.scatter(self.target_x, self.target_y, color=\"red\")\n self.__draw_curr_angle(\n self.target_x, self.target_y, self.target_theta, color=\"red\")\n\n def __mark_pos(self, x, y, theta, color=\"blue\"):\n plt.scatter(x, y, color=color)\n self.__draw_curr_angle(x, y, theta, color=color)\n\n #TODO(aidkilda) make code accomodate angles different than multiples of 90.\n def __draw_curr_angle(self,\n curr_pos_x,\n curr_pos_y,\n curr_angle,\n color=\"black\"):\n #print(\"Curr angle\", curr_angle)\n if self.__isclose(curr_angle, 0.0, rel_tol=1e-5):\n dx = 0.0\n dy = 0.2\n elif self.__isclose(curr_angle, 90.0, rel_tol=1e-5):\n dx = 0.2\n dy = 0.0\n elif self.__isclose(curr_angle, 180.0, rel_tol=1e-5):\n dx = 0.0\n dy = -0.2\n elif self.__isclose(curr_angle, 270.0, rel_tol=1e-5):\n dx = -0.2\n dy = 0.0\n else: # Don't draw arrow at all\n return\n plt.arrow(\n curr_pos_x,\n curr_pos_y,\n dx,\n dy,\n head_width=0.05,\n head_length=0.1,\n color=color)\n\n def __draw_curr_angle_flex(self,\n curr_pos_x,\n curr_pos_y,\n curr_angle,\n color=\"black\"):\n #print(\"Curr angle\", curr_angle)\n if self.__isclose(curr_angle, 0.0, rel_tol=1e-5):\n dx = 0.0\n dy = 0.2\n elif self.__isclose(curr_angle, 90.0, rel_tol=1e-5):\n dx = 0.2\n dy = 0.0\n elif self.__isclose(curr_angle, 180.0, rel_tol=1e-5):\n dx = 0.0\n dy = -0.2\n elif self.__isclose(curr_angle, 270.0, rel_tol=1e-5):\n dx = -0.2\n dy = 0.0\n else: # Don't draw arrow at all\n return\n plt.arrow(\n curr_pos_x,\n curr_pos_y,\n dx,\n dy,\n head_width=0.05,\n head_length=0.1,\n color=color)\n\n # Used for equality comparisson of floats. Taken from documentation.\n def __isclose(self, a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.show", "matplotlib.pyplot.arrow" ] ]
augmento-ai/quant-reseach
[ "6b3bc4c01a8d533dfa1826d59aa90fbc4c6f98cd", "6b3bc4c01a8d533dfa1826d59aa90fbc4c6f98cd" ]
[ "src/analysis_helper.py", "examples/4_basic_strategy_example.py" ]
[ "import numpy as np\nimport numba as nb\n\n\n@nb.jit(\"(f8[:])(f8[:], f8[:])\", nopython=True, nogil=True, cache=True)\ndef nb_safe_divide(a, b):\n\t# divide each element in a by each element in b\n\t# if element b == 0.0, return element = 0.0\n\tc = np.zeros(a.shape[0], dtype=np.float64)\n\tfor i in range(a.shape[0]):\n\t\tif b[i] != 0.0:\n\t\t\tc[i] = a[i] / b[i]\n\treturn c\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_average(arr, window_size):\n\t\n\t# create an output array\n\tout_arr = np.zeros(arr.shape[0])\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))\n\t\n\t# for each output element, find the mean of the last few input elements\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\t\tout_arr[i] = np.mean(new_arr[i : i + window_size])\n\t\n\treturn out_arr\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_sd(arr, window_size):\n\t\n\t# create an output array\n\tout_arr = np.zeros(arr.shape[0])\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))\n\t\n\t# for each output element, find the mean and std of the last few\n\t# input elements, and standardise the input element by the mean and std of the window\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\t\tnum = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size-1])\n\t\tdenom = np.std(new_arr[i : i + window_size-1])\n\t\tif denom != 0.0:\n\t\t\tout_arr[i] = num / denom\n\t\n\treturn out_arr\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_sd_rand(arr, window_size_rand):\n\t\n\t# create an output array \n\tout_arr = np.zeros(arr.shape[0])\n\t\t\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size_rand-1) * arr[0], arr))\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size_rand-1) * arr[0], arr))\n\t# for each output element, find the mean and std of the last few\n\t# input elements, and standardise the input element by the mean and std of the window\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\t\twindow_size_std = 1.0\n\t\twindow_size = round(np.random.normal(window_size_rand, window_size_std)) \n\t\tnum = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size-1])\n\t\tdenom = np.std(new_arr[i : i + window_size-1])\n\t\tif denom != 0.0:\n\t\t\tout_arr[i] = num / denom\n\t\n\treturn out_arr\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_norm(arr, window_size):\n\t\n\t# create an output array\n\tout_arr = np.zeros(arr.shape[0])\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))\n\t\n\t# for each output element, find the mean and std of the last few\n\t# input elements, and standardise the input element by the mean and std of the window\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\t\t\tnum = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size])\n\t\t\tdenom = np.max(np.abs(new_arr[i : i + window_size] - np.mean(new_arr[i 
: i + window_size])))\n\t\t\tif denom != 0.0:\n\t\t\t\t\tout_arr[i] = num / denom\n\t\n\treturn out_arr\n\n@nb.jit(\"(f8[:])(f8[:], i8, f8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_norm_rand(arr, window_size_rand, peturb):\n\t\n\t# create an output array\n\tout_arr = np.zeros(arr.shape[0])\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size_rand-1) * arr[0], arr))\n\n\tindex_new = window_size_rand\n\t\n\t# for each output element, find the mean and std of the last few\n\t# input elements, and standardise the input element by the mean and std of the window\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\n\t\twindow_size_std = peturb * np.float64(window_size_rand)\n\t\twindow_size = round(np.random.normal(window_size_rand, window_size_std))\n\n\t\ti_end_new = i + window_size_rand\n\t\ti_start_new = i_end_new - window_size\n\n\t\tif i_start_new < 0:\n\t\t\ti_start_new = 0\n\n\t\tout_arr[i] = np.mean(new_arr[i_start_new : i_end_new])\n\t\t#print(out_arr[i-1:i+1])\n\n\t\t#num = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size])\n\t\t#denom = np.max(np.abs(new_arr[i : i + window_size] - np.mean(new_arr[i : i + window_size])))\n\t\t#if denom != 0.0:\n\t\t#\tout_arr[i] = num / denom\n\t\n\treturn out_arr\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, parallel=False)\ndef nb_causal_rolling_average(arr, window_size):\n\t\n\t# create an output array\n\tout_arr = np.zeros(arr.shape[0])\n\t\n\t# create an array from the input array, with added space for the rolling window\n\tnew_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))\n\t\n\t# for each output element, find the mean of the last few input elements\n\t#for i in nb.prange(out_arr.shape[0]):\n\tfor i in range(out_arr.shape[0]):\n\t\tout_arr[i] = np.mean(new_arr[i : i + window_size])\n\t\n\treturn out_arr\n\n\n\n#@nb.jit(\"(f8[:])(f8[:], f8[:], i8, i8, f8)\", nopython=True, nogil=True)\ndef nb_calc_sentiment_score_rand_b(sent_a, sent_b, ra_win_size_short, ra_win_size_long,peturb):\n\t# example method for creating a stationary sentiment score based on Augmento data\n\t\n\t# compare the raw sentiment values\n\tsent_ratio = nb_safe_divide(sent_a, sent_b)\n\t\n\t# smooth the sentiment ratio\n\tsent_ratio_short = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size_short, peturb)\n\tsent_ratio_long = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size_long, peturb)\n\t\n\t# create a stationary(ish) representation of the smoothed sentiment ratio\n\tsent_score = sent_ratio_short - sent_ratio_long\n\t\n\treturn sent_score\n\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], i8, i8, f8)\", nopython=True, nogil=True)\ndef nb_calc_sentiment_score_rand_a(sent_a, sent_b, ra_win_size, std_win_size, peturb):\n\t# example method for creating a stationary sentiment score based on Augmento data\n\t\n\t# compare the raw sentiment values\n\tsent_ratio = nb_safe_divide(sent_a, sent_b)\n\t\n\t# smooth the sentiment ratio\n\tsent_ratio_smooth = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size, peturb)\n\t\n\t# create a stationary(ish) representation of the smoothed sentiment ratio\n\tsent_score = nb_causal_rolling_sd(sent_ratio_smooth, std_win_size)\n\t\n\treturn sent_score\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], i8, i8)\", nopython=True, nogil=True)\ndef nb_calc_sentiment_score_a(sent_a, sent_b, ra_win_size, std_win_size):\n\t# example method for creating a stationary sentiment score based on Augmento data\n\t\n\t# 
compare the raw sentiment values\n\tsent_ratio = nb_safe_divide(sent_a, sent_b)\n\t\n\t# smooth the sentiment ratio\n\tsent_ratio_smooth = nb_causal_rolling_average(sent_ratio, ra_win_size)\n\t\n\t# create a stationary(ish) representation of the smoothed sentiment ratio\n\tsent_score = nb_causal_rolling_sd(sent_ratio_smooth, std_win_size)\n\t\n\treturn sent_score\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], i8, i8)\", nopython=True, nogil=True)\ndef nb_calc_sentiment_score_b(sent_a, sent_b, ra_win_size_short, ra_win_size_long):\n\t# example method for creating a stationary sentiment score based on Augmento data\n\t\n\t# compare the raw sentiment values\n\tsent_ratio = nb_safe_divide(sent_a, sent_b)\n\t\n\t# smooth the sentiment ratio\n\tsent_ratio_short = nb_causal_rolling_average(sent_ratio, ra_win_size_short)\n\tsent_ratio_long = nb_causal_rolling_average(sent_ratio, ra_win_size_long)\n\t\n\t# create a stationary(ish) representation of the smoothed sentiment ratio\n\tsent_score = sent_ratio_short - sent_ratio_long\n\t\n\treturn sent_score\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], i8, i8)\", nopython=True, nogil=True)\ndef nb_calc_sentiment_score_c(sent_a, sent_b, ra_win_size, std_win_size):\n\t# example method for creating a stationary sentiment score based on Augmento data\n\t\n\t# compare the raw sentiment values\n\tsent_ratio = nb_safe_divide(sent_a, sent_b)\n\t\n\t# smooth the sentiment ratio\n\tsent_ratio_smooth = nb_causal_rolling_average(sent_ratio, ra_win_size)\n\t\n\t# create a stationary(ish) representation of the smoothed sentiment ratio\n\tsent_score = nb_causal_rolling_norm(sent_ratio_smooth, std_win_size)\n\t\n\treturn sent_score\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], f8, f8)\", nopython=True, nogil=True, cache=True)\ndef nb_backtest_a(price, sent_score, start_pnl, buy_sell_fee):\n\t# example backtest with approximate model for long/short contracts\n\t\n\t# create an array to hold our pnl, and set the first value\n\tpnl = np.zeros(price.shape, dtype=np.float64)\n\tpnl[0] = start_pnl\n\t\n\t# for each step, run the market model\n\tfor i_p in range(1, price.shape[0]):\n\t\t\n\t\t# if sentiment score is positive, simulate long position\n\t\t# else if sentiment score is negative, simulate short position\n\t\t# else if the sentiment score is 0.0, hold\n\t\t# (note that this is a very approximate market simulation!)\n\t\tn_sample_delay = 2\n\t\tif i_p < n_sample_delay:\n\t\t\tpnl[i_p] = pnl[i_p-1]\n\t\tif sent_score[i_p-n_sample_delay] > 0.0:\n\t\t\tpnl[i_p] = (price[i_p] / price[i_p-1]) * pnl[i_p-1]\n\t\telif sent_score[i_p-n_sample_delay] <= 0.0:\n\t\t\tpnl[i_p] = (price[i_p-1] / price[i_p]) * pnl[i_p-1]\n\t\telif sent_score[i_p-n_sample_delay] == 0.0:\n\t\t\tpnl[i_p] = pnl[i_p-1]\n\t\t\n\t\t# simulate a trade fee if we cross from long to short, or visa versa\n\t\tif i_p > 1 and np.sign(sent_score[i_p-1]) != np.sign(sent_score[i_p-2]):\n\t\t\tpnl[i_p] = pnl[i_p] - (buy_sell_fee * pnl[i_p])\n\t\n\treturn pnl\n\n\n\n\n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, cache=True)\ndef moving_average(arr, window):\n\t\t\n\t\t# output array\n\tma_arr = np.zeros(arr.shape[0])\n\t\t\n\t\t# add space for rolling window\n\tnew_arr = np.hstack((np.ones(window-1) * arr[0], arr))\n\t\t\n\t\t# calculate moving average\n\t#for i in nb.prange(arr.shape[0]):\n\tfor i in range(arr.shape[0]):\n\t\tnum = new_arr[i+window-1] - np.mean(new_arr[i : i+window-1])\n\t\tdenom = np.std(new_arr[i : i + window-1])\n\t\tif denom != 0.0:\n\t\t\t\t\t\tma_arr[i] = num / denom\n\n\treturn ma_arr 
\n\n#@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, cache=True)\n#def signal_ma(positive, negative, short, long):\n\n\n\n\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], f8[:], f8, f8, f8)\",nopython=True, nogil=True,cache=True)\ndef sma_crossover_backtest(price, leading_arr, lagging_arr, start_pnl, buy_sell_fee, threshold=0.0):\n\n\t# create an array to hold our pnl, and set the first value\n\tpnl = np.zeros(price.shape, dtype=np.float64)\n\tpnl[0] = start_pnl\n\n\t# BUY if Leading SMA is above Lagging SMA by some threshold.\n\t# SELL if Leading SMA is below Lagging SMA by some threshold.\n\tsent_signal = leading_arr - lagging_arr\n\t\n\t# for each step, run the market model\n\tfor i_p in range(1, price.shape[0]):\n\t\t\tif sent_signal[i_p-1] > threshold:\n\t\t\t\t\tpnl[i_p] = (price[i_p] / price[i_p-1]) * pnl[i_p-1]\n\t\t\telif sent_signal[i_p-1] < threshold:\n\t\t\t\t\tpnl[i_p] = (price[i_p-1] / price[i_p]) * pnl[i_p-1]\n\t\t\telif sent_signal[i_p-1] == threshold:\n\t\t\t\tpnl[i_p] = pnl[i_p-1]\n\t\n\t# simulate a trade fee if we cross from long to short, or visa versa\n\tif i_p > 1 and np.sign(sent_signal[i_p-1]) != np.sign(sent_signal[i_p-2]):\n\t\tpnl[i_p] = pnl[i_p] - (buy_sell_fee * pnl[i_p])\n\t\n\treturn pnl\n\n\n#@nb.jit(\"(f8[:])(f8[:], f8[:], i8)\", nopython=True, nogil=True, cache=True)\n#def forward_volume(volume_data, price_data, threshold=2000000):\n\n# price_rate_change = np.full(len(volume_data), np.nan)\n\n# for i in range(len(volume_data)):\n# sum_volume = 0\n\n# for j in range(len(price_data)):\n# sum_volume += price_data[j]\n\n# if sum_volume >= threshold:\n# price_rate_change[i] = (price_data[j] - price_data[i])/price_data[i]\n# break\n\n@nb.jit(\"(f8[:])(f8[:], f8[:], i8)\", nopython=True, nogil=True, cache=True)\ndef forward_volume(volume_data, price_data, threshold=2000000):\n\n price_rate_change = np.zeros(len(price_data))\n\n for i in range((len(volume_data))):\n j = i+1\n sum_volume = 0.0\n\n while (sum_volume < threshold) & (j < len(price_rate_change)):\n sum_volume += volume_data[j]\n\n if sum_volume >= threshold:\n price_rate_change[i] = (price_data[j]-price_data[i])/price_data[i]\n\n j += 1\n\n return price_rate_change\n \n@nb.jit(\"(f8[:])(f8[:], f8[:], f8)\", nopython=True, nogil=True, cache=True)\ndef forward_volume(volume_data, price_data, threshold):\n\n price_rate_change = np.zeros(len(price_data))\n\n for i in range((len(volume_data))):\n j = i+1\n sum_volume = 0.0\n\n while (sum_volume < threshold) & (j < len(price_rate_change)):\n sum_volume += volume_data[j]\n\n if sum_volume >= threshold:\n price_rate_change[i] = (price_data[j]-price_data[i])/price_data[i]\n\n j += 1\n\n return price_rate_change\n \n \n@nb.jit(\"(f8[:])(f8[:], i8)\", nopython=True, nogil=True, cache=True)\ndef volume_normalized(volume_data, n_hours):\n norm_volume = np.zeros(len(volume_data))\n start = 0\n for i in range(n_hours,len(volume_data), n_hours):\n for j in range(start,i):\n norm_volume[j] = volume_data[j]/np.sum(volume_data[start:i])\n start = i \n return norm_volume\n\n\n\n\n\n\n", "import sys\nimport msgpack\nimport zlib\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n\n# import files from src\nsys.path.insert(0, \"src\")\nimport example_helper as eh\nimport analysis_helper as ah\n\n# define the location of the input file\nfilename_augmento_topics = \"data/example_data/augmento_topics.msgpack.zlib\"\nfilename_augmento_data = \"data/example_data/augmento_data.msgpack.zlib\"\nfilename_bitmex_data = 
\"data/example_data/bitmex_data.msgpack.zlib\"\n\n# load the example data\nall_data = eh.load_example_data(filename_augmento_topics,\n filename_augmento_data,\n filename_bitmex_data)\naug_topics, aug_topics_inv, t_aug_data, aug_data, t_price_data, price_data = all_data\n\n# get the signals we're interested in\naug_signal_a = aug_data[:, aug_topics_inv[\"Negative\"]].astype(np.float64)\naug_signal_b = aug_data[:, aug_topics_inv[\"Bearish\"]].astype(np.float64)\n#aug_signal_b = aug_data[:, aug_topics_inv[\"Bullish\"]].astype(np.float64)\n#aug_signal_a = aug_data[:, aug_topics_inv[\"Bearish\"]].astype(np.float64)\n\n# define the window size for the sentiment score calculation\nn_days = 7\nwindow_size = 24 * n_days\n\n# generate the sentiment score\nsent_score = ah.nb_calc_sentiment_score_a(aug_signal_a, aug_signal_b, window_size, window_size)\n#sent_score = ah.nb_calc_sentiment_score_c(aug_signal_a, aug_signal_b, window_size, window_size)\n\n# define some parameters for the backtest\nstart_pnl = 1.0\nbuy_sell_fee = 0.0\n\n# run the backtest\npnl = ah.nb_backtest_a(price_data, sent_score, start_pnl, buy_sell_fee)\n\n# set up the figure\nfig, ax = plt.subplots(3, 1, sharex=True, sharey=False)\n\n# initialise some labels for the plot\ndatenum_aug_data = [md.date2num(datetime.datetime.fromtimestamp(el)) for el in t_aug_data]\ndatenum_price_data = [md.date2num(datetime.datetime.fromtimestamp(el)) for el in t_price_data]\n\n# plot stuff\nax[0].grid(linewidth=0.4)\nax[1].grid(linewidth=0.4)\nax[2].grid(linewidth=0.4)\nax[0].plot(datenum_price_data, price_data, linewidth=0.5)\nax[1].plot(datenum_aug_data, sent_score, linewidth=0.5)\nax[2].plot(datenum_price_data, pnl, linewidth=0.5)\n\n# label axes\nax[0].set_ylabel(\"Price\")\nax[1].set_ylabel(\"Seniment score\")\nax[2].set_ylabel(\"PnL\")\nax[1].set_ylim([-5.5, 5.5])\n\n#ax[0].set_title(\"4_basic_strategy_example.py\")\n\n# generate the time axes\nplt.subplots_adjust(bottom=0.2)\nplt.xticks( rotation=25 )\nax[0]=plt.gca()\nxfmt = md.DateFormatter('%Y-%m-%d')\nax[0].xaxis.set_major_formatter(xfmt)\n\n# show the plot\nplt.show()\n" ]
[ [ "numpy.ones", "numpy.sign", "numpy.random.normal", "numpy.std", "numpy.mean", "numpy.float64", "numpy.zeros", "numpy.sum" ], [ "matplotlib.pyplot.gca", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show" ] ]
craterkamath/Leaf_Disease_detection
[ "75b8b27db9bcdca57ed78c2752b339b73edcd4bf" ]
[ "example.py" ]
[ "\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage import data, img_as_float\nfrom skimage.segmentation import (morphological_chan_vese,\n\t\t\t\t\t\t\t\t morphological_geodesic_active_contour,\n\t\t\t\t\t\t\t\t inverse_gaussian_gradient,\n\t\t\t\t\t\t\t\t checkerboard_level_set)\nimport skimage\n\ndef store_evolution_in(lst):\n\t\"\"\"Returns a callback function to store the evolution of the level sets in\n\tthe given list.\n\t\"\"\"\n\n\tdef _store(x):\n\n\t\tplt.imshow(x, cmap = 'gray')\n\t\tlst.append(np.copy(x))\n\n\treturn _store\n\n\n# Morphological ACWE\nimage = img_as_float(data.camera())\n\n# Initial level set\ninit_ls = checkerboard_level_set(image.shape, 6)\n# List with intermediate results for plotting the evolution\nevolution = []\ncallback = store_evolution_in(evolution)\nls = morphological_chan_vese(image, 35, init_level_set=init_ls, smoothing=3,\n\t\t\t\t\t\t\t iter_callback=callback)\n\nfig, axes = plt.subplots(2, 2, figsize=(8, 8))\nax = axes.flatten()\n\nax[0].imshow(image, cmap=\"gray\")\nax[0].set_axis_off()\nax[0].contour(ls, [0.5], colors='r')\nax[0].set_title(\"Morphological ACWE segmentation\", fontsize=12)\n\nax[1].imshow(ls, cmap=\"gray\")\nax[1].set_axis_off()\ncontour = ax[1].contour(evolution[2], [0.5], colors='g')\ncontour.collections[0].set_label(\"Iteration 2\")\ncontour = ax[1].contour(evolution[7], [0.5], colors='y')\ncontour.collections[0].set_label(\"Iteration 7\")\ncontour = ax[1].contour(evolution[-1], [0.5], colors='r')\ncontour.collections[0].set_label(\"Iteration 35\")\nax[1].legend(loc=\"upper right\")\ntitle = \"Morphological ACWE evolution\"\nax[1].set_title(title, fontsize=12)\n\n\n# Morphological GAC\nimage = img_as_float(data.coins())\ngimage = inverse_gaussian_gradient(image)\n\n# Initial level set\ninit_ls = np.zeros(image.shape, dtype=np.int8)\ninit_ls[10:-10, 10:-10] = 1\n# List with intermediate results for plotting the evolution\nevolution2 = []\ncallback = store_evolution_in(evolution2)\nls = morphological_geodesic_active_contour(gimage, 230, init_ls,\n\t\t\t\t\t\t\t\t\t\t smoothing=1, balloon=-1,\n\t\t\t\t\t\t\t\t\t\t threshold=0.69,\n\t\t\t\t\t\t\t\t\t\t iter_callback=callback)\n\nax[2].imshow(image, cmap=\"gray\")\nax[2].set_axis_off()\nax[2].contour(ls, [0.5], colors='r')\nax[2].set_title(\"Morphological GAC segmentation\", fontsize=12)\n\nax[3].imshow(ls, cmap=\"gray\")\nax[3].set_axis_off()\ncontour = ax[3].contour(evolution2[0], [0.5], colors='g')\ncontour.collections[0].set_label(\"Iteration 0\")\ncontour = ax[3].contour(evolution2[100], [0.5], colors='y')\ncontour.collections[0].set_label(\"Iteration 100\")\ncontour = ax[3].contour(evolution2[-1], [0.5], colors='r')\ncontour.collections[0].set_label(\"Iteration 230\")\nax[3].legend(loc=\"upper right\")\ntitle = \"Morphological GAC evolution\"\nax[3].set_title(title, fontsize=12)\n\nfig.tight_layout()\nplt.show()\nimport pdb\npdb.set_trace()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplots", "numpy.copy", "matplotlib.pyplot.show", "numpy.zeros" ] ]
DarkSZChao/Big-Little_NN_Strategies
[ "5821765c5ed1a2cbdfe7d9586df7bd36e08fa6fd" ]
[ "Model_Training/MOTION_Detector/Tools/lr_draw.py" ]
[ "# -- coding: utf-8 --\nimport math\nimport os\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import Conv1D, MaxPooling1D, Dense, Flatten\n\ntry: # import for pycharm project directory\n from MOTION_Detector import ROOTS\n from MOTION_Detector.load_dataset import load_data_for_MOTION_Detector\nexcept: # import for Ubuntu non-project directory\n import ROOTS\n from load_dataset import load_data_for_MOTION_Detector\n\n\ndef model_structure():\n # for command line TFLite converter\n _model_input = Input(shape=(128, 3), name='model_input')\n _model = Conv1D(4, 3, activation='relu', padding='same')(_model_input)\n _model = MaxPooling1D(pool_size=2)(_model)\n _model = Conv1D(4, 3, activation='relu', padding='same')(_model)\n _model = MaxPooling1D(pool_size=2)(_model)\n _model = Conv1D(2, 3, activation='relu', padding='same')(_model)\n _model = MaxPooling1D(pool_size=2)(_model)\n _model = Flatten()(_model)\n _model = Dense(2, name='model_output')(_model)\n _model = Model(_model_input, _model)\n return _model\n\n\nif __name__ == '__main__':\n activity = 5\n model_h5 = os.path.join(ROOTS.MOTION_Detector_output_model_path, 'single_motion_' + str(activity) + '.h5')\n\n train_data, train_labels, val_data, val_labels = load_data_for_MOTION_Detector(sensor='s3', # when sensor='all', the function return dictionary type which includes s1, s2, s3\n current_activity=activity, # from 0 to 5\n data_range=[0, 255],\n data_type=np.uint8,\n label_categorical='off')\n train_data = train_data.astype(np.int16) - 128\n val_data = val_data.astype(np.int16) - 128\n\n model = model_structure()\n model.summary()\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer='adam',\n metrics=['accuracy'])\n\n lr_szc = []\n lr_szc2 = []\n\n def scheduler(epoch, lr):\n if epoch < 2:\n lr = 0.005\n lr2 = 0.005\n elif epoch == 20:\n lr = 0.003\n lr2 = 0.005\n else:\n # lr = lr * 0.95\n lr = lr * math.exp(-lr * 50)\n lr2 = 0.005\n lr_szc.append(lr)\n lr_szc2.append(lr2)\n return lr\n\n\n lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule=scheduler, verbose=1)\n\n checkpoint_path = os.path.join(ROOTS.MOTION_Detector_output_model_path, 'checkpoint/cp-{epoch:02d}.ckpt')\n cp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n verbose=1,\n save_weights_only=True)\n\n history = model.fit(train_data, train_labels,\n validation_data=(val_data, val_labels),\n callbacks=[lr_callback, cp_callback],\n epochs=50,\n verbose=1)\n\n model.save(model_h5) # High_API h5 format\n\n print('Epoch with maximum val_accuracy:', np.argmax(history.history['val_acc']) + 1, history.history['val_acc'][np.argmax(history.history['val_acc'])] * 100, '%')\n\n plt.figure()\n plt.plot(lr_szc, label='Ours')\n plt.plot(lr_szc2, label='Default')\n plt.xlabel('Epoch')\n plt.ylabel('Learning rate')\n plt.ylim([0, 0.006])\n plt.legend(loc='lower right')\n plt.show()\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "matplotlib.pyplot.legend", "tensorflow.keras.Input", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "matplotlib.pyplot.ylim", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.Model", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.argmax", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.figure" ] ]
shreyansh26/DL-Code-Repository
[ "f1974eedc1fef54b2d274703390a22721e46f502" ]
[ "GANs/gan/gan.py" ]
[ "import argparse\nimport os\n\nimport torch\nimport torch.functional as F\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom torchvision import datasets\nfrom torchvision.utils import save_image\n\nfrom model import Discriminator, Generator\n\nCUDA_AVAILABLE = torch.cuda.is_available()\nDEVICE = torch.device('cuda') if CUDA_AVAILABLE else torch.device('cpu')\n\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=64, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--img_size\", type=int, default=28, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=400, help=\"interval betwen image samples\")\nconfigs = parser.parse_args()\nprint(configs)\n\nimg_shape = (configs.channels, configs.img_size, configs.img_size)\n\n\n# Configure data loader\nos.makedirs(\"../data/mnist\", exist_ok=True)\ndataloader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"../data/mnist\",\n train=True,\n download=True,\n transform=transforms.Compose(\n [transforms.Resize(configs.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]\n ),\n ),\n batch_size=configs.batch_size,\n shuffle=True,\n)\n\n\n# Loss function\nadversarial_loss = nn.BCELoss()\n\n# Initialize generator and discriminator\ngenerator = Generator(configs, img_shape)\ndiscriminator = Discriminator(configs, img_shape)\n\ngenerator = generator.to(DEVICE)\ndiscriminator = discriminator.to(DEVICE)\nadversarial_loss = adversarial_loss.to(DEVICE)\n\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=configs.lr, betas=(configs.b1, configs.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=configs.lr, betas=(configs.b1, configs.b2))\n\nTensor = torch.cuda.FloatTensor if CUDA_AVAILABLE else torch.FloatTensor\n\n# ----------\n# Training\n# ----------\n\nfor epoch in range(configs.n_epochs):\n for i, (imgs, _) in enumerate(dataloader):\n\n # Adversarial ground truths\n real = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as input to generator\n z = Variable(Tensor(torch.randn((imgs.shape[0], configs.latent_dim), device=DEVICE)))\n\n # Generate a batch of imgs\n gen_imgs = generator(z)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = adversarial_loss(discriminator(gen_imgs), real)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # 
---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated images\n real_loss = adversarial_loss(discriminator(real_imgs), real)\n fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake) # gen_imgs is detached to preveent training generator\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, configs.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % configs.sample_interval == 0:\n save_image(gen_imgs.data[:25], \"images/%d.png\" % batches_done, nrow=5, normalize=True)" ]
[ [ "torch.device", "torch.randn", "torch.cuda.is_available", "torch.nn.BCELoss" ] ]
kellylab/genomic-surveillance-of-the-bronx
[ "14a58b89e99946c92287387c6ac1fb34c6c0cde4" ]
[ "scripts/demographics/figure1.py" ]
[ "\"\"\" \nMakes a figure providing an overview of our dataset with a focus on lineages\nlaid out as follows:\n\na - Patient metadata\nb - Donut plot of our lineage distributions vs the world\nc - Timeline of patient sampling vs lineages identified\nd - Choropleth of lineages by region\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom typing import Dict\n\nimport logging\nimport matplotlib\nfrom matplotlib.lines import Line2D\nfrom mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition,\n mark_inset)\n\nfrom covid_bronx import lineage_colors_dict, lineage_colors_dict_rgb\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nsavefile = \"figures/figure1_v2\"\n\n\nmonths = {\n 1: 'Jan',\n 2: 'Feb',\n 3: 'Mar',\n 4: 'Apr',\n 5: 'May',\n 6: 'Jun',\n 7: 'Jul',\n 8: 'Aug',\n 9: 'Sep',\n 10: 'Oct',\n 11: 'Nov',\n 12: 'Dec',\n}\n\nfrom covid_bronx.metadata import preprocess_metadata\nfrom matplotlib.colors import colorConverter\n\n# a) Timeline of lineages\nlogger.info(\"Plotting 1a\")\ntimeline = pd.read_csv(\"data/external/global_lineages.csv\")\nfrom covid_bronx.metadata import get_metadata\nmetadata = get_metadata()\nindex = pd.date_range(metadata['collection_date'].min(), metadata['collection_date'].max())\nmetadata.index = metadata['name']\ndf = pd.read_csv(\"data/external/pangolin2.csv\")\ndf.index = df['Sequence name'].apply(lambda x: x.split(\" \")[0])\ndf.index = df.index.map(lambda x: \"AECOM-\" + str(int(x.split(\"-\")[1])))\n\nmetadata[df.columns] = df\nlineages_df = pd.read_csv(\"data/external/Lineages_updated.csv\", index_col=0)\nlineages = lineages_df['lineage'].dropna()\nlineages.index = lineages.index.map(lambda x: x.replace(\"_\", \"-\"))\nmetadata['Lineage'] = lineages\n\nddf = pd.DataFrame([ # Incremental Values\n {\n l: (metadata[metadata['collection_date'] == d]['Lineage']==l).sum()\n for l in lineages\n }\n for d in index\n ],\n index=index\n)\n\nddf.index = ddf.index.map(lambda x: months[x.month])\nddmf = pd.DataFrame({k: v.sum(0) for k,v in ddf.groupby(ddf.index)})\n\ncdf = pd.DataFrame([ # Cumulative Values\n {\n l: (metadata[metadata['collection_date'] <= d]['Lineage']==l).sum()\n for l in lineages\n }\n for d in index\n ],\n index=index\n)\n\nbronx_sampling = ddmf.sum(0)\nsampling = pd.read_csv(\"data/external/sampling.csv\", index_col=0)\nsampling['date'] = pd.to_datetime(sampling['date'])\nsampling['month'] = sampling['date'].apply(lambda x: months[x.month])\ndeathsdmf = pd.Series({k:v['Deaths'].sum() for k,v in sampling.groupby('month')})\ncasesdmf = pd.Series({k:v['Cases'].sum() for k,v in sampling.groupby('month')})\nhospitalizationdmf = pd.Series({k:v['Hospitalizations'].sum() for k,v in sampling.groupby('month')})\n\nsampling_df = pd.DataFrame({\"Sampling\": bronx_sampling, \"Cases\": casesdmf, \"Deaths\": deathsdmf, \"Hospitalizations\": hospitalizationdmf}).fillna(0.)\n\n##########################################################\n\n# Start Plotting \nmatplotlib.rcParams.update({'font.size': 16})\nplt.clf()\nplt.close()\nfig1 = plt.figure(figsize=(24,24))\ngs = fig1.add_gridspec(20,20)\n\n\n# a) Sampling Timeline\n\nax_c = fig1.add_subplot(gs[0:8, 10:])\nax_c2 = ax_c.twinx()\nsampling_df[['Cases', 'Deaths', 'Hospitalizations']].loc[['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep']].plot(ax=ax_c, label=True, color=['yellowgreen','red','orange'], linewidth=6)\nax_c.grid(linestyle='--', 
linewidth=1)\nax_c.set_ylim([0,100000])\nax_c2.set_ylim([0,80])\nax_c.set_ylabel(\"Count of Cases / Hospitalizations / Deaths\")\nax_c.legend()\nax_c2.set_ylabel(\"Count of Genomes Sequenced\")\nax_c.set_xlabel(\"Month\")\nax_c.set_xticklabels(['Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep'])\nsampling_df['Sampling'][['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep']].plot.bar(ax=ax_c2, alpha=.5)\nax_c2.grid(linestyle='--', color='blue', alpha=.5, linewidth=1)\nax_c2.spines['right'].set_color('blue')\nax_c2.yaxis.label.set_color('blue')\nax_c2.tick_params(axis='y', colors='blue')\n\n# d) Choropleth by Lineage\nlogger.info(\"Plotting 1d\")\n\n# ax_d = fig1.add_subplot(gs[6:, 8:])\n\nfrom covid_bronx.geography import gen_points_in_gdf_polys, blank_background_choropleth, get_zip_codes_metadata_geo\nimport geopandas as gpd\n\nmetadata = preprocess_metadata()\ncoverage_levels = pd.read_csv(\"data/processed/sequencing/coverage.csv\", index_col=0)['0']\ncoverage_levels = pd.read_csv(\"data/processed/sequencing/coverage.csv\", index_col=0)['0']\npassed = coverage_levels[coverage_levels>=.95].index.map(lambda x: x.replace(\"_\",\"-\").replace(\"-0\", \"-\").replace(\"-0\", \"-\"))\nnum_samples = len(passed)\n\nfrom covid_bronx.metadata import preprocess_metadata\nfrom matplotlib import colors\n\ndef colorizer(df: pd.DataFrame, color_dict: Dict) -> pd.Series:\n \"\"\"\n Given a dataframe where the rows are zip codes and columns are lineages,\n along with a dict explaining what the RGB color values are, returns a series\n linking zip codes to a color output.\n \"\"\"\n scale_factors = df.sum(1) / max(df.sum(1))\n weights = (df.T / df.sum(1))\n color_series = pd.DataFrame( [np.sum(weights[z][c]*v for c,v in color_dict.items()) for z in weights.columns], index=weights.columns, columns=['r','g','b'])\n\n return color_series.T\n\ndf = pd.read_csv(\"data/external/lineages_final.csv\", index_col=0)\ndf.index = df['taxon'].apply(lambda x: x.split(\" \")[0])\nmetadata[df.columns] = df\n\n# clades = pd.read_csv(\"data/external/nextclade.csv\", sep=\";\")\n# clades.index = clades['seqName'].apply(lambda x: x.split(\" \")[0])\n# metadata[clades.columns] = clades\n\nzips = metadata.loc[metadata.index.intersection(passed)]['zip_code'].to_numpy()\nzips = np.array(sorted(zips)[2:])\n\n# Get a listing of coordinates by zip code\n\ngdf = gpd.read_file(\"data/external/ZIP_CODE_040114/ZIP_CODE_040114.geojson\")\n# Remove extraneous zip codes\ngdf = gdf[~(gdf['PO_NAME']=='Staten Island')]\n\nlatlons = gpd.GeoDataFrame({\"ZIPCODE\": gdf['ZIPCODE'], 'geometry': gdf['geometry'].centroid}).set_index(\"ZIPCODE\")\n\nzdf, bzip = get_zip_codes_metadata_geo()\nzdf.index = zdf['zip_code'].map(lambda x: str(int(float(x))))\ngdf.index = gdf['ZIPCODE']\ngdf[zdf.columns] = zdf\ngdf = gdf.fillna(0.)\n# gdf.plot(ax=ax_d)\n\ngeocolor_dict = {k: lineage_colors_dict_rgb[k] for k in ['B.1', 'B.1.3', 'B.1.1']} # {'B.1': np.array([1,0,0]), 'B.1.3': np.array([0,1,0]), 'B.1.1': np.array([0,0,1])}\nlineage_colors = colorizer(zdf[['B.1', 'B.1.3', 'B.1.1']], geocolor_dict).to_numpy()\nlineage_colors = np.nan_to_num(lineage_colors, 0.)\ngdf['lineage_colors'] = pd.Series([colors.to_rgba(lineage_colors[:,i]/256) for i in range(len(lineage_colors.T))], index=zdf.index)\ngdf['lineage_colors'] = gdf['lineage_colors'].fillna('#000000')\n# gdf.plot(ax=ax_d, color=gdf['lineage_colors'])\n\n\n# ax_d.set_axis_off()\n# # Plot a triangular legend\n# x = np.array([-1,0])\n# y = np.array([1,0])\n# z = np.array([0,1])\n# x_c = geocolor_dict['B.1']/256\n# 
y_c = geocolor_dict['B.1.3']/256\n# z_c = geocolor_dict['B.1.1']/256\n# \n# # Do convex combinations of everything\n# coordinates = []\n# k = 30\n# for lambd in np.linspace(0,1,k):\n# for mu in np.linspace(0, 1-lambd, int(k*(1-lambd))):\n# for w in np.linspace(0, 1-lambd-mu, int(k*(1-mu))):\n# combo = lambd*x + mu*y + w*z\n# color = colors.to_hex(max(lambd,0)*x_c + max(mu,0)*y_c + max(w,0)*z_c)\n# coordinates.append([combo[0], combo[1], color])\n# \n# coordinates = np.array(coordinates)\n# xy = coordinates[:, 0:2].astype(float)\n# ax2 = plt.axes([0,0,1,1])\n# ip = InsetPosition(ax_d, [-.15,.7,0.3,0.2])\n# ax2.set_axes_locator(ip)\n# ax2.scatter(xy[:,0],xy[:,1], c=coordinates[:,2])\n# ax2.text(-1.4,-.1, 'B.1')\n# ax2.text(1.05,-.1, 'B.1.3')\n# ax2.text(-.25,1.1, 'B.1.1')\n# ax2.set_axis_off()\n\nax_3 = fig1.add_subplot(gs[0:10, 0:10])\ngdf.fillna(0.).plot(column='count', cmap='pink', ax=ax_3, legend=True, legend_kwds={'shrink': 0.3})\nax_3.set_axis_off()\n\n# Plot hospital locations\nfrom shapely.geometry import Point\n\nhospitals = [Point(-73.846184,40.849010)]\nhospitals_df = gpd.GeoDataFrame(geometry=hospitals)\nhospitals_df.plot(ax=ax_3, markersize=500, color='black', marker='.', label=\"Collection Site\")\n\nplt.tight_layout(pad=.3)\n\nplt.savefig(savefile + '.pdf')\nplt.savefig(savefile + '.svg')\n\nfig, ax = plt.subplots(figsize=(20,10))\ncdf_colors = [lineage_colors_dict[k] for k in ['B.1.26', 'B.1', 'B.2', 'B.2.1', 'A.1', 'B.1.3', 'B.1.1.1', 'B.1.1']]\ncdf[['B.1.26', 'B.1', 'B.2', 'B.2.1', 'A.1', 'B.1.3', 'B.1.1.1', 'B.1.1']].plot.line(legend=True, color=cdf_colors, ax=ax)\nax.set_ylabel(\"Cumulative Sample Counts by Lineage\")\nplt.savefig(\"figures/figure2b_v2.pdf\")\nplt.savefig(\"figures/figure2b_v2.svg\")\n\n# b) Donut Plot showing lineage distributions in world, US, NYS, and Bronx\nplt.clf()\n# ax_q = fig1.add_subplot(gs[0:7, 13:])\nfig, ax_q = plt.subplots(figsize=(15,15))\nfacecolor = colorConverter.to_rgba('white', alpha=0)\ncirculo = lambda r: plt.Circle((0,0), r, ec='white', fc=facecolor, lw=2)\nlogger.info(\"Plotting 1b\")\ndonut = pd.read_csv(\"data/external/Donut_churro_plot.csv\", index_col=0)\ndonut_colors = [lineage_colors_dict[k] for k in donut.index]\nartist = donut['world'].plot.pie(radius=1, ax=ax_q, colors=donut_colors)\ncircle_1 = circulo(.8)\nax_q.add_artist(circle_1)\ndonut['USA'].plot.pie(radius=.8, ax=ax_q, labels=None, colors=donut_colors)\ncircle_1a = circulo(.6)\nax_q.add_artist(circle_1a)\ndonut['NYS'].plot.pie(radius=.6, ax=ax_q, labels=None, colors=donut_colors)\ncircle_2 = circulo(.4)\nax_q.add_artist(circle_2)\ndonut['Bronx'].plot.pie(radius=.4, ax=ax_q, labels=None, colors=donut_colors)\ncircle_3 = circulo(.2)\ncircle_4 = plt.Circle((0,0), .2, color='white')\nax_q.add_artist(circle_3)\nax_q.add_artist(circle_4)\nax_q.set_ylabel('')\nplt.savefig(\"figures/figure2c_v2.pdf\")\nplt.savefig(\"figures/figure2c_v2.svg\")\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.tight_layout", "matplotlib.colors.to_rgba", "numpy.nan_to_num", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "matplotlib.pyplot.Circle", "pandas.DataFrame", "matplotlib.pyplot.clf", "matplotlib.rcParams.update", "matplotlib.pyplot.close", "matplotlib.colors.colorConverter.to_rgba", "matplotlib.pyplot.figure" ] ]
Diriba-Getch/CNN-Multi-Label-Text-Classificati2on
[ "0792c0f244b8190e097da42e8719c8bb03573e14" ]
[ "text_cnn.py" ]
[ "# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\n\n\ndef linear(input_, output_size, scope=None):\n \"\"\"\n Linear map: output[k] = sum_i(Matrix[k, i] * args[i] ) + Bias[k]\n :param input_: a tensor or a list of 2D, batch x n, Tensors.\n :param output_size: int, second dimension of W[i].\n :param scope: VariableScope for the created subgraph; defaults to \"Linear\".\n :returns: A 2D Tensor with shape [batch x output_size] equal to \\\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n :raises: ValueError, if some of the arguments has unspecified or wrong shape.\n \"\"\"\n\n shape = input_.get_shape().as_list()\n if len(shape) != 2:\n raise ValueError(\"Linear is expecting 2D arguments: {}\".format(str(shape)))\n if not shape[1]:\n raise ValueError(\"Linear expects shape[1] of arguments: {}\".format(str(shape)))\n input_size = shape[1]\n\n # Now the computation.\n with tf.variable_scope(scope or \"SimpleLinear\"):\n matrix = tf.get_variable(\"Matrix\", [output_size, input_size], dtype=input_.dtype)\n bias_term = tf.get_variable(\"Bias\", [output_size], dtype=input_.dtype)\n\n return tf.matmul(input_, tf.transpose(matrix)) + bias_term\n\n\ndef highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):\n \"\"\"\n Highway Network (cf. http://arxiv.org/abs/1505.00387).\n t = sigmoid(Wy + b)\n z = t * g(Wy + b) + (1 - t) * y\n where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.\n \"\"\"\n\n with tf.variable_scope(scope):\n for idx in range(num_layers):\n g = f(linear(input_, size, scope='highway_lin_{}'.format(idx)))\n t = tf.sigmoid(linear(input_, size, scope='highway_gate_{}'.format(idx)) + bias)\n output = t * g + (1. - t) * input_\n input_ = output\n\n return output\n\n\nclass TextCNN(object):\n \"\"\"\n A CNN for text classification.\n Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.\n \"\"\"\n\n def __init__(\n self, sequence_length, num_classes, vocab_size, embedding_size,\n embedding_type, filter_sizes, num_filters, l2_reg_lambda=0.0, pretrained_embedding=None):\n\n # Placeholders for input, output and dropout\n self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.float32, [None, num_classes], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n self.global_step = tf.Variable(0, trainable=False, name=\"global_Step\")\n\n # Keeping track of l2 regularization loss (optional)\n l2_loss = tf.constant(0.0)\n\n # Embedding layer\n with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\n # 默认采用的是随机生成正态分布的词向量。\n # 也可以是通过自己的语料库训练而得到的词向量。\n if pretrained_embedding is None:\n self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name=\"W\")\n else:\n if embedding_type == 0:\n self.W = tf.constant(pretrained_embedding, name=\"W\")\n self.W = tf.cast(self.W, tf.float32)\n if embedding_type == 1:\n self.W = tf.Variable(pretrained_embedding, name=\"W\", trainable=True)\n self.W = tf.cast(self.W, tf.float32)\n self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)\n self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)\n\n # Create a convolution + maxpool layer for each filter size\n pooled_outputs = []\n\n for i, filter_size in enumerate(filter_sizes):\n with tf.name_scope(\"conv-maxpool-{}\".format(filter_size)):\n # Convolution Layer\n filter_shape = [filter_size, embedding_size, 1, num_filters]\n W = 
tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b\")\n conv = tf.nn.conv2d(\n self.embedded_chars_expanded,\n W,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"conv\")\n\n # Apply nonlinearity\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu_front\")\n\n # Maxpooling over the outputs\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, sequence_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n name=\"pool\")\n\n pooled_outputs.append(pooled)\n\n # Combine all the pooled features\n num_filters_total = num_filters * len(filter_sizes)\n self.h_pool = tf.concat(pooled_outputs, 3)\n self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])\n\n # Add highway\n with tf.name_scope(\"highway\"):\n self.h_highway = highway(self.h_pool_flat, self.h_pool_flat.get_shape()[1], 1, 0)\n\n # Add dropout\n with tf.name_scope(\"dropout\"):\n self.h_drop = tf.nn.dropout(self.h_highway, self.dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\n \"W\",\n shape=[num_filters_total, num_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name=\"b\")\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name=\"logits\")\n\n # CalculateMean cross-entropy loss\n with tf.name_scope(\"loss\"):\n losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)\n losses = tf.reduce_sum(losses, axis=1)\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss" ]
[ [ "tensorflow.get_variable", "tensorflow.device", "tensorflow.concat", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.nn.l2_loss", "tensorflow.nn.conv2d", "tensorflow.Variable", "tensorflow.name_scope", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.nn.embedding_lookup", "tensorflow.nn.bias_add", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.variable_scope", "tensorflow.random_uniform" ] ]
markusgay/seldon-core
[ "b9ebfdfd63e5f7b23311b81ba78e36aa08e87640" ]
[ "python/seldon_core/serving_test_gen.py" ]
[ "\"\"\"Contains methods to generate a JSON file for Seldon API integration testing.\"\"\"\n\nimport os\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\n\nRANGE_INTEGER_MIN = 0\nRANGE_INTEGER_MAX = 1\nRANGE_FLOAT_MIN = 0.0\nRANGE_FLOAT_MAX = 1.0\n\n\ndef _column_range(col: pd.Series) -> Optional[List]:\n \"\"\"\n Calculate minimum and maximum of a column and outputs a list.\n\n Parameters\n ----------\n col\n Column to inspect.\n\n Returns\n -------\n Min and max of the column range as a list.\n \"\"\"\n if col.dtype == np.float:\n if pd.isnull(min(col)): # This also means that maximum is null\n return [RANGE_FLOAT_MIN, RANGE_FLOAT_MAX]\n else:\n return [min(col), max(col)]\n elif col.dtype == np.integer:\n if pd.isnull(min(col)): # This also means that maximum is null\n return [RANGE_INTEGER_MIN, RANGE_INTEGER_MAX]\n else:\n return [min(col), max(col)]\n else:\n return np.NaN\n\n\ndef _column_values(column: pd.Series) -> Union[List, float]:\n \"\"\"\n Create a list of unique values for categorical variables.\n\n Parameters\n ----------\n column\n Column to inspect.\n\n Returns\n -------\n List of unique values for categorical variables\n \"\"\"\n if column.dtype != np.number:\n return column.unique().tolist()\n else:\n return np.NaN\n\n\ndef create_seldon_api_testing_file(data: pd.DataFrame, target: str, output_path: str) -> bool:\n \"\"\"\n Create a JSON file for Seldon API testing.\n\n Parameters\n ----------\n data\n Pandas DataFrame used as a recipe for the json file.\n target\n Name of the target column.\n output_path\n Path of output file.\n\n Returns\n -------\n True if file correctly generated.\n \"\"\"\n\n # create a Data frame in the form of JSON object\n df_for_json = pd.DataFrame(data=data.columns.values, columns=[\"name\"])\n df_for_json[\"dtype\"] = np.where(data.dtypes == np.float, 'FLOAT',\n np.where(data.dtypes == np.int, 'INTEGER', np.NaN))\n df_for_json[\"ftype\"] = np.where(data.dtypes == np.number, 'continuous', 'categorical')\n ranges = [_column_range(data[column_name]) for column_name in data.columns.values]\n values = [_column_values(data[column_name]) for column_name in data.columns.values]\n df_for_json[\"range\"] = ranges\n df_for_json[\"values\"] = values\n # Split the target\n df_for_json_target = df_for_json[df_for_json.name == target]\n df_for_json_features = df_for_json[df_for_json.name != target]\n\n # Convert data frames to JSON with a trick that removes records with NaNs\n json_features_df = df_for_json_features.T.apply(lambda row: row[~row.isnull()].to_json())\n json_features = f'[{\",\".join(json_features_df)}]'\n json_target_df = df_for_json_target.T.apply(lambda row: row[~row.isnull()].to_json())\n json_target = f'[{\",\".join(json_target_df)}]'\n json_combined = f'{{\"features\": {json_features}, \"targets\": {json_target}}}'\n\n with open(output_path, 'w+') as output_file:\n output_file.write(str(json_combined))\n return os.path.exists(output_path)\n" ]
[ [ "numpy.where", "pandas.DataFrame" ] ]
hangwudy/Mask_RCNN
[ "8b5d896076b994e2f9136054114c551a8cb3119f" ]
[ "samples/car_door/FusionNet.py" ]
[ "# coding: utf-8\n# # FusionNet for Car Door Detection and Pose Estimation\n# @author: Hang Wu\n# @date: 2018.12.20\n\n\nimport os\nimport sys\nimport json\nimport numpy as np\nimport skimage.io\nimport matplotlib.pyplot as plt\nimport csv\nfrom skimage.color import gray2rgb\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nimport re\n\n# Set the ROOT_DIR variable to the root directory of the Mask_RCNN git repo\nROOT_DIR = '../../'\nsys.path.append(ROOT_DIR)\nfrom mrcnn.config import Config\nimport mrcnn.utils as utils\nimport time\nfrom mrcnn import visualize_car_door\nimport mrcnn.model_new as modellib\n\n# PoseNet\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\",\n default=\"/home/hangwu/Repositories/Model_output/Attitude_CNN/attitude_vgg16_random.h5\",\n # required=True,\n help=\"path to trained Attitude CNN model model\")\nap.add_argument(\"-mm\", \"--model_m\",\n default=\"/media/hangwu/TOSHIBA_EXT/Dataset/weights/mask_rcnn_car_door_0250.h5\",\n # required=True,\n help=\"path to trained Mask R-CNN model model\")\nap.add_argument(\"-a\", \"--latitudebin\",\n default=\"/home/hangwu/Repositories/Model_output/Attitude_CNN/latitude_lb.pickle\",\n # required=True,\n help=\"path to output latitude label binarizer\")\nap.add_argument(\"-o\", \"--longitudebin\",\n default=\"/home/hangwu/Repositories/Model_output/Attitude_CNN/longitude_lb.pickle\",\n # required=True,\n help=\"path to output longitude label binarizer\")\nap.add_argument(\"-r\", \"--renderings\",\n default=\"/media/hangwu/TOSHIBA_EXT/Dataset/renderings_square\",\n # required=True,\n help=\"path to input renderings directory\")\nap.add_argument(\"-j\", \"--valjson\",\n default=\"/media/hangwu/TOSHIBA_EXT/Dataset/annotations/mask_rcnn/car_door_val.json\",\n # required=True,\n help=\"path to validation data annotation directory\")\nap.add_argument(\"-v\", \"--valdata\",\n default=\"/media/hangwu/TOSHIBA_EXT/Dataset/val_data\",\n # required=True,\n help=\"path to validation data directory\")\nap.add_argument(\"-test\", \"--test\",\n # default=\"/media/hangwu/TOSHIBA_EXT/Dataset/test_data\",\n # default=\"/home/hangwu/CyMePro/data/test\",\n default=\"/media/hangwu/TOSHIBA_EXT/Dataset/images_from_robot/dataset/images\",\n # required=True,\n help=\"path to test dataset directory\")\nargs = vars(ap.parse_args())\n\n# load the trained convolutional neural network from disk, followed\n# by the latitude and longitude label binarizers, respectively\nprint(\"[INFO] loading network...\")\nmodel2 = load_model(args[\"model\"], custom_objects={\"tf\": tf})\nlatitudeLB = pickle.loads(open(args[\"latitudebin\"], \"rb\").read())\nlongitudeLB = pickle.loads(open(args[\"longitudebin\"], \"rb\").read())\n\n\ndef loadim(image_path='Car_Door', ext='png', key_word='car_door'):\n image_list = []\n for filename in os.listdir(image_path):\n if filename.endswith(ext) and filename.find(key_word) != -1:\n current_path = os.path.abspath(image_path)\n image_abs_path = os.path.join(current_path, filename)\n image_list.append(image_abs_path)\n return image_list\n\n\ndef pose_estimation(image):\n\n scale = 500\n output = cv2.resize(image, (scale, scale))\n image_name = image_path.split(os.path.sep)[-1]\n gt_latitude, gt_longitude = gt_name(image_name)\n print(\"output size:\", output.shape)\n\n # pre-process the image for classification\n image = cv2.resize(image, 
(224, 224))\n image = image.astype(\"float\") / 255.0\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n\n # classify the input image using Keras' multi-output functionality\n print(\"[INFO] classifying image...\")\n (latitudeProba, longitudeProba) = model2.predict(image)\n\n # find indexes of both the latitude and longitude outputs with the\n # largest probabilities, then determine the corresponding class\n # labels\n latitudeIdx = latitudeProba[0].argmax()\n longitudeIdx = longitudeProba[0].argmax()\n latitudeLabel = latitudeLB.classes_[latitudeIdx]\n longitudeLabel = longitudeLB.classes_[longitudeIdx]\n \"\"\"\n # draw the latitude label and longitude label on the image\n latitudeText = \"latitude: {} ({:.2f}%)\".format(latitudeLabel,\n latitudeProba[0][latitudeIdx] * 100)\n longitudeText = \"longitude: {} ({:.2f}%)\".format(longitudeLabel,\n longitudeProba[0][longitudeIdx] * 100)\n cv2.putText(output, latitudeText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (0, 255, 0), 2)\n cv2.putText(output, longitudeText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (0, 255, 0), 2)\n\n # display the predictions to the terminal as well\n print(\"[INFO] {}\".format(latitudeText))\n print(\"[INFO] {}\".format(longitudeText))\n \"\"\"\n # show the output image\n # cv2.imshow(\"Output\", output)\n image_compare_name = 'car_door_{}_{}.png'.format(latitudeLabel, longitudeLabel)\n image_compare_path = os.path.join(args[\"renderings\"], image_compare_name)\n # print(image_compare_path)\n image_compare = cv2.imread(image_compare_path)\n image_compare = cv2.resize(image_compare, (scale, scale))\n # print(\"Compare size:\", image_compare.shape)\n\n image_horizontal_concat = np.concatenate((output, image_compare), axis=1)\n\n plt_fig_save(image_horizontal_concat, \n image_name[:-4], \n latitudeLabel, \n longitudeLabel, \n gt_latitude, \n gt_longitude)\n\n cv2.imwrite(\"/home/hangwu/Mask_RCNN/detected_images/Results_{:4.0f}.png\".format(time.time()), image_horizontal_concat)\n # cv2.imshow(\"Reality and Prediction\", image_horizontal_concat)\n # # cv2.imshow(\"Comparison\", image_compare)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n return [image_name[:-4], latitudeLabel, gt_latitude, longitudeLabel, gt_longitude]\n\ndef plt_fig_save(img, img_name, pred_latitude, pred_longitude, gt_latitude, gt_longitude):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n _, ax = plt.subplots(1,1)\n text_on_image = \"Prediction: ({},{})\\nGround Truth: ({},{})\".format(pred_latitude, \n pred_longitude, gt_latitude, gt_longitude)\n ax.text(\n 0, 1, text_on_image,\n size='small',\n horizontalalignment='left',\n verticalalignment='top',\n # family='serif',\n bbox={'facecolor': 'white', 'alpha':0.5, 'pad':2},\n color='white',\n transform=ax.transAxes\n )\n ax.axis('off')\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.set_frame_on(False)\n plt.imshow(img)\n plt.savefig(\"/media/hangwu/TOSHIBA_EXT/Dataset/PoseNet_output/prediction_{}.pdf\".format(img_name), \n dpi=500,\n transparent=True,\n bbox_inches='tight',\n pad_inches=0)\n\n\ndef gt_name(file_name):\n match = re.match(r'([0-9]+)(_+)([0-9]+)(_+)([0-9]+)(\\.png)', file_name, re.I)\n latitude = int(match.groups()[0])\n if latitude == 1:\n latitude_fixed = 27\n elif latitude == 2:\n latitude_fixed = 30\n elif latitude == 3:\n latitude_fixed = 18\n elif latitude == 4:\n latitude_fixed = 28\n elif latitude == 5:\n latitude_fixed = 23\n else:\n latitude_fixed = latitude\n longitude = int(match.groups()[4])\n longitude_fixed 
= 360 - longitude\n if longitude_fixed == 360:\n print(\"longitude_fixed: \", longitude_fixed)\n longitude_fixed = 0\n \n return latitude_fixed, longitude_fixed\n##########################################################################################\n\n\n# ## Set up logging and pre-trained model paths\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n\n# ## Configuration\n# Define configurations for training on the car door dataset.\n\n\nclass CarDoorConfig(Config):\n \"\"\"\n Configuration for training on the car door dataset.\n Derives from the base Config class and overrides values specific\n to the car door dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"car_door\"\n\n # Train on 1 GPU and 1 image per GPU. Batch size is 1 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + 1 (car_door)\n\n # All of our training images are 512x512\n IMAGE_MIN_DIM = 378\n IMAGE_MAX_DIM = 512\n\n # You can experiment with this number to see if it improves training\n STEPS_PER_EPOCH = 500\n\n # This is how often validation is run. If you are using too much hard drive space\n # on saved models (in the MODEL_DIR), try making this value larger.\n VALIDATION_STEPS = 5\n\n # use resnet101 or resnet50\n BACKBONE = 'resnet101'\n\n # To be honest, I haven't taken the time to figure out what these do\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n TRAIN_ROIS_PER_IMAGE = 32\n MAX_GT_INSTANCES = 50\n PRE_NMS_LIMIT = 6000\n POST_NMS_ROIS_INFERENCE = 500\n POST_NMS_ROIS_TRAINING = 1000\n\n\nconfig = CarDoorConfig()\nconfig.display()\n\n\n# # Define the dataset\nclass CarPartsDataset(utils.Dataset):\n\n def load_data(self, annotation_json, images_dir):\n \"\"\" Load the coco-like dataset from json\n Args:\n annotation_json: The path to the coco annotations json file\n images_dir: The directory holding the images referred to by the json file\n \"\"\"\n # Load json from file\n json_file = open(annotation_json)\n car_door_json = json.load(json_file)\n json_file.close()\n\n # Add the class names using the base method from utils.Dataset\n source_name = \"car_parts\"\n for category in car_door_json['categories']:\n class_id = category['id']\n class_name = category['name']\n if class_id < 1:\n print('Error: Class id for \"{}\" cannot be less than one. 
(0 is reserved for the background)'.format(\n class_name))\n return\n\n self.add_class(source_name, class_id, class_name)\n\n # Get all annotations\n annotations = {}\n for annotation in car_door_json['annotations']:\n image_id = annotation['image_id']\n if image_id not in annotations:\n annotations[image_id] = []\n annotations[image_id].append(annotation)\n\n # Get all images and add them to the dataset\n seen_images = {}\n for image in car_door_json['images']:\n image_id = image['id']\n if image_id in seen_images:\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\n else:\n seen_images[image_id] = image\n try:\n image_file_name = image['file_name']\n image_width = image['width']\n image_height = image['height']\n except KeyError as key:\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\n\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\n image_annotations = annotations[image_id]\n\n # Add the image using the base method from utils.Dataset\n self.add_image(\n source=source_name,\n image_id=image_id,\n path=image_path,\n width=image_width,\n height=image_height,\n annotations=image_annotations\n )\n\n\n# # Create the Training and Validation Datasets\n# In[6]:\n\ndataset_val = CarPartsDataset()\ndataset_val.load_data(args[\"valjson\"], args[\"valdata\"])\ndataset_val.prepare()\n\n\n# ## Display a few images from the training dataset\n\n# In[7]:\n\n\nclass InferenceConfig(CarDoorConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n DETECTION_MIN_CONFIDENCE = 0.85\n\n\ninference_config = InferenceConfig()\n\n# In[8]:\n\n\n# Recreate the model in inference mode\nmodel = modellib.MaskRCNN(mode=\"inference\",\n config=inference_config,\n model_dir=MODEL_DIR)\n\n# In[9]:\n\n\n# Get path to saved weights\n# Either set a specific path or find last trained weights\n# model_path = os.path.join(ROOT_DIR, \".h5 file name here\")\n\n# model_path = model.find_last()\nmodel_path = args[\"model_m\"]\n\n# Load trained weights (fill in path to trained weights here)\nassert model_path != \"\", \"Provide path to trained weights\"\nprint(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n\n# # Run Inference\n# Run model.detect()\n\n\n# import skimage\nreal_test_dir = args[\"test\"]\n\n# '/home/hangwu/CyMePro/data/dataset/test'\n# '/home/hangwu/CyMePro/data/test' # '/home/hangwu/CyMePro/data/dataset/test_data'\nimage_paths = []\nfor filename in os.listdir(real_test_dir):\n if os.path.splitext(filename)[1].lower() in ['.png', '.jpg', '.jpeg', '.JPG']:\n image_paths.append(os.path.join(real_test_dir, filename))\n\n\n# analysis\nM_analysis = []\nfor image_path in image_paths:\n img = skimage.io.imread(image_path)\n # img = imutils.resize(img, width=360)\n if len(img.shape) < 3:\n img = gray2rgb(img)\n img_arr = np.array(img)\n results = model.detect([img_arr], verbose=1)\n r = results[0]\n\n image_name = image_path.split(os.path.sep)[-1][:-4]\n if len(r['rois']):\n # xmin ymin\n print('======================================================')\n print('{}: '.format(image_name), r['rois'])\n xmin = r['rois'][:, 1][0]\n ymin = r['rois'][:, 0][0]\n xmax = r['rois'][:, 3][0]\n ymax = r['rois'][:, 2][0]\n xbar = (xmin + xmax) / 2\n ybar = (ymin + ymax) / 2\n center_of_mask = [xbar, ybar]\n print('xmin: {}\\nymin: {}\\nxmax: {}\\nymax: {}'.format(xmin, ymin, xmax, ymax))\n print('Center of the Mask: ', center_of_mask)\n 
print('======================================================')\n # visualize_car_door.display_instances(img, r['rois'], r['masks'], r['class_ids'],\n # dataset_val.class_names, r['scores'], figsize=(5, 5), image_name=image_name)\n mask_for_pose = visualize_car_door.mask_to_squares(img, r['masks'], xmin, ymin, xmax, ymax)\n # mask_for_pose = visualize_car_door.mask_highlight(img, r['masks'], xmin, ymin, xmax, ymax)\n\n analysis = pose_estimation(mask_for_pose)\n M_analysis.append(analysis)\n\n# write into a csv\nwith open(\"output.csv\", 'w') as csvfile:\n writer = csv.writer(csvfile)\n\n writer.writerow([\"Test_Group\",\"Pred_Latitude\", \"GT_Latitude\",\"Pred_Longitude\",\"GT_Longitude\"])\n writer.writerows(M_analysis)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.expand_dims", "matplotlib.pyplot.subplots", "numpy.concatenate", "numpy.array" ] ]
muhammadidrees/covid19_data_analysis
[ "4764b0ad20771d319f2b1d5062bc7dc11e9c7243" ]
[ "code/data_preparation.py" ]
[ "# Before performing any analysis we need to check the data\n# and prepare it excluding null values and giving proper\n# format to values so our data can be clean and ready\n\nimport pandas as pd\nimport numpy as np\n\n# covert the data file into a dataframe so it's easy to manupilate\ncovid = pd.read_csv(\"data/covid_19_data.csv\")\n\n# show the head so we know exactly what data are we dealing with\nprint(\"\\nInitial 5 rows of data:\\n\", covid.head())\n\n# to get a better understanding of the numerical data\nprint(\"\\nReport of numerical data:\\n\", covid.describe())\n\n# show the size and shape of data\nprint(\"\\nSize and shape of data:\", covid.shape)\n\n# check null values in each column\nprint(\"\\nNull values in columns:\\n\", covid.isnull().sum())\n\n# check for negative values in numeric columns\nprint(\"Negative values in confirmed column: \", len(covid.loc[covid[\"Confirmed\"] < 0]))\nprint(\"Negative values in deaths column: \", len(covid.loc[covid[\"Deaths\"] < 0]))\nprint(\"Negative values in recovered column: \", len(covid.loc[covid[\"Recovered\"] < 0]))\n\n# droping S. No., province/state and Last Update columns \ncovid.drop([\"SNo\", \"Province/State\", \"Last Update\"], 1, inplace=True)\n\n# verifying update\nprint(\"\\nAfter droping columns:\\n\")\nprint(covid.head())\n\n# converting \"Observation Date\" to date time format\ncovid[\"ObservationDate\"] = pd.to_datetime(covid[\"ObservationDate\"])\n\n# verifying update\nprint(\"\\nAfter formatting observation date:\\n\")\nprint(covid.head())\n\n# give unique values\nunique_countries = covid[\"Country/Region\"].unique()\n\n# print out countries to see the unique one's and check\n# for any repeating or wrong data\nfor a, b, c, d, e in zip(*[iter(unique_countries)]*5):\n print(\"{:35s} {:35s} {:35s} {:35s} {:35s}\\n\".format(a, b, c, d, e))\n\n# no. of unique countires\nprint(\"Number of unique contry names: \", len(unique_countries))\n\n# printing showed some countries have punctuations and also leading traling spaces\n# also Bahamas and Gambia were repeated multiple times so we have to remove this\n# ambiguity and renaming china US and UK to give them proper names\n\n# remove punctuation\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].str.replace(r'[^\\w\\s]','')\n\n# extra spaces in country names\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].str.strip()\n\n# convert all instances of Bahamas to Bahamas\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].apply(lambda x: \"Bahamas\" if \"Bahamas\" in x else x)\n\n# convert all instances of Gambia to Gambia\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].apply(lambda x: \"Gambia\" if \"Gambia\" in x else x)\n\n# convert all instances of China to China\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].apply(lambda x: \"China\" if \"China\" in x else x)\n\n# convert all instances of US to United States\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].apply(lambda x: \"US\" if \"United States\" in x else x)\n\n# convert all instances of UK to United Kingdom\ncovid[\"Country/Region\"] = covid[\"Country/Region\"].apply(lambda x: \"UK\" if \"United Kingdom\" in x else x)\n\n# give unique values\nunique_countries = covid[\"Country/Region\"].unique()\n\nprint()\n\nfor a, b, c, d, e in zip(*[iter(unique_countries)]*5):\n print(\"{:35s} {:35s} {:35s} {:35s} {:35s}\\n\".format(a, b, c, d, e))\n\n# no. 
of unique countires after cleaning\nprint(\"Number of unique contry names after cleaning: \", len(unique_countries))\n\ncovid.to_csv(\"data/covid_19_clean_data.csv\")" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
jpenrici/Computer_Graphics
[ "5ba268e9e75de0d7ad733a503400e52b66edc78b" ]
[ "NumPy_Training/img_histogram.py" ]
[ "# -*- Mode: Python3; coding: utf-8; indent-tabs-mpythoode: nil; tab-width: 4 -*-\n\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nPATH = \"../Images/\"\nRED = 0\nGREEN = 1\nBLUE = 2\nALL = 3\n\n\ndef view(data, channel=ALL, title=\"histogram\"):\n\n R = data[:, :, RED].flatten()\n G = data[:, :, GREEN].flatten()\n B = data[:, :, BLUE].flatten()\n\n bins = np.arange(0, 256)\n kwargs = dict(histtype='step', bins=bins)\n\n if channel == RED or channel == ALL:\n plt.hist(R, color='r', label=\"RED\", **kwargs)\n\n if channel == GREEN or channel == ALL:\n plt.hist(G, color='g', label=\"GREEN\", **kwargs)\n\n if channel == BLUE or channel == ALL:\n plt.hist(B, color='b', label=\"BLUE\", **kwargs)\n\n plt.title(title)\n plt.legend(loc=\"upper right\")\n plt.ylabel('Frequency')\n plt.xlabel('Value')\n plt.show()\n\n\ndef test(filename):\n\n img_np = PATH + filename + \".npy\"\n print(\"Data: \", img_np)\n\n if not os.path.exists(img_np):\n print(\"File not found!\")\n return\n\n data = np.load(img_np)\n\n h, w, c = data.shape\n if c > 3:\n data = data[:, :, :3]\n\n view(data, RED)\n view(data, GREEN)\n view(data, BLUE)\n view(data)\n\n\nif __name__ == '__main__':\n test(\"folha_croton\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.arange", "numpy.load", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
sergeiissaev/rxrx1-utils
[ "e3c1832dbb5b9396c81cd716a9680ccc0191ce09" ]
[ "rxrx/main.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train a ResNet-50 model on RxRx1 on TPU.\n\nOriginal file:\n https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_main.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport time\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import summary\nfrom tensorflow.python.estimator import estimator\n\nfrom rxrx import input as rxinput\nfrom rxrx.official_resnet import resnet_v1\n\nDEFAULT_INPUT_FN_PARAMS = {\n 'tfrecord_dataset_buffer_size': 256,\n 'tfrecord_dataset_num_parallel_reads': None,\n 'parallel_interleave_cycle_length': 32,\n 'parallel_interleave_block_length': 1,\n 'parallel_interleave_buffer_output_elements': None,\n 'parallel_interleave_prefetch_input_elements': None,\n 'map_and_batch_num_parallel_calls': 128,\n 'transpose_num_parallel_calls': 128,\n 'prefetch_buffer_size': tf.contrib.data.AUTOTUNE,\n}\n\n# The mean and stds for each of the channels\nGLOBAL_PIXEL_STATS = (np.array([6.74696984, 14.74640167, 10.51260864,\n 10.45369445, 5.49959796, 9.81545561]),\n np.array([7.95876312, 12.17305868, 5.86172946,\n 7.83451711, 4.701167, 5.43130431]))\n\n\ndef resnet_model_fn(features, labels, mode, params, n_classes, num_train_images,\n data_format, transpose_input, train_batch_size,\n momentum, weight_decay, base_learning_rate, warmup_epochs,\n use_tpu, iterations_per_loop, model_dir, tf_precision,\n resnet_depth):\n \"\"\"The model_fn for ResNet to be used with TPUEstimator.\n\n Args:\n features: `Tensor` of batched images\n labels: `Tensor` of labels for the data samples\n mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`\n params: `dict` of parameters passed to the model from the TPUEstimator,\n `params['batch_size']` is always provided and should be used as the\n effective batch size.\n\n\n Returns:\n A `TPUEstimatorSpec` for the model\n \"\"\"\n if isinstance(features, dict):\n features = features['feature']\n\n # In most cases, the default data format NCHW instead of NHWC should be\n # used for a significant performance boost on GPU/TPU. 
NHWC should be used\n # only if the network needs to be run on CPU since the pooling operations\n # are only supported on NHWC.\n if data_format == 'channels_first':\n assert not transpose_input # channels_first only for GPU\n features = tf.transpose(features, [0, 3, 1, 2])\n\n if transpose_input and mode != tf.estimator.ModeKeys.PREDICT:\n features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC\n\n # This nested function allows us to avoid duplicating the logic which\n # builds the network, for different values of --precision.\n def build_network():\n network = resnet_v1(\n resnet_depth=resnet_depth,\n num_classes=n_classes,\n data_format=data_format)\n return network(\n inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))\n\n if tf_precision == 'bfloat16':\n with tf.contrib.tpu.bfloat16_scope():\n logits = build_network()\n logits = tf.cast(logits, tf.float32)\n elif tf_precision == 'float32':\n logits = build_network()\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'classes': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs={\n 'classify': tf.estimator.export.PredictOutput(predictions)\n })\n\n # If necessary, in the model_fn, use params['batch_size'] instead the batch\n # size flags (--train_batch_size or --eval_batch_size).\n batch_size = params['batch_size'] # pylint: disable=unused-variable\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n one_hot_labels = tf.one_hot(labels, n_classes)\n cross_entropy = tf.losses.softmax_cross_entropy(\n logits=logits,\n onehot_labels=one_hot_labels)\n\n # Add weight decay to the loss for non-batch-normalization variables.\n loss = cross_entropy + weight_decay * tf.add_n([\n tf.nn.l2_loss(v) for v in tf.trainable_variables()\n if 'batch_normalization' not in v.name\n ])\n\n host_call = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Compute the current epoch and associated learning rate from global_step.\n global_step = tf.train.get_global_step()\n steps_per_epoch = tf.cast(num_train_images / train_batch_size, tf.float32)\n current_epoch = (tf.cast(global_step, tf.float32) / steps_per_epoch)\n warmup_steps = warmup_epochs * steps_per_epoch\n\n\n period = 10 * steps_per_epoch\n learning_rate = tf.train.cosine_decay_restarts(base_learning_rate,\n global_step,\n period,\n t_mul=1.0,\n m_mul=1.0,\n alpha=0.0,\n name=None)\n\n\n\n optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,\n momentum=momentum,\n use_nesterov=True)\n\n if use_tpu:\n # When using TPU, wrap the optimizer with CrossShardOptimizer which\n # handles synchronization details between different TPU cores. To the\n # user, this should look like regular synchronous training.\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n # Batch normalization requires UPDATE_OPS to be added as a dependency to\n # the train operation.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step)\n\n\n def host_call_fn(gs, loss, lr, ce):\n \"\"\"Training host call. Creates scalar summaries for training metrics.\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the\n model to the `metric_fn`, provide as part of the `host_call`. 
See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `host_call`.\n Args:\n gs: `Tensor with shape `[batch]` for the global_step\n loss: `Tensor` with shape `[batch]` for the training loss.\n lr: `Tensor` with shape `[batch]` for the learning_rate.\n ce: `Tensor` with shape `[batch]` for the current_epoch.\n Returns:\n List of summary ops to run on the CPU host.\n \"\"\"\n gs = gs[0]\n # Host call fns are executed FLAGS.iterations_per_loop times after one\n # TPU loop is finished, setting max_queue value to the same as number of\n # iterations will make the summary writer only flush the data to storage\n # once per loop.\n with summary.create_file_writer(model_dir,\n max_queue=iterations_per_loop).as_default():\n with summary.always_record_summaries():\n summary.scalar('loss', loss[0], step=gs)\n summary.scalar('learning_rate', lr[0], step=gs)\n summary.scalar('current_epoch', ce[0], step=gs)\n return summary.all_summary_ops()\n\n # To log the loss, current learning rate, and epoch for Tensorboard, the\n # summary op needs to be run on the host CPU via host_call. host_call\n # expects [batch_size, ...] Tensors, thus reshape to introduce a batch\n # dimension. These Tensors are implicitly concatenated to\n # [params['batch_size']].\n gs_t = tf.reshape(global_step, [1])\n loss_t = tf.reshape(loss, [1])\n lr_t = tf.reshape(learning_rate, [1])\n ce_t = tf.reshape(current_epoch, [1])\n\n host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])\n\n else:\n train_op = None\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(labels, logits):\n \"\"\"Evaluation metric function. Evaluates accuracy.\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the model\n to the `metric_fn`, provide as part of the `eval_metrics`. 
See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `eval_metrics`.\n Args:\n labels: `Tensor` with shape `[batch]`.\n logits: `Tensor` with shape `[batch, num_classes]`.\n Returns:\n A dict of the metrics to return from evaluation.\n \"\"\"\n predictions = tf.argmax(logits, axis=1)\n top_1_accuracy = tf.metrics.accuracy(labels, predictions)\n in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)\n top_5_accuracy = tf.metrics.mean(in_top_5)\n\n return {\n 'top_1_accuracy': top_1_accuracy,\n 'top_5_accuracy': top_5_accuracy,\n }\n\n eval_metrics = (metric_fn, [labels, logits])\n\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n host_call=host_call,\n eval_metrics=eval_metrics)\n\ndef main(use_tpu,\n tpu,\n gcp_project,\n tpu_zone,\n url_base_path,\n use_cache,\n model_dir,\n train_epochs,\n train_batch_size,\n num_train_images,\n epochs_per_loop,\n log_step_count_epochs,\n num_cores,\n data_format,\n transpose_input,\n tf_precision,\n n_classes,\n momentum,\n weight_decay,\n base_learning_rate,\n warmup_epochs,\n input_fn_params=DEFAULT_INPUT_FN_PARAMS,\n resnet_depth=50):\n\n if use_tpu & (tpu is None):\n tpu = os.getenv('TPU_NAME')\n tf.logging.info('tpu: {}'.format(tpu))\n if gcp_project is None:\n gcp_project = os.getenv('TPU_PROJECT')\n tf.logging.info('gcp_project: {}'.format(gcp_project))\n\n steps_per_epoch = (num_train_images // train_batch_size)\n train_steps = steps_per_epoch * train_epochs\n current_step = estimator._load_global_step_from_checkpoint_dir(model_dir) # pylint: disable=protected-access,line-too-long\n iterations_per_loop = steps_per_epoch * epochs_per_loop\n log_step_count_steps = steps_per_epoch * log_step_count_epochs\n\n\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu if (tpu or use_tpu) else '', zone=tpu_zone, project=gcp_project)\n\n\n config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=model_dir,\n save_summary_steps=iterations_per_loop,\n save_checkpoints_steps=iterations_per_loop,\n log_step_count_steps=log_step_count_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=num_cores,\n per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.\n PER_HOST_V2)) # pylint: disable=line-too-long\n\n model_fn = functools.partial(\n resnet_model_fn,\n n_classes=n_classes,\n num_train_images=num_train_images,\n data_format=data_format,\n transpose_input=transpose_input,\n train_batch_size=train_batch_size,\n iterations_per_loop=iterations_per_loop,\n tf_precision=tf_precision,\n momentum=momentum,\n weight_decay=weight_decay,\n base_learning_rate=base_learning_rate,\n warmup_epochs=warmup_epochs,\n model_dir=model_dir,\n use_tpu=use_tpu,\n resnet_depth=resnet_depth)\n\n\n resnet_classifier = tf.contrib.tpu.TPUEstimator(\n use_tpu=use_tpu,\n model_fn=model_fn,\n config=config,\n train_batch_size=train_batch_size,\n export_to_tpu=False)\n\n\n use_bfloat16 = (tf_precision == 'bfloat16')\n\n train_glob = os.path.join(url_base_path, 'train', '*.tfrecord')\n\n tf.logging.info(\"Train glob: {}\".format(train_glob))\n\n train_input_fn = functools.partial(rxinput.input_fn,\n input_fn_params=input_fn_params,\n tf_records_glob=train_glob,\n pixel_stats=GLOBAL_PIXEL_STATS,\n transpose_input=transpose_input,\n use_bfloat16=use_bfloat16)\n\n\n\n 
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'\n ' step %d.', train_steps, train_steps / steps_per_epoch,\n current_step)\n\n start_timestamp = time.time() # This time will include compilation time\n\n resnet_classifier.train(input_fn=train_input_fn, max_steps=train_steps)\n\n tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',\n train_steps, int(time.time() - start_timestamp))\n\n\n elapsed_time = int(time.time() - start_timestamp)\n tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',\n train_steps, elapsed_time)\n\n tf.logging.info('Exporting SavedModel.')\n\n def serving_input_receiver_fn():\n features = {\n 'feature': tf.placeholder(dtype=tf.float32, shape=[None, 512, 512, 6]),\n }\n receiver_tensors = features\n return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)\n\n resnet_classifier.export_saved_model(os.path.join(model_dir, 'saved_model'), serving_input_receiver_fn)\n\n\nif __name__ == '__main__':\n\n p = argparse.ArgumentParser(description='Train ResNet on rxrx1')\n # TPU Parameters\n p.add_argument(\n '--use-tpu',\n type=bool,\n default=True,\n help=('Use TPU to execute the model for training and evaluation. If'\n ' --use_tpu=false, will use whatever devices are available to'\n ' TensorFlow by default (e.g. CPU and GPU)'))\n p.add_argument(\n '--tpu',\n type=str,\n default=None,\n help=(\n 'The Cloud TPU to use for training.'\n ' This should be either the name used when creating the Cloud TPU, '\n 'or a grpc://ip.address.of.tpu:8470 url.'))\n p.add_argument(\n '--gcp-project',\n type=str,\n default=None,\n help=('Project name for the Cloud TPU-enabled project. '\n 'If not specified, we will attempt to automatically '\n 'detect the GCE project from metadata.'))\n p.add_argument(\n '--tpu-zone',\n type=str,\n default=None,\n help=('GCE zone where the Cloud TPU is located in. '\n 'If not specified, we will attempt to automatically '\n 'detect the GCE project from metadata.'))\n p.add_argument('--use-cache', type=bool, default=None)\n # Dataset Parameters\n p.add_argument(\n '--url-base-path',\n type=str,\n default='gs://rxrx1-us-central1/tfrecords/random-42',\n help=('Base path for tfrecord storage bucket url.'))\n # Training parameters\n p.add_argument(\n '--model-dir',\n type=str,\n default=None,\n help=(\n 'The Google Cloud Storage bucket where the model and training summaries are'\n ' stored.'))\n p.add_argument(\n '--train-epochs',\n type=int,\n default=1,\n help=(\n 'Defining an epoch as one pass through every training example, '\n 'the number of total passes through all examples during training. '\n 'Implicitly sets the total train steps.'))\n p.add_argument(\n '--num-train-images',\n type=int,\n default=73000\n )\n p.add_argument(\n '--train-batch-size',\n type=int,\n default=512,\n help=('Batch size to use during training.'))\n p.add_argument(\n '--n-classes',\n type=int,\n default=1108,\n help=('The number of label classes - typically will be 1108 '\n 'since there are 1108 experimental siRNA classes.'))\n p.add_argument(\n '--epochs-per-loop',\n type=int,\n default=1,\n help=('The number of steps to run on TPU before outfeeding metrics '\n 'to the CPU. Larger values will speed up training.'))\n p.add_argument(\n '--log-step-count-epochs',\n type=int,\n default=64,\n help=('The number of epochs at '\n 'which global step information is logged .'))\n p.add_argument(\n '--num-cores',\n type=int,\n default=8,\n help=('Number of TPU cores. 
For a single TPU device, this is 8 because '\n 'each TPU has 4 chips each with 2 cores.'))\n p.add_argument(\n '--data-format',\n type=str,\n default='channels_last',\n choices=[\n 'channels_first',\n 'channels_last',\n ],\n help=('A flag to override the data format used in the model. '\n 'To run on CPU or TPU, channels_last should be used. '\n 'For GPU, channels_first will improve performance.'))\n p.add_argument(\n '--transpose-input',\n type=bool,\n default=True,\n help=('Use TPU double transpose optimization.'))\n p.add_argument(\n '--tf-precision',\n type=str,\n default='bfloat16',\n choices=['bfloat16', 'float32'],\n help=('Tensorflow precision type used when defining the network.'))\n\n # Optimizer Parameters\n\n p.add_argument('--momentum', type=float, default=0.9)\n p.add_argument('--weight-decay', type=float, default=1e-4)\n p.add_argument(\n '--base-learning-rate',\n type=float,\n default=0.2,\n help=('Base learning rate when train batch size is 512. '\n 'Chosen to match the resnet paper.'))\n p.add_argument(\n '--warmup-epochs',\n type=int,\n default=5,\n )\n args = p.parse_args()\n args = vars(args)\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.logging.info('Parsed args: ')\n for k, v in args.items():\n tf.logging.info('{} : {}'.format(k, v))\n main(**args)\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.contrib.tpu.bfloat16_scope", "tensorflow.metrics.accuracy", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.train.cosine_decay_restarts", "tensorflow.contrib.tpu.CrossShardOptimizer", "tensorflow.nn.l2_loss", "tensorflow.contrib.summary.always_record_summaries", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.get_collection", "tensorflow.estimator.export.PredictOutput", "tensorflow.train.get_global_step", "tensorflow.losses.softmax_cross_entropy", "tensorflow.train.MomentumOptimizer", "tensorflow.logging.set_verbosity", "tensorflow.estimator.export.ServingInputReceiver", "tensorflow.trainable_variables", "tensorflow.nn.in_top_k", "tensorflow.argmax", "tensorflow.metrics.mean", "tensorflow.contrib.summary.all_summary_ops", "tensorflow.placeholder", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "numpy.array", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.contrib.summary.create_file_writer", "tensorflow.reshape", "tensorflow.contrib.summary.scalar", "tensorflow.python.estimator.estimator._load_global_step_from_checkpoint_dir" ] ]
licTomasPerez/-Code-Thesis-Non-markovian-Dynamics
[ "bafda3eeb8b9e326c0fb33237cdd7fa8d1412195" ]
[ "Quantum States' distance Notebooks/bures-wooters.py" ]
[ "import qutip\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt\nimport pickle\n\n\ndef prod_basis(b1, b2):\n return [qutip.tensor(b,s) for b in b1 for s in b2]\n\ndef scalar_prod(op1,op2,rho0=None):\n if op1.dims[0][0]!=op1.dims[0][0]:\n return None\n if rho0 is None:\n rho0 = qutip.qeye(op1.dims[0])/op1.dims[0][0]\n return ((op1.dag()*op2+op2.dag()*op1)*rho0).tr()\n \n\ndef base_orto(ops,rho0):\n dim = ops[0].dims[0][0]\n base = []\n # hacer gramm schmidt\n for op in ops:\n coeff = [scalar_prod(op2,op, rho0) for op2 in base]\n op_mod = op - sum([ c*op2 for c, op2 in zip(coeff, base)])\n op_mod = op_mod/np.sqrt(scalar_prod(op_mod,op_mod, rho0))\n base.append(op_mod)\n return base\n\n\ndef proj_op(K,base,rho0):\n return sum([ scalar_prod(b, K,rho0) * b for b in base])\n\ndef logM(rho):\n vals, vecs = rho.eigenstates()\n return sum([np.log(val)*vec*vec.dag() for val,vec in zip(vals, vecs) if val>0])\n\ndef sqrtM(rho):\n vals, vecs = rho.eigenstates()\n return sum([ (abs(val)**.5)*vec*vec.dag() for val,vec in zip(vals, vecs)])\n\ndef rel_entropy(rho, sigma):\n val = (rho*(logM(rho)-logM(sigma))).tr()\n if abs(val.imag)>1.e-6:\n print(\"rho or sigma not positive\")\n print(rho.eigenstates())\n print(sigma.eigenstates())\n return val.real\n\n\ndef bures(rho, sigma):\n val = abs((sqrtM(rho)*sqrtM(sigma)).tr())\n val = max(min(val,1.),-1.)\n return np.arccos(val)/np.pi\n \ndef maxent_rho(rho, basis): \n def test(x, rho, basis):\n k = sum([-u*b for u,b in zip(x, basis)]) \n sigma = (.5*(k+k.dag())).expm()\n sigma = sigma/sigma.tr()\n return rel_entropy(rho, sigma) \n res = opt.minimize(test,np.zeros(len(basis)),args=(rho,basis))\n k = sum([-u*b for u,b in zip(res.x, basis)]) \n sigma = (.5*(k+k.dag())).expm()\n sigma = sigma/sigma.tr()\n return sigma\n \n \ndef error_maxent_state(rho, basis, distance=bures):\n try:\n sigma = maxent_rho(rho, basis)\n return distance(rho,sigma)\n except:\n print(\"fail\")\n return None\n \n \ndef error_proj_state(rho, rho0, basis, distance=bures):\n try:\n basis = base_orto(basis, rho0)\n sigma = proj_op(logM(rho), basis, rho0).expm()\n return distance(rho, sigma)\n except:\n print(\"fail\")\n return None\nclass Result(object):\n def __init__(self, ts=None, states=None):\n self.ts = ts\n self.states = states\n self.max_ent_app = None\n self.projrho0_app = None\n self.projrho_inst_app = None \n \ndim1=7\ndim2=8\n\ndef simul(omega_bos=3, omega_s=3, temp=1, gaussian=False, deltat=10., tmax=500., distance=bures):\n basis_bos1 = [qutip.qeye(dim1), qutip.create(dim1),qutip.create(dim1).dag(),qutip.num(dim1)]\n H_bos1 = qutip.tensor(qutip.num(dim1), qutip.qeye(dim2))\n rho0 = qutip.tensor(-.5*(qutip.num(dim1)/temp), -.5*(qutip.num(dim2)/temp)).expm()\n rho0 = rho0/rho0.tr()\n # Base\n if gaussian:\n basis_bos2 = [qutip.qeye(dim2), qutip.create(dim2),qutip.destroy(dim2).dag(),qutip.num(dim2)]\n H_bos2 = qutip.tensor(qutip.qeye(dim1), qutip.num(dim2))\n else:\n basis_bos2 = [qutip.qeye(dim2), qutip.num(dim2)]\n H_bos2 = qutip.tensor(qutip.qeye(dim1), qutip.num(dim2))\n \n basis = base_orto(prod_basis(basis_bos1, basis_bos2), rho0)\n H0 = omega_bos * H_bos1 + omega_s * H_bos2\n Hi = qutip.tensor(qutip.create(dim1),qutip.destroy(dim2))+qutip.tensor(qutip.destroy(dim1),qutip.create(dim2))\n H=H0+0.05*Hi\n # Hamiltoniano \n \n sampling = int(10*max(1,omega_bos, omega_s)*deltat)\n \n states = [rho0]\n rho = rho0 \n ts= [0]\n for i in range(int(tmax/deltat)):\n result = qutip.mesolve(H, states[-1], np.linspace(0,deltat, 
sampling),args={'omega_bos': omega_bos, 'omega_s': omega_s})\n states.append(result.states[-1])\n ts.append(deltat*i)\n #[0.05*qutip.tensor(qutip.destroy(dim1),qutip.qeye(dim2)),0.1*qutip.tensor(qutip.create(dim1),qutip.qeye(dim2))]\n result = Result(ts, states)\n result.times = ts\n result.states = states\n result.max_ent_app = np.array([error_maxent_state(rho, basis, distance) for rho in states])\n result.projrho0_app = np.array([error_proj_state(rho, rho0, basis,distance) for rho in states])\n result.projrho_inst_app = np.array([error_proj_state(rho, qutip.tensor(rho.ptrace([0]),rho.ptrace([1])), \n basis, distance) for rho in states])\n \n if gaussian:\n title = f\" BW Dinámica cerrada gaussiana wb1={omega_bos} wb2={omega_s} dim1={dim1} dim2={dim2}\"\n else:\n title = f\" BW Dinámica cerrada no gaussiana wb1={omega_bos} wb2={omega_s} dim1={dim1} dim2={dim2}\" \n\n with open(title+\".pkl\",\"wb\") as f:\n pickle.dump(result, f)\n return result, title\n\n## Dinámica no Gaussiana, no resonante\n\nresult, title = simul(omega_bos=3., omega_s=np.sqrt(48), temp=1, gaussian=True, deltat=5., tmax=500., distance=bures)\n\n\nplt.plot(result.times, result.max_ent_app, color=\"orange\", label=\"max-ent\")\nplt.plot(result.times, result.projrho0_app, color=\"violet\", label=\"proj rho0\")\nplt.plot(result.times, result.projrho_inst_app, color=\"crimson\", label=\"proj rho(t)\")\nplt.xlabel(\"t[s]\")\nplt.ylabel(\"Arccos(F)\")\n\nplt.legend()\nplt.title(title)\nplt.savefig(title + f\" dim1={dim1}dim2={dim2}.svg\")\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.linspace", "numpy.arccos", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
DavidDworetzky/Pycasso
[ "4810445889d7309b10fc039b57f0c6026633229b" ]
[ "Pycasso/Core/Neural_Transfer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nfrom io import BytesIO\nimport base64\nimport copy\nimport uuid\n\n#CONSTANTS\n# desired depth layers to compute style/content losses :\ncontent_layers_default = ['conv_4']\nstyle_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\n#Helper functions for image processing -> loading\n#TODO refactor this for various image processing deep learning processes into one class\ndef image_loader(image_data, loader, device, debug=True):\n decoded = base64.b64decode(image_data)\n #create temp file for processing\n temp_name = str(uuid.uuid4())\n #if debug flag, write out image to directory\n if(debug):\n with open(f\"{temp_name}.jpg\", 'wb') as f:\n f.write(decoded)\n b = BytesIO(decoded)\n b.seek(0)\n image = Image.open(b)\n # fake batch dimension required to fit network's input dimensions\n image = loader(image).unsqueeze(0)\n return image.to(device, torch.float)\n\n#Get Optimizer for Neural Network\ndef get_input_optimizer(input_img):\n # this line to show that input is a parameter that requires a gradient\n optimizer = optim.LBFGS([input_img.requires_grad_()])\n return optimizer\n\n#Core of Neural Transfer in python\n\n#Gram Matrix, Content Loss, Style Loss\nclass ContentLoss(nn.Module):\n def __init__(self, target,):\n super(ContentLoss, self).__init__()\n self.target = target.detach()\n def forward(self, input):\n self.loss = F.mse_loss(input, self.target)\n return input\ndef gram_matrix(input):\n a, b, c, d = input.size() # a=batch size(=1)\n # b=number of feature maps\n # (c,d)=dimensions of a f. map (N=c*d)\n features = input.view(a * b, c * d) # resise F_XL into \\hat F_XL\n G = torch.mm(features, features.t()) # compute the gram product\n #return normalized gram matrix\n return G.div(a * b * c * d)\n\ndef get_style_model_and_losses(cnn, normalization_mean, normalization_std,\n style_img, content_img,\n device,\n content_layers=content_layers_default,\n style_layers=style_layers_default):\n cnn = copy.deepcopy(cnn)\n\n # normalization module\n normalization = Normalization(normalization_mean, normalization_std).to(device)\n\n # just in order to have an iterable access to or list of content/syle\n # losses\n content_losses = []\n style_losses = []\n\n # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential\n # to put in modules that are supposed to be activated sequentially\n model = nn.Sequential(normalization)\n\n i = 0 # increment every time we see a conv\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # The in-place version doesn't play very nicely with the ContentLoss\n # and StyleLoss we insert below. 
So we replace with out-of-place\n # ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'pool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n else:\n raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))\n\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).detach()\n content_loss = ContentLoss(target)\n model.add_module(\"content_loss_{}\".format(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).detach()\n style_loss = StyleLoss(target_feature)\n model.add_module(\"style_loss_{}\".format(i), style_loss)\n style_losses.append(style_loss)\n\n # now we trim off the layers after the last content and style losses\n for i in range(len(model) - 1, -1, -1):\n if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):\n break\n\n model = model[:(i + 1)]\n\n return model, style_losses, content_losses\n\ndef run_style_transfer(cnn, normalization_mean, normalization_std,\n content_img, style_img, input_img,\n device,\n num_steps=300,\n style_weight=1000000, content_weight=1):\n \"\"\"Run the style transfer.\"\"\"\n #Building the style transfer model\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img, device)\n optimizer = get_input_optimizer(input_img)\n\n #Optimizing\n run = [0]\n while run[0] <= num_steps:\n\n def closure():\n # correct the values of updated input image\n input_img.data.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n return style_score + content_score\n optimizer.step(closure)\n\n # a last correction...\n input_img.data.clamp_(0, 1)\n\n return input_img\n\nclass StyleLoss(nn.Module):\n def __init__(self, target_feature):\n super(StyleLoss, self).__init__()\n self.target = gram_matrix(target_feature).detach()\n\n def forward(self, input):\n G = gram_matrix(input)\n self.loss = F.mse_loss(G, self.target)\n return input\n\n# create a module to normalize input image so we can easily put it in a\n# nn.Sequential\nclass Normalization(nn.Module):\n def __init__(self, mean, std):\n super(Normalization, self).__init__()\n # .view the mean and std to make them [C x 1 x 1] so that they can\n # directly work with image Tensor of shape [B x C x H x W].\n # B is batch size. C is number of channels. 
H is height and W is width.\n self.mean = torch.tensor(mean).view(-1, 1, 1)\n self.std = torch.tensor(std).view(-1, 1, 1)\n\n def forward(self, img):\n # normalize img\n return (img - self.mean) / self.std\n\nclass Neural_Transfer:\n def __init__(self, image_size, content_image, style_image, debug=True):\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.image_size = image_size\n self.debug = debug\n # scale imported image and transform it into a torch tensor\n self.loader = transforms.Compose([transforms.Resize((self.image_size, self.image_size)), transforms.ToTensor()])\n #initialize style and content image \n self.style_image = image_loader(style_image, self.loader, self.device, self.debug)\n self.content_image = image_loader(content_image, self.loader, self.device, self.debug)\n self.cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(self.device)\n self.cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(self.device)\n self.cnn = models.vgg19(pretrained=True).features.to(self.device).eval()\n \n def run_transfer(self, num_steps):\n input_image = self.content_image.clone()\n output = run_style_transfer(self.cnn, self.cnn_normalization_mean, self.cnn_normalization_std,\n self.content_image, self.style_image, input_image, self.device, num_steps = num_steps)\n\n #after we run the output, now we need to reconstitute it as a PIL image\n unloader = transforms.ToPILImage() # reconvert into PIL image\n image = output.cpu().clone() # we clone the tensor to not do changes on it\n image = image.squeeze(0) # remove the fake batch dimension\n image = unloader(image)\n return image\n\n #common alias for image job processor\n def run_job(self, num_steps):\n return self.run_transfer(num_steps)\n" ]
[ [ "torch.nn.Sequential", "torch.tensor", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "torch.nn.ReLU" ] ]
nitred/img2svd
[ "16845e1e5f01964375af197acce7e0de9247652d" ]
[ "img2svd/__init__.py" ]
[ "\"\"\"Contains the functions for implementing img2svd.\"\"\"\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.io import imread\n\n\ndef get_svd_from_grayscale_image(imgpath, sigma_coverage_percentage=95, plot=True):\n \"\"\"Returns the compressed U, S and V.H after SVD of the image.\n\n The image is first converted to grayscale and then `n_components` (similar to\n principal components) are chosen based on the `sigma_coverage_percentage`.\n\n We choose the first `n_components` such that:\n `(np.sum(sigma[:n_components]) / np.sum(sigma)) >= (sigma_coverage_percentage / 100)`\n\n Args:\n imgpath (str): The full path of the image.\n sigma_coverage_percentage (int, float): It is an estimate of how much information\n should be preserved. It is used to calculate `n_components` as described above.\n plot (bool): Whether to plot the original vs reconstructed/compressd image.\n\n Returns:\n U, S, V_H (tuple of ndarrays): The same as the output of `np.linalg.svd`, however\n instead of the full U, S, V_H returned by the `np.linalg.svd` we return only\n `U[:, :n_components], S[:n_components], V_H[:n_components, :]` where `n_components`\n is computed as described above.\n\n The `n_components` is the same as U.shape[1] or S.shape[0] or V_H.shape[0].\n \"\"\"\n imgarr = imread(imgpath, as_grey=True)\n u, s, v = np.linalg.svd(imgarr, full_matrices=True) # s is already sorted\n # The number of components required to have the sigma_coverage_percentage. +1 is for index.\n n_components = np.argmax((np.cumsum(s) / np.sum(s)) >= (sigma_coverage_percentage / 100.0)) + 1\n\n if plot is True:\n s_dot_v = np.dot(np.diag(s[:n_components]), v[:n_components, :])\n u_dot_s_dot_v = np.dot(u[:, :n_components], s_dot_v)\n imgarr_reconstructed = u_dot_s_dot_v\n figure, axes = plt.subplots(2, sharex=False)\n axes[0].imshow(imgarr, cmap='Greys_r')\n axes[1].imshow(imgarr_reconstructed, cmap='Greys_r')\n plt.show()\n\n return u[:, :n_components], s[:n_components], v[:n_components, :]\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.linalg.svd", "matplotlib.pyplot.subplots", "numpy.cumsum", "matplotlib.pyplot.show", "numpy.sum" ] ]
Sketos/PyAutoArray
[ "72dc7e8d1c38786915f82a7e7284239e5ce87624" ]
[ "test_autoarray/plot/mapper_voronoi/all.py" ]
[ "import autoarray as aa\nimport autoarray.plot as aplt\nimport numpy as np\n\ngrid_7x7 = aa.grid.uniform(shape_2d=(7, 7), pixel_scales=0.25)\ngrid_9 = aa.grid.manual_1d(\n grid=[\n [0.6, -0.3],\n [0.5, -0.8],\n [0.2, 0.1],\n [0.0, 0.5],\n [-0.3, -0.8],\n [-0.6, -0.5],\n [-0.4, -1.1],\n [-1.2, 0.8],\n [-1.5, 0.9],\n ],\n shape_2d=(3, 3),\n pixel_scales=1.0,\n)\nvoronoi_grid = aa.grid_voronoi(\n grid_1d=grid_9,\n nearest_pixelization_1d_index_for_mask_1d_index=np.zeros(\n shape=grid_7x7.shape_1d, dtype=\"int\"\n ),\n)\nvoronoi_mapper = aa.mapper(grid=grid_7x7, pixelization_grid=voronoi_grid)\n\naplt.mapper_obj(mapper=voronoi_mapper, source_pixel_indexes=[[3, 4], [5]])\n" ]
[ [ "numpy.zeros" ] ]
JavierEscobarOrtiz/skforecast
[ "a3af4a1dd4201c582f159d4e3a1734ed6d29b6c5" ]
[ "skforecast/model_selection/model_selection.py" ]
[ "################################################################################\n# skforecast.model_selection #\n# #\n# This work by Joaquin Amat Rodrigo is licensed under a Creative Commons #\n# Attribution 4.0 International License. #\n################################################################################\n# coding=utf-8\n\n\nfrom typing import Union, Tuple, Optional, Any\nimport numpy as np\nimport pandas as pd\nimport warnings\nimport logging\nfrom copy import deepcopy\nfrom tqdm import tqdm\nfrom sklearn.metrics import mean_squared_error \nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_absolute_percentage_error\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.model_selection import ParameterSampler\nimport optuna\nfrom optuna.samplers import TPESampler, RandomSampler\noptuna.logging.set_verbosity(optuna.logging.WARNING) # disable optuna logs\nfrom skopt.utils import use_named_args\nfrom skopt import gp_minimize\n\nfrom ..ForecasterAutoreg import ForecasterAutoreg\nfrom ..ForecasterAutoregCustom import ForecasterAutoregCustom\nfrom ..ForecasterAutoregDirect import ForecasterAutoregDirect\nfrom ..ForecasterAutoregMultiOutput import ForecasterAutoregMultiOutput\n\nlogging.basicConfig(\n format = '%(name)-10s %(levelname)-5s %(message)s', \n level = logging.INFO,\n)\n\n\ndef time_series_splitter(\n y: Union[np.ndarray, pd.Series],\n initial_train_size: int,\n steps: int,\n allow_incomplete_fold: bool=True,\n verbose: bool=True\n) -> Union[np.ndarray, np.ndarray]:\n '''\n \n Split indices of a time series into multiple train-test pairs. The order of\n is maintained and the training set increases in each iteration.\n \n Parameters\n ---------- \n y : 1d numpy ndarray, pandas Series\n Training time series values. \n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n allow_incomplete_fold : bool, default `True`\n The last test set is allowed to be incomplete if it does not reach `steps`\n observations. 
Otherwise, the latest observations are discarded.\n \n verbose : bool, default `True`\n Print number of splits created.\n\n Yields\n ------\n train : 1d numpy ndarray\n Training indices.\n \n test : 1d numpy ndarray\n Test indices.\n \n '''\n \n if not isinstance(y, (np.ndarray, pd.Series)):\n\n raise Exception('`y` must be `1D np.ndarray` o `pd.Series`.')\n\n elif isinstance(y, np.ndarray) and y.ndim != 1:\n\n raise Exception(\n f\"`y` must be `1D np.ndarray` o `pd.Series`, \"\n f\"got `np.ndarray` with {y.ndim} dimensions.\"\n )\n \n if initial_train_size > len(y):\n raise Exception(\n '`initial_train_size` must be smaller than length of `y`.'\n ' Try to reduce `initial_train_size` or `steps`.'\n )\n\n if isinstance(y, pd.Series):\n y = y.to_numpy().copy()\n \n \n folds = (len(y) - initial_train_size) // steps + 1\n # +1 fold is needed to allow including the remainder in the last iteration.\n remainder = (len(y) - initial_train_size) % steps \n \n if verbose:\n if folds == 1:\n print(f\"Number of folds: {folds - 1}\")\n print(\"Not enough observations in `y` to create even a complete fold.\"\n \" Try to reduce `initial_train_size` or `steps`.\"\n )\n\n elif remainder == 0:\n print(f\"Number of folds: {folds - 1}\")\n\n elif remainder != 0 and allow_incomplete_fold:\n print(f\"Number of folds: {folds}\")\n print(\n f\"Since `allow_incomplete_fold=True`, \"\n f\"last fold only includes {remainder} observations instead of {steps}.\"\n )\n print(\n 'Incomplete folds with few observations could overestimate or ',\n 'underestimate validation metrics.'\n )\n elif remainder != 0 and not allow_incomplete_fold:\n print(f\"Number of folds: {folds - 1}\")\n print(\n f\"Since `allow_incomplete_fold=False`, \"\n f\"last {remainder} observations are descarted.\"\n )\n\n if folds == 1:\n # There are no observations to create even a complete fold\n return []\n \n for i in range(folds):\n \n if i < folds - 1:\n train_end = initial_train_size + i * steps \n train_indices = range(train_end)\n test_indices = range(train_end, train_end + steps)\n \n else:\n if remainder != 0 and allow_incomplete_fold:\n train_end = initial_train_size + i * steps \n train_indices = range(train_end)\n test_indices = range(train_end, len(y))\n else:\n break\n \n yield train_indices, test_indices\n \n \ndef _get_metric(metric:str) -> callable:\n '''\n Get the corresponding scikitlearn function to calculate the metric.\n \n Parameters\n ----------\n metric : {'mean_squared_error', 'mean_absolute_error', \n 'mean_absolute_percentage_error', 'mean_squared_log_error'}\n Metric used to quantify the goodness of fit of the model.\n \n Returns \n -------\n metric : callable\n scikitlearn function to calculate the desired metric.\n '''\n \n if metric not in ['mean_squared_error', 'mean_absolute_error',\n 'mean_absolute_percentage_error', 'mean_squared_log_error']:\n raise Exception(\n f\"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error', \"\n f\"'mean_absolute_percentage_error' and 'mean_squared_log_error'. 
Got {metric}.\"\n )\n \n metrics = {\n 'mean_squared_error': mean_squared_error,\n 'mean_absolute_error': mean_absolute_error,\n 'mean_absolute_percentage_error': mean_absolute_percentage_error,\n 'mean_squared_log_error': mean_squared_log_error\n }\n \n metric = metrics[metric]\n \n return metric\n \n\ndef cv_forecaster(\n forecaster,\n y: pd.Series,\n initial_train_size: int,\n steps: int,\n metric: Union[str, callable],\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n allow_incomplete_fold: bool=True,\n verbose: bool=True\n) -> Tuple[np.array, pd.DataFrame]:\n '''\n Cross-validation of forecaster. The order of data is maintained and the\n training set increases in each iteration.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forecaster model.\n \n y : pandas Series\n Training time series values. \n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n It callable:\n Function with arguments y_true, y_pred that returns a float.\n \n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n allow_incomplete_fold : bool, default `True`\n The last test partition is allowed to be incomplete if it does not reach `steps`\n observations. Otherwise, the latest observations are discarded.\n \n verbose : bool, default `True`\n Print number of folds used for cross validation.\n\n Returns \n -------\n cv_metrics: 1d numpy ndarray\n Value of the metric for each fold.\n\n cv_predictions: pandas DataFrame\n Predictions.\n\n '''\n\n if initial_train_size > len(y):\n raise Exception(\n '`initial_train_size` must be smaller than length of `y`.'\n )\n \n if initial_train_size is not None and initial_train_size < forecaster.window_size:\n raise Exception(\n f\"`initial_train_size` must be greater than \"\n f\"forecaster's window_size ({forecaster.window_size}).\"\n )\n \n forecaster = deepcopy(forecaster)\n if isinstance(metric, str):\n metric = _get_metric(metric=metric)\n \n splits = time_series_splitter(\n y = y,\n initial_train_size = initial_train_size,\n steps = steps,\n allow_incomplete_fold = allow_incomplete_fold,\n verbose = verbose\n )\n\n cv_predictions = []\n cv_metrics = []\n \n for train_index, test_index in splits:\n \n if exog is None:\n forecaster.fit(y=y.iloc[train_index]) \n pred = forecaster.predict(steps=len(test_index))\n \n else:\n forecaster.fit(y=y.iloc[train_index], exog=exog.iloc[train_index,]) \n pred = forecaster.predict(steps=len(test_index), exog=exog.iloc[test_index])\n \n metric_value = metric(\n y_true = y.iloc[test_index],\n y_pred = pred\n )\n \n cv_predictions.append(pred)\n cv_metrics.append(metric_value)\n \n cv_predictions = pd.concat(cv_predictions)\n cv_predictions = pd.DataFrame(cv_predictions)\n cv_metrics = np.array(cv_metrics)\n \n return cv_metrics, cv_predictions\n\n\ndef _backtesting_forecaster_refit(\n forecaster,\n y: pd.Series,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n interval: Optional[list]=None,\n n_boot: int=500,\n 
random_state: int=123,\n in_sample_residuals: bool=True,\n verbose: bool=False\n) -> Tuple[float, pd.DataFrame]:\n '''\n Backtesting of forecaster model with a re-fitting strategy. A copy of the \n original forecaster is created so it is not modified during the process.\n \n In each iteration:\n - Fit forecaster with the training set.\n - A number of `steps` ahead are predicted.\n - The training set increases with `steps` observations.\n - The model is re-fitted using the new training set.\n\n In order to apply backtesting with re-fit, an initial training set must be\n available, otherwise it would not be possible to increase the training set \n after each iteration. `initial_train_size` must be provided.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forecaster model.\n \n y : pandas Series\n Training time series values. \n \n initial_train_size: int\n Number of samples in the initial train split. The backtest forecaster is\n trained using the first `initial_train_size` observations.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n \n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n \n exog :panda Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n\n interval: list, default `None`\n Confidence of the prediction interval estimated. Sequence of percentiles\n to compute, which must be between 0 and 100 inclusive. If `None`, no\n intervals are estimated. Only available for forecaster of type ForecasterAutoreg\n and ForecasterAutoregCustom.\n \n n_boot: int, default `500`\n Number of bootstrapping iterations used to estimate prediction\n intervals.\n\n random_state: int, default `123`\n Sets a seed to the random generator, so that boot intervals are always \n deterministic.\n\n in_sample_residuals: bool, default `True`\n If `True`, residuals from the training data are used as proxy of\n prediction error to create prediction intervals. 
If `False`, out_sample_residuals\n are used if they are already stored inside the forecaster.\n \n verbose : bool, default `False`\n Print number of folds and index of training and validation sets used for backtesting.\n\n Returns \n -------\n metric_value: float\n Value of the metric.\n\n backtest_predictions: pandas Dataframe\n Value of predictions and their estimated interval if `interval` is not `None`.\n column pred = predictions.\n column lower_bound = lower bound of the interval.\n column upper_bound = upper bound interval of the interval.\n\n '''\n\n forecaster = deepcopy(forecaster)\n if isinstance(metric, str):\n metric = _get_metric(metric=metric)\n backtest_predictions = []\n \n folds = int(np.ceil((len(y) - initial_train_size) / steps))\n remainder = (len(y) - initial_train_size) % steps\n \n if verbose:\n print(f\"Information of backtesting process\")\n print(f\"----------------------------------\")\n print(f\"Number of observations used for initial training: {initial_train_size}\")\n print(f\"Number of observations used for backtesting: {len(y) - initial_train_size}\")\n print(f\" Number of folds: {folds}\")\n print(f\" Number of steps per fold: {steps}\")\n if remainder != 0:\n print(f\" Last fold only includes {remainder} observations.\")\n print(\"\")\n for i in range(folds):\n if fixed_train_size:\n # The train size doesn't increase but moves by `steps` in each iteration.\n train_idx_start = i * steps\n train_idx_end = initial_train_size + i * steps\n else:\n # The train size increases by `steps` in each iteration.\n train_idx_start = 0\n train_idx_end = initial_train_size + i * steps\n print(f\"Data partition in fold: {i}\")\n if i < folds - 1:\n print(f\" Training: {y.index[train_idx_start]} -- {y.index[train_idx_end - 1]} (n={len(y.index[train_idx_start:train_idx_end])})\")\n print(f\" Validation: {y.index[train_idx_end]} -- {y.index[train_idx_end + steps - 1]} (n={len(y.index[train_idx_end:train_idx_end + steps])})\")\n else:\n print(f\" Training: {y.index[train_idx_start]} -- {y.index[train_idx_end - 1]} (n={len(y.index[train_idx_start:train_idx_end])})\")\n print(f\" Validation: {y.index[train_idx_end]} -- {y.index[-1]} (n={len(y.index[train_idx_end:])})\")\n print(\"\")\n \n if folds > 50:\n print(\n f\"Forecaster will be fit {folds} times. This can take substantial amounts of time. \"\n f\"If not feasible, try with `refit = False`. 
\\n\"\n )\n\n for i in range(folds):\n # In each iteration (except the last one) the model is fitted before making predictions.\n if fixed_train_size:\n # The train size doesn't increases but moves by `steps` in each iteration.\n train_idx_start = i * steps\n train_idx_end = initial_train_size + i * steps\n else:\n # The train size increases by `steps` in each iteration.\n train_idx_start = 0\n train_idx_end = initial_train_size + i * steps\n \n if exog is not None:\n next_window_exog = exog.iloc[train_idx_end:train_idx_end + steps, ]\n\n if interval is None:\n\n if i < folds - 1:\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict(steps=steps)\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict(steps=steps, exog=next_window_exog)\n else: \n if remainder == 0:\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict(steps=steps)\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict(steps=steps, exog=next_window_exog)\n else:\n # Only the remaining steps need to be predicted\n steps = remainder\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict(steps=steps)\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict(steps=steps, exog=next_window_exog)\n else:\n\n if i < folds - 1:\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict_interval(\n steps = steps,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict_interval(\n steps = steps,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else: \n if remainder == 0:\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict_interval(\n steps = steps,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict_interval(\n steps = steps,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n # Only the remaining steps need to be predicted\n steps = remainder\n if exog is None:\n forecaster.fit(y=y.iloc[train_idx_start:train_idx_end])\n pred = forecaster.predict_interval(\n steps = steps,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n forecaster.fit(\n y = y.iloc[train_idx_start:train_idx_end], \n exog = exog.iloc[train_idx_start:train_idx_end, ]\n )\n pred = forecaster.predict_interval(\n steps = steps,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n\n 
backtest_predictions.append(pred)\n \n backtest_predictions = pd.concat(backtest_predictions)\n if isinstance(backtest_predictions, pd.Series):\n backtest_predictions = pd.DataFrame(backtest_predictions)\n\n metric_value = metric(\n y_true = y.iloc[initial_train_size: initial_train_size + len(backtest_predictions)],\n y_pred = backtest_predictions['pred']\n )\n\n return metric_value, backtest_predictions\n\n\ndef _backtesting_forecaster_no_refit(\n forecaster,\n y: pd.Series,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: Optional[int]=None,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n interval: Optional[list]=None,\n n_boot: int=500,\n random_state: int=123,\n in_sample_residuals: bool=True,\n verbose: bool=False\n) -> Tuple[float, pd.DataFrame]:\n '''\n Backtesting of forecaster without iterative re-fitting. In each iteration,\n a number of `steps` are predicted. A copy of the original forecaster is\n created so it is not modified during the process.\n\n If `forecaster` is already trained and `initial_train_size` is `None`,\n no initial train is done and all data is used to evaluate the model.\n However, the first `len(forecaster.last_window)` observations are needed\n to create the initial predictors, so no predictions are calculated for them.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forecaster model.\n \n y : pandas Series\n Training time series values. \n \n initial_train_size: int, default `None`\n Number of samples in the initial train split. If `None` and `forecaster` is already\n trained, no initial train is done and all data is used to evaluate the model. However, \n the first `len(forecaster.last_window)` observations are needed to create the \n initial predictors, so no predictions are calculated for them.\n \n steps : int, None\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n \n exog :panda Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n\n interval: list, default `None`\n Confidence of the prediction interval estimated. Sequence of percentiles\n to compute, which must be between 0 and 100 inclusive. If `None`, no\n intervals are estimated. Only available for forecaster of type ForecasterAutoreg\n and ForecasterAutoregCustom.\n \n n_boot: int, default `500`\n Number of bootstrapping iterations used to estimate prediction\n intervals.\n\n random_state: int, default `123`\n Sets a seed to the random generator, so that boot intervals are always \n deterministic.\n\n in_sample_residuals: bool, default `True`\n If `True`, residuals from the training data are used as proxy of\n prediction error to create prediction intervals. 
If `False`, out_sample_residuals\n are used if they are already stored inside the forecaster.\n \n verbose : bool, default `False`\n Print number of folds and index of training and validation sets used for backtesting.\n\n Returns \n -------\n metric_value: float\n Value of the metric.\n\n backtest_predictions: pandas DataFrame\n Value of predictions and their estimated interval if `interval` is not `None`.\n column pred = predictions.\n column lower_bound = lower bound of the interval.\n column upper_bound = upper bound interval of the interval.\n\n '''\n\n forecaster = deepcopy(forecaster)\n if isinstance(metric, str):\n metric = _get_metric(metric=metric)\n backtest_predictions = []\n\n if initial_train_size is not None:\n if exog is None:\n forecaster.fit(y=y.iloc[:initial_train_size]) \n else:\n forecaster.fit(\n y = y.iloc[:initial_train_size],\n exog = exog.iloc[:initial_train_size, ]\n )\n window_size = forecaster.window_size\n else:\n # Although not used for training, first observations are needed to create\n # the initial predictors\n window_size = forecaster.window_size\n initial_train_size = window_size\n \n folds = int(np.ceil((len(y) - initial_train_size) / steps))\n remainder = (len(y) - initial_train_size) % steps\n \n if verbose:\n print(f\"Information of backtesting process\")\n print(f\"----------------------------------\")\n print(f\"Number of observations used for initial training or as initial window: {initial_train_size}\")\n print(f\"Number of observations used for backtesting: {len(y) - initial_train_size}\")\n print(f\" Number of folds: {folds}\")\n print(f\" Number of steps per fold: {steps}\")\n if remainder != 0:\n print(f\" Last fold only includes {remainder} observations\")\n print(\"\")\n for i in range(folds):\n last_window_end = initial_train_size + i * steps\n print(f\"Data partition in fold: {i}\")\n if i < folds - 1:\n print(f\" Training: {y.index[0]} -- {y.index[initial_train_size - 1]} (n={len(y.index[:initial_train_size])})\")\n print(f\" Validation: {y.index[last_window_end]} -- {y.index[last_window_end + steps -1]} (n={len(y.index[last_window_end:last_window_end + steps])})\")\n else:\n print(f\" Training: {y.index[0]} -- {y.index[initial_train_size - 1]} (n={len(y.index[:initial_train_size])})\")\n print(f\" Validation: {y.index[last_window_end]} -- {y.index[-1]} (n={len(y.index[last_window_end:])})\")\n print(\"\")\n\n for i in range(folds):\n # Since the model is only fitted with the initial_train_size, last_window\n # and next_window_exog must be updated to include the data needed to make\n # predictions.\n last_window_end = initial_train_size + i * steps\n last_window_start = last_window_end - window_size \n last_window_y = y.iloc[last_window_start:last_window_end]\n if exog is not None:\n next_window_exog = exog.iloc[last_window_end:last_window_end + steps, ]\n \n if interval is None: \n\n if i < folds - 1: \n if exog is None:\n pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y\n )\n else:\n pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog\n ) \n else: \n if remainder == 0:\n if exog is None:\n pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y\n )\n else:\n pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog\n )\n else:\n # Only the remaining steps need to be predicted\n steps = remainder\n if exog is None:\n pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y\n )\n else:\n 
pred = forecaster.predict(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog\n )\n \n backtest_predictions.append(pred)\n\n else:\n if i < folds - 1:\n if exog is None:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n ) \n else: \n if remainder == 0:\n if exog is None:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n # Only the remaining steps need to be predicted\n steps = remainder\n if exog is None:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n else:\n pred = forecaster.predict_interval(\n steps = steps,\n last_window = last_window_y,\n exog = next_window_exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals\n )\n \n backtest_predictions.append(pred)\n\n backtest_predictions = pd.concat(backtest_predictions)\n if isinstance(backtest_predictions, pd.Series):\n backtest_predictions = pd.DataFrame(backtest_predictions)\n\n metric_value = metric(\n y_true = y.iloc[initial_train_size : initial_train_size + len(backtest_predictions)],\n y_pred = backtest_predictions['pred']\n )\n\n return metric_value, backtest_predictions\n\n\ndef backtesting_forecaster(\n forecaster,\n y: pd.Series,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: Optional[int],\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n refit: bool=False,\n interval: Optional[list]=None,\n n_boot: int=500,\n random_state: int=123,\n in_sample_residuals: bool=True,\n verbose: bool=False\n) -> Tuple[float, pd.DataFrame]:\n '''\n Backtesting of forecaster model.\n\n If `refit` is False, the model is trained only once using the `initial_train_size`\n first observations. If `refit` is True, the model is trained in each iteration\n increasing the training set. A copy of the original forecaster is created so \n it is not modified during the process.\n\n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forecaster model.\n \n y : pandas Series\n Training time series values. \n \n initial_train_size: int, default `None`\n Number of samples in the initial train split. If `None` and `forecaster` is already \n trained, no initial train is done and all data is used to evaluate the model. 
However, \n the first `len(forecaster.last_window)` observations are needed to create the \n initial predictors, so no predictions are calculated for them.\n\n `None` is only allowed when `refit` is `False`.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n \n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n \n exog :panda Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n\n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration.\n\n interval: list, default `None`\n Confidence of the prediction interval estimated. Sequence of percentiles\n to compute, which must be between 0 and 100 inclusive. If `None`, no\n intervals are estimated. Only available for forecaster of type ForecasterAutoreg\n and ForecasterAutoregCustom.\n \n n_boot: int, default `500`\n Number of bootstrapping iterations used to estimate prediction\n intervals.\n\n random_state: int, default `123`\n Sets a seed to the random generator, so that boot intervals are always \n deterministic.\n\n in_sample_residuals: bool, default `True`\n If `True`, residuals from the training data are used as proxy of\n prediction error to create prediction intervals. If `False`, out_sample_residuals\n are used if they are already stored inside the forecaster.\n \n verbose : bool, default `False`\n Print number of folds and index of training and validation sets used for backtesting.\n\n Returns \n -------\n metric_value: float\n Value of the metric.\n\n backtest_predictions: pandas DataFrame\n Value of predictions and their estimated interval if `interval` is not `None`.\n column pred = predictions.\n column lower_bound = lower bound of the interval.\n column upper_bound = upper bound interval of the interval.\n\n '''\n\n if initial_train_size is not None and initial_train_size > len(y):\n raise Exception(\n 'If used, `initial_train_size` must be smaller than length of `y`.'\n )\n \n if initial_train_size is not None and initial_train_size < forecaster.window_size:\n raise Exception(\n f\"`initial_train_size` must be greater than \"\n f\"forecaster's window_size ({forecaster.window_size}).\"\n )\n\n if initial_train_size is None and not forecaster.fitted:\n raise Exception(\n '`forecaster` must be already trained if no `initial_train_size` is provided.'\n )\n\n if not isinstance(refit, bool):\n raise Exception(\n f'`refit` must be boolean: True, False.'\n )\n\n if initial_train_size is None and refit:\n raise Exception(\n f'`refit` is only allowed when there is a initial_train_size.'\n )\n\n if interval is not None and isinstance(forecaster, (ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput)):\n raise Exception(\n ('Interval prediction is only available when forecaster is of type '\n 'ForecasterAutoreg or ForecasterAutoregCustom.')\n )\n \n if refit:\n metric_value, backtest_predictions = _backtesting_forecaster_refit(\n forecaster = forecaster,\n y = y,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n exog = exog,\n interval = interval,\n n_boot = n_boot,\n 
random_state = random_state,\n in_sample_residuals = in_sample_residuals,\n verbose = verbose\n )\n else:\n metric_value, backtest_predictions = _backtesting_forecaster_no_refit(\n forecaster = forecaster,\n y = y,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n exog = exog,\n interval = interval,\n n_boot = n_boot,\n random_state = random_state,\n in_sample_residuals = in_sample_residuals,\n verbose = verbose\n )\n\n return metric_value, backtest_predictions\n\n\ndef grid_search_forecaster(\n forecaster,\n y: pd.Series,\n param_grid: dict,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n return_best: bool=True,\n verbose: bool=True\n) -> pd.DataFrame:\n '''\n Exhaustive search over specified parameter values for a Forecaster object.\n Validation is done using time series backtesting.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n param_grid : dict\n Dictionary with parameters names (`str`) as keys and lists of parameter\n settings to try as values.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. 
Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n \n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n '''\n\n param_grid = list(ParameterGrid(param_grid))\n\n results = _evaluate_grid_hyperparameters(\n forecaster = forecaster,\n y = y,\n param_grid = param_grid,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n exog = exog,\n lags_grid = lags_grid,\n refit = refit,\n return_best = return_best,\n verbose = verbose\n )\n\n return results\n\n\ndef random_search_forecaster(\n forecaster,\n y: pd.Series,\n param_distributions: dict,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n n_iter: int=10,\n random_state: int=123,\n return_best: bool=True,\n verbose: bool=True\n) -> pd.DataFrame:\n '''\n Random search over specified parameter values or distributions for a Forecaster object.\n Validation is done using time series backtesting.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n param_distributions : dict\n Dictionary with parameters names (`str`) as keys and \n distributions or lists of parameters to try.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n\n n_iter: int, default `10`\n Number of parameter settings that are sampled. 
\n n_iter trades off runtime vs quality of the solution.\n\n random_state: int, default `123`\n Sets a seed to the random sampling for reproducible output.\n\n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n '''\n\n param_grid = list(ParameterSampler(param_distributions, n_iter=n_iter, random_state=random_state))\n\n results = _evaluate_grid_hyperparameters(\n forecaster = forecaster,\n y = y,\n param_grid = param_grid,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n exog = exog,\n lags_grid = lags_grid,\n refit = refit,\n return_best = return_best,\n verbose = verbose\n )\n\n return results\n\n\ndef _evaluate_grid_hyperparameters(\n forecaster,\n y: pd.Series,\n param_grid: dict,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n return_best: bool=True,\n verbose: bool=True\n) -> pd.DataFrame:\n '''\n Evaluate parameter values for a Forecaster object using time series backtesting.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n param_grid : dict\n Dictionary with parameters names (`str`) as keys and lists of parameter\n settings to try as values.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. 
Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n \n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n '''\n\n if isinstance(forecaster, ForecasterAutoregCustom):\n if lags_grid is not None:\n warnings.warn(\n '`lags_grid` ignored if forecaster is an instance of `ForecasterAutoregCustom`.'\n )\n lags_grid = ['custom predictors']\n \n elif lags_grid is None:\n lags_grid = [forecaster.lags]\n \n lags_list = []\n params_list = []\n metric_list = []\n\n print(\n f\"Number of models compared: {len(param_grid)*len(lags_grid)}.\"\n )\n\n for lags in tqdm(lags_grid, desc='loop lags_grid', position=0, ncols=90):\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(lags)\n lags = forecaster.lags.copy()\n \n for params in tqdm(param_grid, desc='loop param_grid', position=1, leave=False, ncols=90):\n\n forecaster.set_params(**params)\n metrics = backtesting_forecaster(\n forecaster = forecaster,\n y = y,\n exog = exog,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n refit = refit,\n interval = None,\n verbose = verbose\n )[0]\n\n lags_list.append(lags)\n params_list.append(params)\n metric_list.append(metrics)\n \n results = pd.DataFrame({\n 'lags' : lags_list,\n 'params': params_list,\n 'metric': metric_list})\n \n results = results.sort_values(by='metric', ascending=True)\n results = pd.concat([results, results['params'].apply(pd.Series)], axis=1)\n \n if return_best:\n \n best_lags = results['lags'].iloc[0]\n best_params = results['params'].iloc[0]\n best_metric = results['metric'].iloc[0]\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(best_lags)\n forecaster.set_params(**best_params)\n forecaster.fit(y=y, exog=exog)\n \n print(\n f\"`Forecaster` refitted using the best-found lags and parameters, and the whole data set: \\n\"\n f\" Lags: {best_lags} \\n\"\n f\" Parameters: {best_params}\\n\"\n f\" Backtesting metric: {best_metric}\\n\"\n )\n \n return results\n\n\ndef bayesian_search_forecaster(\n forecaster,\n y: pd.Series,\n search_space: Union[callable, dict],\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n n_trials: int=10,\n random_state: int=123,\n return_best: bool=True,\n verbose: bool=True,\n engine: str='skopt',\n kwargs_create_study: dict={},\n kwargs_study_optimize: dict={},\n kwargs_gp_minimize: dict={},\n) -> Tuple[pd.DataFrame, object]:\n '''\n Bayesian optimization for a Forecaster object using time series backtesting and \n optuna or skopt library.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n 
ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n search_space : callable (optuna), dict (skopt)\n If optuna engine: callable\n Function with argument `trial` which returns a dictionary with parameters names \n (`str`) as keys and Trial object from optuna (trial.suggest_float, \n trial.suggest_int, trial.suggest_categorical) as values.\n\n If skopt engine: dict\n Dictionary with parameters names (`str`) as keys and Space object from skopt \n (Real, Integer, Categorical) as values.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n \n n_trials: int, default `10`\n Number of parameter settings that are sampled in each lag configuration.\n\n random_state: int, default `123`\n Sets a seed to the sampling for reproducible output.\n\n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n engine : str, default `'skopt'`\n If 'optuna':\n Bayesian optimization runs through the optuna library \n\n If 'skopt':\n Bayesian optimization runs through the skopt library\n\n kwargs_create_study : dict, default `{'direction':'minimize', 'sampler':TPESampler(seed=123)}`\n Only applies to engine='optuna'.\n Keyword arguments (key, value mappings) to pass to optuna.create_study.\n\n kwargs_study_optimize : dict, default `{}`\n Only applies to engine='optuna'.\n Other keyword arguments (key, value mappings) to pass to study.optimize().\n\n kwargs_gp_minimize : dict, default `{}`\n Only applies to engine='skopt'.\n Other keyword arguments (key, value mappings) to pass to skopt.gp_minimize().\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n results_opt_best: optuna object (optuna), scipy object (skopt) \n If optuna engine:\n The best optimization result returned as a FrozenTrial optuna object.\n\n If skopt engine:\n The best optimization result returned as a OptimizeResult object.\n '''\n\n if engine not in ['optuna', 'skopt']:\n raise Exception(\n f'''`engine` only allows 'optuna' or 'skopt', got {engine}.'''\n )\n\n if engine == 'optuna':\n results, results_opt_best = _bayesian_search_optuna(\n forecaster = forecaster,\n y = y,\n exog = exog,\n lags_grid = lags_grid,\n 
search_space = search_space,\n steps = steps,\n metric = metric,\n refit = refit,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n n_trials = n_trials,\n random_state = random_state,\n return_best = return_best,\n verbose = verbose,\n kwargs_create_study = kwargs_create_study,\n kwargs_study_optimize = kwargs_study_optimize\n )\n else:\n results, results_opt_best = _bayesian_search_skopt(\n forecaster = forecaster,\n y = y,\n exog = exog,\n lags_grid = lags_grid,\n search_space = search_space,\n steps = steps,\n metric = metric,\n refit = refit,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n n_trials = n_trials,\n random_state = random_state,\n return_best = return_best,\n verbose = verbose,\n kwargs_gp_minimize = kwargs_gp_minimize\n )\n\n return results, results_opt_best\n\n\ndef _bayesian_search_optuna(\n forecaster,\n y: pd.Series,\n search_space: callable,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n n_trials: int=10,\n random_state: int=123,\n return_best: bool=True,\n verbose: bool=True,\n kwargs_create_study: dict={},\n kwargs_study_optimize: dict={}\n) -> Tuple[pd.DataFrame, object]:\n '''\n Bayesian optimization for a Forecaster object using time series backtesting \n and optuna library.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect,\n ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n search_space : callable\n Function with argument `trial` which returns a dictionary with parameters names \n (`str`) as keys and Trial object from optuna (trial.suggest_float, \n trial.suggest_int, trial.suggest_categorical) as values.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n If callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. 
Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n \n n_trials: int, default `10`\n Number of parameter settings that are sampled in each lag configuration.\n\n random_state: int, default `123`\n Sets a seed to the sampling for reproducible output.\n\n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n kwargs_create_study : dict, default `{'direction':'minimize', 'sampler':TPESampler(seed=123)}`\n Keyword arguments (key, value mappings) to pass to optuna.create_study.\n\n kwargs_study_optimize : dict, default `{}`\n Other keyword arguments (key, value mappings) to pass to study.optimize().\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n results_opt_best: optuna object\n The best optimization result returned as a FrozenTrial optuna object.\n '''\n\n if isinstance(forecaster, ForecasterAutoregCustom):\n if lags_grid is not None:\n warnings.warn(\n '`lags_grid` ignored if forecaster is an instance of `ForecasterAutoregCustom`.'\n )\n lags_grid = ['custom predictors']\n \n elif lags_grid is None:\n lags_grid = [forecaster.lags]\n \n lags_list = []\n params_list = []\n metric_list = []\n results_opt_best = None\n\n # Objective function using backtesting_forecaster\n def _objective(\n trial,\n forecaster = forecaster,\n y = y,\n exog = exog,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n steps = steps,\n metric = metric,\n refit = refit,\n verbose = verbose,\n search_space = search_space,\n ) -> float:\n \n forecaster.set_params(**search_space(trial))\n \n metric, _ = backtesting_forecaster(\n forecaster = forecaster,\n y = y,\n exog = exog,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n refit = refit,\n verbose = verbose\n )\n\n return abs(metric)\n\n print(\n f'''Number of models compared: {n_trials*len(lags_grid)}, {n_trials} bayesian search in each lag configuration.'''\n )\n\n for lags in tqdm(lags_grid, desc='loop lags_grid', position=0, ncols=90):\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(lags)\n lags = forecaster.lags.copy()\n \n if 'sampler' in kwargs_create_study.keys():\n kwargs_create_study['sampler']._rng = np.random.RandomState(random_state)\n kwargs_create_study['sampler']._random_sampler = RandomSampler(seed=random_state) \n\n study = optuna.create_study(**kwargs_create_study)\n\n if 'sampler' not in kwargs_create_study.keys():\n study.sampler = TPESampler(seed=random_state)\n\n study.optimize(_objective, n_trials=n_trials, **kwargs_study_optimize)\n\n best_trial = study.best_trial\n\n if search_space(best_trial).keys() != best_trial.params.keys():\n raise Exception(\n f'''Some of the key values do not match the search_space key names.\n Dict keys : {list(search_space(best_trial).keys())}\n Trial objects : {list(best_trial.params.keys())}.'''\n )\n\n for trial in study.get_trials():\n 
params_list.append(trial.params)\n lags_list.append(lags)\n metric_list.append(trial.value)\n \n if results_opt_best is None:\n results_opt_best = best_trial\n else:\n if best_trial.value < results_opt_best.value:\n results_opt_best = best_trial\n \n results = pd.DataFrame({\n 'lags' : lags_list,\n 'params': params_list,\n 'metric': metric_list})\n \n results = results.sort_values(by='metric', ascending=True)\n results = pd.concat([results, results['params'].apply(pd.Series)], axis=1)\n \n if return_best:\n \n best_lags = results['lags'].iloc[0]\n best_params = results['params'].iloc[0]\n best_metric = results['metric'].iloc[0]\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(best_lags)\n forecaster.set_params(**best_params)\n forecaster.fit(y=y, exog=exog)\n \n print(\n f\"`Forecaster` refitted using the best-found lags and parameters, and the whole data set: \\n\"\n f\" Lags: {best_lags} \\n\"\n f\" Parameters: {best_params}\\n\"\n f\" Backtesting metric: {best_metric}\\n\"\n )\n \n return results, results_opt_best\n\n\ndef _bayesian_search_skopt(\n forecaster,\n y: pd.Series,\n search_space: dict,\n steps: int,\n metric: Union[str, callable],\n initial_train_size: int,\n fixed_train_size: bool=True,\n exog: Optional[Union[pd.Series, pd.DataFrame]]=None,\n lags_grid: Optional[list]=None,\n refit: bool=False,\n n_trials: int=10,\n random_state: int=123,\n return_best: bool=True,\n verbose: bool=True,\n kwargs_gp_minimize: dict={}\n) -> Tuple[pd.DataFrame, object]:\n '''\n Bayesian optimization for a Forecaster object using time series backtesting and skopt library.\n \n Parameters\n ----------\n forecaster : ForecasterAutoreg, ForecasterAutoregCustom, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput\n Forcaster model.\n \n y : pandas Series\n Training time series values. \n \n search_space : dict\n Dictionary with parameters names (`str`) as keys and Space object from skopt \n (Real, Integer, Categorical) as values.\n\n steps : int\n Number of steps to predict.\n \n metric : str, callable\n Metric used to quantify the goodness of fit of the model.\n \n If string:\n {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n\n It callable:\n Function with arguments y_true, y_pred that returns a float.\n\n initial_train_size: int \n Number of samples in the initial train split.\n \n fixed_train_size: bool, default `True`\n If True, train size doesn't increases but moves by `steps` in each iteration.\n\n exog : pandas Series, pandas DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n lags_grid : list of int, lists, np.narray or range, default `None`\n Lists of `lags` to try. 
Only used if forecaster is an instance of \n `ForecasterAutoreg`, `ForecasterAutoregDirect` or `ForecasterAutoregMultiOutput`.\n \n refit: bool, default `False`\n Whether to re-fit the forecaster in each iteration of backtesting.\n \n n_trials: int, default `10`\n Number of parameter settings that are sampled in each lag configuration.\n\n random_state: int, default `123`\n Sets a seed to the sampling for reproducible output.\n\n return_best : bool, default `True`\n Refit the `forecaster` using the best found parameters on the whole data.\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n kwargs_gp_minimize : dict, default `{}`\n Other keyword arguments (key, value mappings) to pass to skopt.gp_minimize().\n\n Returns \n -------\n results: pandas DataFrame\n Results for each combination of parameters.\n column lags = predictions.\n column params = lower bound of the interval.\n column metric = metric value estimated for the combination of parameters.\n additional n columns with param = value.\n\n results_opt_best: scipy object\n The best optimization result returned as a OptimizeResult object.\n '''\n\n if isinstance(forecaster, ForecasterAutoregCustom):\n if lags_grid is not None:\n warnings.warn(\n '`lags_grid` ignored if forecaster is an instance of `ForecasterAutoregCustom`.'\n )\n lags_grid = ['custom predictors']\n \n elif lags_grid is None:\n lags_grid = [forecaster.lags]\n \n lags_list = []\n params_list = []\n metric_list = []\n results_opt_best = None\n\n for key in search_space.keys():\n if key != search_space[key].name:\n raise Exception(\n f'''Some of the key values do not match the Space object name from skopt.\n {key} != {search_space[key].name}.'''\n )\n\n search_space = list(search_space.values())\n\n # Objective function using backtesting_forecaster\n @use_named_args(search_space)\n def _objective(\n forecaster = forecaster,\n y = y,\n exog = exog,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n steps = steps,\n metric = metric,\n refit = refit,\n verbose = verbose,\n **params\n ) -> float:\n \n forecaster.set_params(**params)\n \n metric, _ = backtesting_forecaster(\n forecaster = forecaster,\n y = y,\n exog = exog,\n steps = steps,\n metric = metric,\n initial_train_size = initial_train_size,\n fixed_train_size = fixed_train_size,\n refit = refit,\n verbose = verbose\n )\n\n return abs(metric)\n\n print(\n f'''Number of models compared: {n_trials*len(lags_grid)}, {n_trials} bayesian search in each lag configuration.'''\n )\n\n for lags in tqdm(lags_grid, desc='loop lags_grid', position=0, ncols=90):\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(lags)\n lags = forecaster.lags.copy()\n \n results_opt = gp_minimize(\n func = _objective,\n dimensions = search_space,\n n_calls = n_trials,\n random_state = random_state,\n **kwargs_gp_minimize\n )\n\n for i, x in enumerate(results_opt.x_iters):\n params = {}\n for j, x in enumerate(search_space):\n params[x.name] = results_opt.x_iters[i][j]\n \n params_list.append(params)\n lags_list.append(lags)\n metric_list.append(results_opt.func_vals[i])\n\n if results_opt_best is None:\n results_opt_best = results_opt\n else:\n if results_opt.fun < results_opt_best.fun:\n results_opt_best = results_opt\n \n results = pd.DataFrame({\n 'lags' : lags_list,\n 'params': params_list,\n 'metric': metric_list})\n \n results = results.sort_values(by='metric', ascending=True)\n results 
= pd.concat([results, results['params'].apply(pd.Series)], axis=1)\n \n if return_best:\n \n best_lags = results['lags'].iloc[0]\n best_params = results['params'].iloc[0]\n best_metric = results['metric'].iloc[0]\n \n if isinstance(forecaster, (ForecasterAutoreg, ForecasterAutoregDirect, \n ForecasterAutoregMultiOutput)):\n forecaster.set_lags(best_lags)\n forecaster.set_params(**best_params)\n forecaster.fit(y=y, exog=exog)\n \n print(\n f\"`Forecaster` refitted using the best-found lags and parameters, and the whole data set: \\n\"\n f\" Lags: {best_lags} \\n\"\n f\" Parameters: {best_params}\\n\"\n f\" Backtesting metric: {best_metric}\\n\"\n )\n\n return results, results_opt_best" ]
[ [ "pandas.concat", "sklearn.model_selection.ParameterSampler", "pandas.DataFrame", "sklearn.model_selection.ParameterGrid", "numpy.array", "numpy.random.RandomState" ] ]