Columns: repo_name (string, 6–130 chars), hexsha (list), file_path (list), code (list), apis (list)
Heegreis/pytorch-YOLOv4
[ "99045748bd6fdbb55c7dac48ef82941c641e65c6" ]
[ "tool/utils.py" ]
[ "import sys\nimport os\nimport time\nimport math\nimport numpy as np\n\nimport itertools\nimport struct # get_image_size\nimport imghdr # get_image_size\n\n\ndef sigmoid(x):\n return 1.0 / (np.exp(-x) + 1.)\n\n\ndef softmax(x):\n x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))\n x = x / np.expand_dims(x.sum(axis=1), axis=1)\n return x\n\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n \n # print('iou box1:', box1)\n # print('iou box2:', box2)\n\n if x1y1x2y2:\n mx = min(box1[0], box2[0])\n Mx = max(box1[2], box2[2])\n my = min(box1[1], box2[1])\n My = max(box1[3], box2[3])\n w1 = box1[2] - box1[0]\n h1 = box1[3] - box1[1]\n w2 = box2[2] - box2[0]\n h2 = box2[3] - box2[1]\n else:\n w1 = box1[2]\n h1 = box1[3]\n w2 = box2[2]\n h2 = box2[3]\n\n mx = min(box1[0], box2[0])\n Mx = max(box1[0] + w1, box2[0] + w2)\n my = min(box1[1], box2[1])\n My = max(box1[1] + h1, box2[1] + h2)\n uw = Mx - mx\n uh = My - my\n cw = w1 + w2 - uw\n ch = h1 + h2 - uh\n carea = 0\n if cw <= 0 or ch <= 0:\n return 0.0\n\n area1 = w1 * h1\n area2 = w2 * h2\n carea = cw * ch\n uarea = area1 + area2 - carea\n return carea / uarea\n\n\ndef nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):\n # print(boxes.shape)\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n areas = (x2 - x1) * (y2 - y1)\n order = confs.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n idx_self = order[0]\n idx_other = order[1:]\n\n keep.append(idx_self)\n\n xx1 = np.maximum(x1[idx_self], x1[idx_other])\n yy1 = np.maximum(y1[idx_self], y1[idx_other])\n xx2 = np.minimum(x2[idx_self], x2[idx_other])\n yy2 = np.minimum(y2[idx_self], y2[idx_other])\n\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n inter = w * h\n\n if min_mode:\n over = inter / np.minimum(areas[order[0]], areas[order[1:]])\n else:\n over = inter / (areas[order[0]] + areas[order[1:]] - inter)\n\n inds = np.where(over <= nms_thresh)[0]\n order = order[inds + 1]\n \n return np.array(keep)\n\n\n\ndef plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):\n import cv2\n img = np.copy(img)\n colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n\n def get_color(c, x, max_val):\n ratio = float(x) / max_val * 5\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio = ratio - i\n r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]\n return int(r * 255)\n\n width = img.shape[1]\n height = img.shape[0]\n for i in range(len(boxes)):\n box = boxes[i]\n x1 = int(box[0] * width)\n y1 = int(box[1] * height)\n x2 = int(box[2] * width)\n y2 = int(box[3] * height)\n\n if color:\n rgb = color\n else:\n rgb = (255, 0, 0)\n if len(box) >= 7 and class_names:\n cls_conf = box[5]\n cls_id = box[6]\n print('%s: %f' % (class_names[cls_id], cls_conf))\n classes = len(class_names)\n offset = cls_id * 123457 % classes\n red = get_color(2, offset, classes)\n green = get_color(1, offset, classes)\n blue = get_color(0, offset, classes)\n if color is None:\n rgb = (red, green, blue)\n img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)\n img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)\n if savename:\n print(\"save plot results to %s\" % savename)\n cv2.imwrite(savename, img)\n return img\n\n\ndef read_truths(lab_path):\n if not os.path.exists(lab_path):\n return np.array([])\n if os.path.getsize(lab_path):\n truths = np.loadtxt(lab_path)\n truths = truths.reshape(truths.size / 5, 5) # to avoid single truth problem\n return 
truths\n else:\n return np.array([])\n\n\ndef load_class_names(namesfile):\n class_names = []\n with open(namesfile, 'r') as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.rstrip()\n class_names.append(line)\n return class_names\n\n\n\ndef post_processing(img, conf_thresh, nms_thresh, output):\n\n # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]\n # num_anchors = 9\n # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n # strides = [8, 16, 32]\n # anchor_step = len(anchors) // num_anchors\n\n # [batch, num, 1, 4]\n box_array = output[0]\n # [batch, num, num_classes]\n confs = output[1]\n\n t1 = time.time()\n\n if type(box_array).__name__ != 'ndarray':\n box_array = box_array.cpu().detach().numpy()\n confs = confs.cpu().detach().numpy()\n\n # [batch, num, 4]\n box_array = box_array[:, :, 0]\n\n # [batch, num, num_classes] --> [batch, num]\n max_conf = np.max(confs, axis=2)\n max_id = np.argmax(confs, axis=2)\n\n t2 = time.time()\n\n bboxes_batch = []\n for i in range(box_array.shape[0]):\n \n argwhere = max_conf[i] > conf_thresh\n l_box_array = box_array[i, argwhere, :]\n l_max_conf = max_conf[i, argwhere]\n l_max_id = max_id[i, argwhere]\n\n keep = nms_cpu(l_box_array, l_max_conf, nms_thresh)\n \n bboxes = []\n if (keep.size > 0):\n l_box_array = l_box_array[keep, :]\n l_max_conf = l_max_conf[keep]\n l_max_id = l_max_id[keep]\n\n for j in range(l_box_array.shape[0]):\n bboxes.append([l_box_array[j, 0], l_box_array[j, 1], l_box_array[j, 2], l_box_array[j, 3], l_max_conf[j], l_max_conf[j], l_max_id[j]])\n \n bboxes_batch.append(bboxes)\n\n t3 = time.time()\n\n print('-----------------------------------')\n print(' max and argmax : %f' % (t2 - t1))\n print(' nms : %f' % (t3 - t2))\n print('Post processing total : %f' % (t3 - t1))\n print('-----------------------------------')\n \n return bboxes_batch\n" ]
[ [ "numpy.max", "numpy.array", "numpy.minimum", "numpy.copy", "numpy.exp", "numpy.where", "numpy.loadtxt", "numpy.argmax", "numpy.maximum" ] ]
waiting-gy/Caltech_Pedestrian2
[ "41d7fb3e65866f7c50da556a0e4845149fdb0839" ]
[ "data/coco.py" ]
[ "from .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\n#COCO_ROOT = osp.join(HOME, 'data/coco/')\n#COCO_ROOT = \"/mnt/Younggao/coco/\"\nCOCO_ROOT = \"/kaggle/input/CaltechPedestrian2/coco\"\nIMAGES = 'images'\nANNOTATIONS = 'annotations'\nCOCO_API = 'PythonAPI'\nINSTANCES_SET = 'instances_{}.json'\nCOCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire', 'hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave oven', 'toaster', 'sink',\n 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush')\n\n\ndef get_label_map(label_file):\n label_map = {}\n #labels = open(label_file, 'r')\n #labels = open(\"/mnt/Younggao/Caltech_DataDetection/data/coco_labels.txt\", 'r')\n labels = open(\"/kaggle/working/Caltech_Pedestrian2/data/coco_labels.txt\", 'r')\n for line in labels:\n ids = line.split(',')\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\n\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a COCO annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n def __init__(self):\n self.label_map = get_label_map(osp.join(COCO_ROOT, 'coco_labels.txt'))\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... 
]\n\n\nclass COCODetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='trainval35k', transform=None,\n target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):\n sys.path.append(osp.join(root, COCO_API))\n from pycocotools.coco import COCO\n self.root = osp.join(root, IMAGES, image_set)\n self.coco = COCO(osp.join(root, ANNOTATIONS,\n INSTANCES_SET.format(image_set)))\n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, 
self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy" ] ]
icemtel/vedo
[ "b405064a10009b904e226b3f578e4c19aa50320a" ]
[ "examples/advanced/interpolateScalar2.py" ]
[ "\"\"\"Use scipy to interpolate the value of a scalar known on a set\nof points on a new set of points where the scalar is not defined.\n\nTwo interpolation methods are possible:\nRadial Basis Function (used here), and Nearest Point.\n\"\"\"\nimport numpy as np\nfrom vedo import *\nfrom scipy.interpolate import Rbf, NearestNDInterpolator as Near\n\nmesh = load(datadir+\"bunny.obj\").normalize()\npts = mesh.points()\n\n# pick a subset of 100 points where a scalar descriptor is known\nptsubset = pts[:100]\n\n# assume the descriptor value is some function of the point coord y\nx, y, z = np.split(ptsubset, 3, axis=1)\ndesc = 3*sin(4*y)\n\n# build the interpolator to determine the scalar value\n# for the rest of mesh vertices:\nitr = Rbf(x, y, z, desc) # Radial Basis Function interpolator\n#itr = Near(ptsubset, desc) # Nearest-neighbour interpolator\n\n# interpolate desciptor on the full set of mesh vertices\nxi, yi, zi = np.split(pts, 3, axis=1)\ninterpolated_desc = itr(xi, yi, zi)\n\nmesh.cmap('rainbow', interpolated_desc)\nmesh.addScalarBar(title='3sin(4y)')\nrpts = Points(ptsubset, r=8, c='white')\n\nshow(mesh, rpts, __doc__, axes=1)\n" ]
[ [ "scipy.interpolate.Rbf", "numpy.split" ] ]
ClementBM/coronavirus
[ "0cddd4042e163f1ac4460b7a97efcb9d419e99e2" ]
[ "coronavirus/tswindow.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n\nclass WindowGenerator:\n def __init__(\n self,\n input_width,\n label_width,\n shift,\n train_df,\n validation_df,\n test_df,\n stride=10,\n label_columns=None,\n ):\n \"\"\"\"\"\"\n # Store the raw data.\n self.train_df = train_df\n self.validation_df = validation_df\n self.test_df = test_df\n\n # Work out the label column indices.\n self.label_columns = label_columns\n if label_columns is not None:\n self.label_columns_indices = {\n name: i for i, name in enumerate(label_columns)\n }\n\n self.column_indices = {name: i for i, name in enumerate(train_df.columns)}\n\n # Work out the window parameters.\n self.input_width = input_width\n self.label_width = label_width\n self.shift = shift\n\n self.total_window_size = input_width + shift\n\n self.input_slice = slice(0, input_width)\n self.input_indices = np.arange(self.total_window_size)[self.input_slice]\n\n self.label_start = self.total_window_size - self.label_width\n self.labels_slice = slice(self.label_start, None)\n self.label_indices = np.arange(self.total_window_size)[self.labels_slice]\n\n # data pre processing\n self.shuffle = True\n self.stride = stride\n\n def __repr__(self):\n return \"\\n\".join(\n [\n f\"Total window size: {self.total_window_size}\",\n f\"Input indices: {self.input_indices}\",\n f\"Label indices: {self.label_indices}\",\n f\"Label column name(s): {self.label_columns}\",\n ]\n )\n\n def split_window(self, features):\n inputs = features[:, self.input_slice, :]\n labels = features[:, self.labels_slice, :]\n if self.label_columns is not None:\n labels = tf.stack(\n [\n labels[:, :, self.column_indices[name]]\n for name in self.label_columns\n ],\n axis=-1,\n )\n\n # Slicing doesn't preserve static shape information, so set the shapes\n # manually. 
This way the `tf.data.Datasets` are easier to inspect.\n inputs.set_shape([None, self.input_width, None])\n labels.set_shape([None, self.label_width, None])\n\n return inputs, labels\n\n def make_dataset(self, data) -> tf.data.Dataset:\n data = np.array(data[self.label_columns], dtype=np.float32)\n dataset = tf.keras.preprocessing.timeseries_dataset_from_array(\n data=data,\n targets=None,\n sequence_length=self.total_window_size,\n sequence_stride=self.stride,\n shuffle=self.shuffle,\n batch_size=32,\n )\n\n dataset = dataset.map(self.split_window)\n\n return dataset\n\n @property\n def train(self):\n \"\"\"\n element_spec\n \"\"\"\n return self.make_dataset(data=self.train_df)\n\n @property\n def validation(self):\n return self.make_dataset(data=self.validation_df)\n\n @property\n def test(self):\n return self.make_dataset(data=self.test_df)\n\n def plot(self, inputs, labels, label_column, model=None, max_subplots=5):\n plt.figure(figsize=(12, 8))\n plot_col_index = self.column_indices[label_column]\n max_n = min(max_subplots, len(inputs))\n\n for n in range(max_n):\n plt.subplot(max_n, 1, n + 1)\n plt.ylabel(f\"{label_column} [normed]\")\n plt.plot(\n self.input_indices,\n inputs[n, :, plot_col_index],\n label=\"Inputs\",\n marker=\".\",\n zorder=-10,\n )\n\n if self.label_columns:\n label_col_index = self.label_columns_indices.get(label_column, None)\n else:\n label_col_index = plot_col_index\n\n if label_col_index is None:\n continue\n\n plt.scatter(\n self.label_indices,\n labels[n, :, label_col_index],\n edgecolors=\"k\",\n label=\"Labels\",\n c=\"#2ca02c\",\n s=64,\n )\n if model is not None:\n predictions = model(inputs)\n\n if predictions.shape[2] == 1:\n label_col_index = 0\n\n plt.scatter(\n self.label_indices,\n predictions[n, :, label_col_index],\n marker=\"X\",\n edgecolors=\"k\",\n label=\"Predictions\",\n c=\"#ff7f0e\",\n s=64,\n )\n\n if n == 0:\n plt.legend()\n\n plt.xlabel(\"Time 100ms\")\n" ]
[ [ "numpy.array", "matplotlib.pyplot.xlabel", "tensorflow.keras.preprocessing.timeseries_dataset_from_array", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.arange", "tensorflow.stack", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.subplot" ] ]
nkibrislioglu/NLP_Disaster_Response_Pipelines
[ "d1c8db73d0927b7600991fe5bc11b02967683984" ]
[ "app/run.py" ]
[ "import json\nimport plotly\nimport pandas as pd\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///data/DisasterResponse.db')\ndf = pd.read_sql_table('final_table', engine)\n\n# load model\nmodel = joblib.load(\"models/classifier.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n category_names=df.columns.values[5:]\n category_counts=df[category_names].sum()\n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graphs = [\n \n {\n 'data': [\n Bar(\n x=category_names,\n y=category_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Categories',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Categories\"\n }\n }\n },\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "sklearn.externals.joblib.load", "pandas.read_sql_table" ] ]
microsoft/GLIP
[ "fd52c6361f013e70ae7682d90b3ab3ca2bd5e6bc" ]
[ "maskrcnn_benchmark/modeling/rpn/modeling_bert.py" ]
[ "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"PyTorch BERT model. \"\"\"\r\n\r\n\r\nimport math\r\nimport os\r\nimport warnings\r\nfrom dataclasses import dataclass\r\nfrom typing import Optional, Tuple\r\n\r\nimport torch\r\nimport torch.utils.checkpoint\r\nfrom torch import nn\r\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\r\nfrom transformers.activations import ACT2FN\r\nimport pdb\r\nfrom transformers.modeling_utils import find_pruneable_heads_and_indices, prune_linear_layer\r\n\r\n\r\ndef clamp_values(vector, min_val = -50000, max_val = 50000):\r\n vector = torch.clamp(vector, min = min_val, max = max_val)\r\n return vector\r\n\r\n\r\nclass BertSelfAttention(nn.Module):\r\n def __init__(self, config, clamp_min_for_underflow=False, clamp_max_for_overflow=False):\r\n super().__init__()\r\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\r\n raise ValueError(\r\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\r\n f\"heads ({config.num_attention_heads})\"\r\n )\r\n\r\n self.num_attention_heads = config.num_attention_heads\r\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\r\n self.all_head_size = self.num_attention_heads * self.attention_head_size\r\n\r\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\r\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\r\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\r\n\r\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\r\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\r\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\r\n self.max_position_embeddings = config.max_position_embeddings\r\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\r\n self.clamp_min_for_underflow = clamp_min_for_underflow\r\n self.clamp_max_for_overflow = clamp_max_for_overflow\r\n\r\n self.is_decoder = config.is_decoder\r\n\r\n def transpose_for_scores(self, x):\r\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\r\n x = x.view(*new_x_shape)\r\n return x.permute(0, 2, 1, 3)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n past_key_value=None,\r\n output_attentions=False,\r\n ):\r\n mixed_query_layer = self.query(hidden_states)\r\n\r\n # If this is instantiated as a cross-attention module, the keys\r\n # and values come from an encoder; the attention mask needs to be\r\n # such that the encoder's padding tokens are not attended to.\r\n is_cross_attention = 
encoder_hidden_states is not None\r\n\r\n if is_cross_attention and past_key_value is not None:\r\n # reuse k,v, cross_attentions\r\n key_layer = past_key_value[0]\r\n value_layer = past_key_value[1]\r\n attention_mask = encoder_attention_mask\r\n elif is_cross_attention:\r\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\r\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\r\n attention_mask = encoder_attention_mask\r\n elif past_key_value is not None:\r\n key_layer = self.transpose_for_scores(self.key(hidden_states))\r\n value_layer = self.transpose_for_scores(self.value(hidden_states))\r\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\r\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\r\n else:\r\n key_layer = self.transpose_for_scores(self.key(hidden_states))\r\n value_layer = self.transpose_for_scores(self.value(hidden_states))\r\n\r\n query_layer = self.transpose_for_scores(mixed_query_layer)\r\n\r\n if self.is_decoder:\r\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\r\n # Further calls to cross_attention layer can then reuse all cross-attention\r\n # key/value_states (first \"if\" case)\r\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\r\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\r\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\r\n # if encoder bi-directional self-attention `past_key_value` is always `None`\r\n past_key_value = (key_layer, value_layer)\r\n\r\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\r\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\r\n\r\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\r\n seq_length = hidden_states.size()[1]\r\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\r\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\r\n distance = position_ids_l - position_ids_r\r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\r\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\r\n\r\n if self.position_embedding_type == \"relative_key\":\r\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\r\n attention_scores = attention_scores + relative_position_scores\r\n elif self.position_embedding_type == \"relative_key_query\":\r\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\r\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\r\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\r\n\r\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\r\n\r\n if self.clamp_min_for_underflow:\r\n attention_scores = torch.clamp(attention_scores, min=-50000) # Do not increase -50000, data type half has quite limited range\r\n if self.clamp_max_for_overflow:\r\n attention_scores = torch.clamp(attention_scores, max=50000) # Do not increase 50000, data type half has quite limited range\r\n\r\n if attention_mask is not None:\r\n # Apply the attention mask is 
(precomputed for all layers in BertModel forward() function)\r\n attention_scores = attention_scores + attention_mask\r\n\r\n # Normalize the attention scores to probabilities.\r\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\r\n\r\n # if math.isnan(attention_probs.sum().item()):\r\n # for i in range(attention_probs.size(1)):\r\n # for j in range(attention_probs.size(2)):\r\n # if math.isnan(attention_probs[0, i, j].sum().item()):\r\n # print(i, j)\r\n # pdb.set_trace()\r\n\r\n # This is actually dropping out entire tokens to attend to, which might\r\n # seem a bit unusual, but is taken from the original Transformer paper.\r\n attention_probs = self.dropout(attention_probs)\r\n\r\n # Mask heads if we want to\r\n if head_mask is not None:\r\n attention_probs = attention_probs * head_mask\r\n\r\n context_layer = torch.matmul(attention_probs, value_layer)\r\n\r\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\r\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\r\n context_layer = context_layer.view(*new_context_layer_shape)\r\n\r\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\r\n\r\n if self.is_decoder:\r\n outputs = outputs + (past_key_value,)\r\n return outputs\r\n\r\n\r\nclass BertSelfOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\r\n return hidden_states\r\n\r\n\r\nclass BertAttention(nn.Module):\r\n def __init__(self, config, clamp_min_for_underflow=False, clamp_max_for_overflow=False):\r\n super().__init__()\r\n self.self = BertSelfAttention(config, clamp_min_for_underflow, clamp_max_for_overflow)\r\n self.output = BertSelfOutput(config)\r\n self.pruned_heads = set()\r\n\r\n def prune_heads(self, heads):\r\n if len(heads) == 0:\r\n return\r\n heads, index = find_pruneable_heads_and_indices(\r\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\r\n )\r\n\r\n # Prune linear layers\r\n self.self.query = prune_linear_layer(self.self.query, index)\r\n self.self.key = prune_linear_layer(self.self.key, index)\r\n self.self.value = prune_linear_layer(self.self.value, index)\r\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\r\n\r\n # Update hyper params and store pruned heads\r\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\r\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\r\n self.pruned_heads = self.pruned_heads.union(heads)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n past_key_value=None,\r\n output_attentions=False,\r\n ):\r\n self_outputs = self.self(\r\n hidden_states,\r\n attention_mask,\r\n head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n past_key_value,\r\n output_attentions,\r\n )\r\n attention_output = self.output(self_outputs[0], hidden_states)\r\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\r\n return outputs\r\n\r\n\r\nclass 
BertIntermediate(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\r\n if isinstance(config.hidden_act, str):\r\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\r\n else:\r\n self.intermediate_act_fn = config.hidden_act\r\n\r\n def forward(self, hidden_states):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = clamp_values(hidden_states)\r\n hidden_states = self.intermediate_act_fn(hidden_states)\r\n hidden_states = clamp_values(hidden_states)\r\n return hidden_states\r\n\r\n\r\nclass BertOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\r\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n hidden_states = clamp_values(hidden_states)\r\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\r\n hidden_states = clamp_values(hidden_states)\r\n return hidden_states\r\n\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LayerNorm", "torch.cat", "torch.nn.Softmax", "torch.einsum", "torch.arange", "torch.clamp", "torch.matmul", "torch.nn.Embedding" ] ]
anferben/telemarketing_campaign_success
[ "68a60a63044f65f03afa66ef337e004380116133" ]
[ "scripts/train.py" ]
[ "import pickle\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\n\n\n# Parameters\n\nn_splits = 5\noutput_file = f'rf_model.bin'\n\n\n# train and predict funcionts\n\ndef train(df_train, y_train):\n train_dict = df_train[numerical + categorical].to_dict(orient='records')\n\n dv = DictVectorizer(sparse=False)\n X_train = dv.fit_transform(train_dict)\n\n model = RandomForestClassifier(n_estimators=50, max_depth=10, min_samples_leaf=3)\n model.fit(X_train, y_train)\n\n return dv, model \n\n\ndef predict(df, dv, model):\n df_dict = df[numerical + categorical].to_dict(orient='records')\n\n X = dv.transform(df_dict)\n y_pred = model.predict_proba(X)[:, 1]\n\n return y_pred\n\n\n# Data preparation\n\ndf = pd.read_csv('../data/bank-full.csv', sep=';')\n\ndf.drop_duplicates(inplace=True)\ndf.drop(['day', 'month', 'contact'], axis=1, inplace=True)\n\ndf.rename(columns={'y': 'success'}, inplace=True)\ndf.success = (df.success == 'yes').astype('int')\n\ndf_full_train, df_test = train_test_split(df, test_size=0.2, random_state=7)\n\nnumerical = ['age', 'balance', 'duration', 'campaign', 'pdays', 'previous']\ncategorical = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'poutcome']\n\n\n# Validating the model\n\nprint(f'Validating the model...')\n\nkfold = KFold(n_splits=n_splits, shuffle=True, random_state=1)\n\nscores = []\nfold = 0\n\nfor train_idx, val_idx in kfold.split(df_full_train):\n df_train = df_full_train.iloc[train_idx]\n df_val = df_full_train.iloc[val_idx]\n\n y_train = df_train['success'].values\n y_val = df_val['success'].values\n\n dv, model = train(df_train, y_train)\n y_pred = predict(df_val, dv, model)\n\n auc = roc_auc_score(y_val, y_pred)\n scores.append(auc)\n\n print(f'AUC on fold {fold} is {auc}')\n\n fold = fold + 1\n\nprint()\nprint('Vaidation results:')\nprint('%.3f +- %.3f' % (np.mean(scores), np.std(scores)))\n\n\n# Training the final model\n\nprint()\nprint('Training the final model...')\n\ndv, model = train(df_full_train, df_full_train['success'].values)\ny_pred = predict(df_test, dv, model)\n\ny_test = df_test['success'].values\nauc = roc_auc_score(y_test, y_pred)\n\nprint(f'AUC = {auc}')\n\nwith open(output_file, 'wb') as f_out:\n pickle.dump((dv, model), f_out)\n\nprint()\nprint(f'The model was saved to {output_file}')\n\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.mean", "sklearn.model_selection.KFold", "numpy.std", "sklearn.model_selection.train_test_split", "pandas.read_csv", "sklearn.feature_extraction.DictVectorizer", "sklearn.metrics.roc_auc_score" ] ]
saArbabi/rl-agents
[ "18ae779f015748eefb346e34b8406a3e4ff16208" ]
[ "rl_agents/agents/tree_search/abstract.py" ]
[ "import logging\nfrom collections import defaultdict\n\nimport gym\nimport numpy as np\nfrom gym.utils import seeding\n\nfrom rl_agents.agents.common.abstract import AbstractAgent\nfrom rl_agents.agents.common.factory import preprocess_env, safe_deepcopy_env\nfrom rl_agents.configuration import Configurable\nfrom rl_agents.agents.tree_search.graphics import TreePlot\n\nlogger = logging.getLogger(__name__)\n\n\nclass AbstractTreeSearchAgent(AbstractAgent):\n PLANNER_TYPE = None\n NODE_TYPE = None\n\n def __init__(self,\n env,\n config=None):\n \"\"\"\n A new Tree Search agent.\n :param env: The environment\n :param config: The agent configuration. Use default if None.\n \"\"\"\n super(AbstractTreeSearchAgent, self).__init__(config)\n self.env = env\n self.planner = self.make_planner()\n self.previous_actions = []\n self.remaining_horizon = 0\n self.steps = 0\n\n @classmethod\n def default_config(cls):\n return {\n \"env_preprocessors\": [],\n \"display_tree\": False,\n \"receding_horizon\": 1,\n \"terminal_reward\": 0\n }\n\n def make_planner(self):\n if self.PLANNER_TYPE:\n return self.PLANNER_TYPE(self.env, self.config)\n else:\n raise NotImplementedError()\n\n def plan(self, observation):\n \"\"\"\n Plan an optimal sequence of actions.\n\n Start by updating the previously found tree with the last action performed.\n\n :param observation: the current state\n :return: the list of actions\n \"\"\"\n self.steps += 1\n replanning_required = self.step(self.previous_actions)\n if replanning_required:\n env = preprocess_env(self.env, self.config[\"env_preprocessors\"])\n actions = self.planner.plan(state=env, observation=observation)\n else:\n actions = self.previous_actions[1:]\n self.write_tree()\n\n self.previous_actions = actions\n return actions\n\n def step(self, actions):\n \"\"\"\n Handle receding horizon mechanism\n :return: whether a replanning is required\n \"\"\"\n replanning_required = self.remaining_horizon == 0 or len(actions) <= 1\n if replanning_required:\n self.remaining_horizon = self.config[\"receding_horizon\"] - 1\n else:\n self.remaining_horizon -= 1\n\n self.planner.step_tree(actions)\n return replanning_required\n\n def reset(self):\n self.planner.step_by_reset()\n self.remaining_horizon = 0\n self.steps = 0\n\n def seed(self, seed=None):\n return self.planner.seed(seed)\n\n def record(self, state, action, reward, next_state, done, info):\n pass\n\n def act(self, state):\n return self.plan(state)[0]\n\n def save(self, filename):\n return False\n\n def load(self, filename):\n return False\n\n def write_tree(self):\n if self.config[\"display_tree\"] and self.writer:\n TreePlot(self.planner, max_depth=6).plot_to_writer(self.writer, epoch=self.steps, show=True)\n\n\nclass AbstractPlanner(Configurable):\n def __init__(self, config=None):\n super(AbstractPlanner, self).__init__(config)\n self.np_random = None\n self.root = None\n self.observations = []\n self.reset()\n self.seed()\n\n @classmethod\n def default_config(cls):\n return dict(budget=500,\n gamma=0.8,\n step_strategy=\"reset\")\n\n def seed(self, seed=None):\n \"\"\"\n Seed the planner randomness source, e.g. 
for rollout policy\n :param seed: the seed to be used\n :return: the used seed\n \"\"\"\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def plan(self, state, observation):\n \"\"\"\n Plan an optimal sequence of actions.\n\n :param state: the initial environment state\n :param observation: the corresponding state observation\n :return: the actions sequence\n \"\"\"\n raise NotImplementedError()\n\n def get_plan(self):\n \"\"\"\n Get the optimal action sequence of the current tree by recursively selecting the best action within each\n node with no exploration.\n\n :return: the list of actions\n \"\"\"\n actions = []\n node = self.root\n while node.children:\n action = node.selection_rule()\n actions.append(action)\n node = node.children[action]\n return actions\n\n def step(self, state, action):\n observation, reward, done, info = state.step(action)\n self.observations.append(observation)\n return observation, reward, done, info\n\n def get_visits(self):\n visits = defaultdict(int)\n for observation in self.observations:\n visits[str(observation)] += 1\n return visits\n\n def get_updates(self):\n return defaultdict(int)\n\n def step_tree(self, actions):\n \"\"\"\n Update the planner tree when the agent performs an action\n\n :param actions: a sequence of actions to follow from the root node\n \"\"\"\n if self.config[\"step_strategy\"] == \"reset\":\n self.step_by_reset()\n elif self.config[\"step_strategy\"] == \"subtree\":\n if actions:\n self.step_by_subtree(actions[0])\n else:\n self.step_by_reset()\n else:\n logger.warning(\"Unknown step strategy: {}\".format(self.config[\"step_strategy\"]))\n self.step_by_reset()\n\n def step_by_reset(self):\n \"\"\"\n Reset the planner tree to a root node for the new state.\n \"\"\"\n self.reset()\n\n def step_by_subtree(self, action):\n \"\"\"\n Replace the planner tree by its subtree corresponding to the chosen action.\n\n :param action: a chosen action from the root node\n \"\"\"\n if action in self.root.children:\n self.root = self.root.children[action]\n self.root.parent = None\n else:\n # The selected action was never explored, start a new tree.\n self.step_by_reset()\n\n def reset(self):\n raise NotImplementedError\n\n\nclass Node(object):\n \"\"\"\n A tree node\n \"\"\"\n\n def __init__(self, parent, planner):\n \"\"\"\n New node.\n\n :param parent: its parent node\n :param planner: the planner using the node\n \"\"\"\n self.parent = parent\n self.planner = planner\n\n self.children = {}\n \"\"\" Dict of children nodes, indexed by action labels\"\"\"\n\n self.count = 0\n \"\"\" Number of times the node was visited.\"\"\"\n\n self.value_upper = 0\n \"\"\" Estimated value of the node's action sequence\"\"\"\n\n def get_value(self):\n return self.value_upper\n\n def expand(self, branching_factor):\n for a in range(branching_factor):\n self.children[a] = type(self)(self, self.planner)\n\n def selection_rule(self):\n raise NotImplementedError()\n\n @staticmethod\n def breadth_first_search(root, operator=None, condition=None, condition_blocking=True):\n \"\"\"\n Breadth-first search of all paths to nodes that meet a given condition\n\n :param root: starting node\n :param operator: will be applied to all traversed nodes\n :param condition: nodes meeting that condition will be returned\n :param condition_blocking: do not explore a node which met the condition\n :return: list of paths to nodes that met the condition\n \"\"\"\n queue = [(root, [])]\n while queue:\n (node, path) = queue.pop(0)\n if (condition is None) or 
condition(node):\n returned = operator(node, path) if operator else (node, path)\n yield returned\n if (condition is None) or not condition_blocking or not condition(node):\n for next_key, next_node in node.children.items():\n queue.append((next_node, path + [next_key]))\n\n def is_leaf(self):\n return not self.children\n\n def path(self):\n \"\"\"\n :return: sequence of action labels from the root to the node\n \"\"\"\n node = self\n path = []\n while node.parent:\n for a in node.parent.children:\n if node.parent.children[a] == node:\n path.append(a)\n break\n node = node.parent\n return reversed(path)\n\n def sequence(self):\n \"\"\"\n :return: sequence of nodes from the root to the node\n \"\"\"\n node = self\n path = [node]\n while node.parent:\n path.append(node.parent)\n node = node.parent\n return reversed(path)\n\n @staticmethod\n def all_argmax(x):\n \"\"\"\n :param x: a set\n :return: the list of indexes of all maximums of x\n \"\"\"\n m = np.amax(x)\n return np.nonzero(x == m)[0]\n\n def random_argmax(self, x):\n \"\"\"\n Randomly tie-breaking arg max\n :param x: an array\n :return: a random index among the maximums\n \"\"\"\n indices = Node.all_argmax(x)\n return self.planner.np_random.choice(indices)\n\n def __str__(self):\n return \"{} (n:{}, v:{:.2f})\".format(list(self.path()), self.count, self.get_value())\n\n def __repr__(self):\n return '<node {}>'.format(id(self))\n\n def get_trajectories(self, full_trajectories=True, include_leaves=True):\n \"\"\"\n Get a list of visited nodes corresponding to the node subtree\n\n :param full_trajectories: return a list of observation sequences, else a list of observations\n :param include_leaves: include leaves or only expanded nodes\n :return: the list of trajectories\n \"\"\"\n trajectories = []\n if self.children:\n for action, child in self.children.items():\n child_trajectories = child.get_trajectories(full_trajectories, include_leaves)\n if full_trajectories:\n trajectories.extend([[self] + trajectory for trajectory in child_trajectories])\n else:\n trajectories.extend(child_trajectories)\n if not full_trajectories:\n trajectories.append(self)\n elif include_leaves:\n trajectories = [[self]] if full_trajectories else [self]\n return trajectories\n\n def get_obs_visits(self, state=None):\n visits = defaultdict(int)\n updates = defaultdict(int)\n if hasattr(self, \"observation\"):\n for node in self.get_trajectories(full_trajectories=False,\n include_leaves=False):\n if hasattr(node, \"observation\"):\n visits[str(node.observation)] += 1\n if hasattr(node, \"updates_count\"):\n updates[str(node.observation)] += node.updates_count\n else: # Replay required\n for node in self.get_trajectories(full_trajectories=False,\n include_leaves=False):\n replay_state = safe_deepcopy_env(state)\n for action in node.path():\n observation, _, _, _ = replay_state.step(action)\n visits[str(observation)] += 1\n return visits, updates\n" ]
[ [ "numpy.amax", "numpy.nonzero" ] ]
denyslazarenko/neural_prophet
[ "868e3f23c2a565ea14d54fdea2ff6d199f30a5c8" ]
[ "neuralprophet/df_utils.py" ]
[ "from collections import OrderedDict\nfrom dataclasses import dataclass\nimport pandas as pd\nimport numpy as np\nimport logging\nimport math\n\n\nlog = logging.getLogger(\"NP.df_utils\")\n\n\n@dataclass\nclass ShiftScale:\n shift: float = 0.0\n scale: float = 1.0\n\n\ndef prep_copy_df_dict(df):\n \"\"\"Creates or copy a df_dict based on the df input. It either converts a pd.DataFrame to a dict or copies it in case of a dict input.\n Args:\n df (pd.DataFrame,dict): containing df or dict with group of dfs\n Returns:\n df_dict: dict of dataframes or copy of dict of dataframes\n \"\"\"\n received_unnamed_df = False\n if isinstance(df, dict):\n df_dict = {key: df_aux.copy(deep=True) for (key, df_aux) in df.items()}\n elif isinstance(df, pd.DataFrame):\n received_unnamed_df = True\n df_dict = {\"__df__\": df.copy(deep=True)}\n elif df is None:\n return None, None\n else:\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n return df_dict, received_unnamed_df\n\n\ndef maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df=True):\n \"\"\"extract dataframe from single length dict if placeholder-named.\n\n Args\n df_dict (dict): dict with potentially single pd.DataFrame\n received_unnamed_df (bool): whether the input was unnamed\n Returns:\n df (pd.Dataframe, dict): original input format - dict or df\n \"\"\"\n if received_unnamed_df and isinstance(df_dict, dict) and len(df_dict) == 1:\n if list(df_dict.keys())[0] == \"__df__\":\n return df_dict[\"__df__\"]\n else:\n return df_dict\n\n\ndef join_dataframes(df_dict):\n \"\"\"Join dict of dataframes preserving the episodes so it can be recovered later.\n\n Args:\n df_dict (dict of pd.DataFrame): containing column 'ds', 'y' with training data\n\n Returns:\n df_joined: Dataframe with concatenated episodes\n episodes: list containing keys of each timestamp\n \"\"\"\n if not isinstance(df_dict, dict):\n raise ValueError(\"can not join other than dicts of DataFrames.\")\n episodes = []\n for key in df_dict:\n episodes = episodes + [key] * len(df_dict[key])\n df_joined = pd.concat(df_dict, ignore_index=True)\n return df_joined, episodes\n\n\ndef recover_dataframes(df_joined, episodes):\n \"\"\"Recover dict of dataframes accordingly to Episodes.\n\n Args:\n df_joined (pd.DataFrame): Dataframe concatenated containing column 'ds', 'y' with training data\n episodes: List containing the episodes from each timestamp\n\n Returns:\n df_dict: Original dict before concatenation\n \"\"\"\n df_joined.insert(0, \"eps\", episodes)\n df_dict = {key: df for key, df in df_joined.groupby(\"eps\")}\n df_dict = {key: df.drop([\"eps\"], axis=1) for (key, df) in df_dict.items()}\n return df_dict\n\n\ndef data_params_definition(df, normalize, covariates_config=None, regressor_config=None, events_config=None):\n \"\"\"Initialize data scaling values.\n\n Note: We do a z normalization on the target series 'y',\n unlike OG Prophet, which does shift by min and scale by max.\n Args:\n df (pd.DataFrame): Time series to compute normalization parameters from.\n normalize (str): Type of normalization to apply to the time series.\n options: [ 'off', 'minmax, 'standardize', 'soft', 'soft1']\n default: 'soft', unless the time series is binary, in which case 'minmax' is applied.\n 'off' bypasses data normalization\n 'minmax' scales the minimum value to 0.0 and the maximum value to 1.0\n 'standardize' zero-centers and divides by the standard deviation\n 'soft' scales the minimum value to 0.0 and the 95th quantile to 1.0\n 'soft1' scales the minimum value to 
0.1 and the 90th quantile to 0.9\n covariates_config (OrderedDict): extra regressors with sub_parameters\n normalize (bool)\n regressor_config (OrderedDict): extra regressors (with known future values)\n with sub_parameters normalize (bool)\n events_config (OrderedDict): user specified events configs\n\n\n Returns:\n data_params (OrderedDict): scaling values\n with ShiftScale entries containing 'shift' and 'scale' parameters\n \"\"\"\n data_params = OrderedDict({})\n if df[\"ds\"].dtype == np.int64:\n df.loc[:, \"ds\"] = df.loc[:, \"ds\"].astype(str)\n df.loc[:, \"ds\"] = pd.to_datetime(df.loc[:, \"ds\"])\n data_params[\"ds\"] = ShiftScale(\n shift=df[\"ds\"].min(),\n scale=df[\"ds\"].max() - df[\"ds\"].min(),\n )\n if \"y\" in df:\n data_params[\"y\"] = get_normalization_params(\n array=df[\"y\"].values,\n norm_type=normalize,\n )\n\n if covariates_config is not None:\n for covar in covariates_config.keys():\n if covar not in df.columns:\n raise ValueError(\"Covariate {} not found in DataFrame.\".format(covar))\n data_params[covar] = get_normalization_params(\n array=df[covar].values,\n norm_type=covariates_config[covar].normalize,\n )\n\n if regressor_config is not None:\n for reg in regressor_config.keys():\n if reg not in df.columns:\n raise ValueError(\"Regressor {} not found in DataFrame.\".format(reg))\n data_params[reg] = get_normalization_params(\n array=df[reg].values,\n norm_type=regressor_config[reg].normalize,\n )\n if events_config is not None:\n for event in events_config.keys():\n if event not in df.columns:\n raise ValueError(\"Event {} not found in DataFrame.\".format(event))\n data_params[event] = ShiftScale()\n return data_params\n\n\ndef init_data_params(\n df_dict,\n normalize=\"auto\",\n covariates_config=None,\n regressor_config=None,\n events_config=None,\n global_normalization=False,\n global_time_normalization=False,\n):\n \"\"\"Initialize data scaling values.\n\n Note: We compute and store local and global normalization parameters independent of settings.\n Args:\n df (dict): dict of DataFrames to compute normalization parameters from.\n normalize (str): Type of normalization to apply to the time series.\n options: ['soft', 'off', 'minmax, 'standardize']\n default: 'soft' scales minimum to 0.1 and the 90th quantile to 0.9\n covariates_config (OrderedDict): extra regressors with sub_parameters\n normalize (bool)\n regressor_config (OrderedDict): extra regressors (with known future values)\n with sub_parameters normalize (bool)\n events_config (OrderedDict): user specified events configs\n global_normalization (bool): True: sets global modeling training with global normalization\n (otherwise, global modeling is trained with local normalization)\n global_time_normalization (bool): True: normalize time globally across all time series\n False: normalize time locally for each time series\n (only valid in case of global modeling - local normalization)\n Returns:\n local_data_params (OrderedDict): nested dict with data_params for each dataset where each contains\n ShiftScale entries containing 'shift' and 'scale' parameters for each column\n global_data_params (OrderedDict): ShiftScale entries containing 'shift' and 'scale' parameters for each column\n \"\"\"\n # Compute Global data params\n df_merged, _ = join_dataframes(prep_copy_df_dict(df_dict)[0])\n global_data_params = data_params_definition(\n df_merged, normalize, covariates_config, regressor_config, events_config\n )\n if global_normalization:\n log.debug(\n \"Global Normalization Data Parameters (shift, 
scale): {}\".format(\n [(k, v) for k, v in global_data_params.items()]\n )\n )\n # Compute individual data params\n local_data_params = OrderedDict()\n for key, df_i in df_dict.items():\n local_data_params[key] = data_params_definition(\n df_i, normalize, covariates_config, regressor_config, events_config\n )\n if global_time_normalization:\n # Overwrite local time normalization data_params with global values (pointer)\n local_data_params[key][\"ds\"] = global_data_params[\"ds\"]\n if not global_normalization:\n log.debug(\n \"Local Normalization Data Parameters (shift, scale): {}\".format(\n [(k, v) for k, v in local_data_params[key].items()]\n )\n )\n return local_data_params, global_data_params\n\n\ndef auto_normalization_setting(array):\n if len(np.unique(array)) < 2:\n log.error(\"encountered variable with one unique value\")\n raise ValueError\n # elif set(series.unique()) in ({True, False}, {1, 0}, {1.0, 0.0}, {-1, 1}, {-1.0, 1.0}):\n elif len(np.unique(array)) == 2:\n return \"minmax\" # Don't standardize binary variables.\n else:\n return \"soft\" # default setting\n\n\ndef get_normalization_params(array, norm_type):\n if norm_type == \"auto\":\n norm_type = auto_normalization_setting(array)\n shift = 0.0\n scale = 1.0\n if norm_type == \"soft\":\n lowest = np.min(array)\n q95 = np.quantile(array, 0.95, interpolation=\"higher\")\n width = q95 - lowest\n if math.isclose(width, 0):\n width = np.max(array) - lowest\n shift = lowest\n scale = width\n elif norm_type == \"soft1\":\n lowest = np.min(array)\n q90 = np.quantile(array, 0.9, interpolation=\"higher\")\n width = q90 - lowest\n if math.isclose(width, 0):\n width = (np.max(array) - lowest) / 1.25\n shift = lowest - 0.125 * width\n scale = 1.25 * width\n elif norm_type == \"minmax\":\n shift = np.min(array)\n scale = np.max(array) - shift\n elif norm_type == \"standardize\":\n shift = np.mean(array)\n scale = np.std(array)\n elif norm_type != \"off\":\n log.error(\"Normalization {} not defined.\".format(norm_type))\n return ShiftScale(shift, scale)\n\n\ndef normalize(df, data_params):\n \"\"\"Apply data scales.\n\n Applies data scaling factors to df using data_params.\n\n Args:\n df (pd.DataFrame): with columns 'ds', 'y', (and potentially more regressors)\n data_params (OrderedDict): scaling values,as returned by init_data_params\n with ShiftScale entries containing 'shift' and 'scale' parameters\n Returns:\n df: pd.DataFrame, normalized\n \"\"\"\n for name in df.columns:\n if name not in data_params.keys():\n raise ValueError(\"Unexpected column {} in data\".format(name))\n new_name = name\n if name == \"ds\":\n new_name = \"t\"\n if name == \"y\":\n new_name = \"y_scaled\"\n df[new_name] = df[name].sub(data_params[name].shift).div(data_params[name].scale)\n return df\n\n\ndef check_single_dataframe(df, check_y, covariates, regressors, events):\n \"\"\"Performs basic data sanity checks and ordering\n\n Prepare dataframe for fitting or predicting.\n Args:\n df (pd.DataFrame): with columns ds\n check_y (bool): if df must have series values\n set to True if training or predicting with autoregression\n covariates (list or dict): covariate column names\n regressors (list or dict): regressor column names\n events (list or dict): event column names\n\n Returns:\n pd.DataFrame\n \"\"\"\n if df.shape[0] == 0:\n raise ValueError(\"Dataframe has no rows.\")\n\n if \"ds\" not in df:\n raise ValueError('Dataframe must have columns \"ds\" with the dates.')\n if df.loc[:, \"ds\"].isnull().any():\n raise ValueError(\"Found NaN in column 
ds.\")\n if df[\"ds\"].dtype == np.int64:\n df.loc[:, \"ds\"] = df.loc[:, \"ds\"].astype(str)\n if not np.issubdtype(df[\"ds\"].dtype, np.datetime64):\n df.loc[:, \"ds\"] = pd.to_datetime(df.loc[:, \"ds\"])\n if df[\"ds\"].dt.tz is not None:\n raise ValueError(\"Column ds has timezone specified, which is not supported. Remove timezone.\")\n\n # FIX Issue #53: Data: fail with specific error message when data contains duplicate date entries.\n if len(df.ds.unique()) != len(df.ds):\n raise ValueError(\"Column ds has duplicate values. Please remove duplicates.\")\n # END FIX\n\n columns = []\n if check_y:\n columns.append(\"y\")\n if covariates is not None:\n if type(covariates) is list:\n columns.extend(covariates)\n else: # treat as dict\n columns.extend(covariates.keys())\n if regressors is not None:\n if type(regressors) is list:\n columns.extend(regressors)\n else: # treat as dict\n columns.extend(regressors.keys())\n if events is not None:\n if type(events) is list:\n columns.extend(events)\n else: # treat as dict\n columns.extend(events.keys())\n for name in columns:\n if name not in df:\n raise ValueError(\"Column {name!r} missing from dataframe\".format(name=name))\n if df.loc[df.loc[:, name].notnull()].shape[0] < 1:\n raise ValueError(\"Dataframe column {name!r} only has NaN rows.\".format(name=name))\n if not np.issubdtype(df[name].dtype, np.number):\n df.loc[:, name] = pd.to_numeric(df.loc[:, name])\n if np.isinf(df.loc[:, name].values).any():\n df.loc[:, name] = df[name].replace([np.inf, -np.inf], np.nan)\n if df.loc[df.loc[:, name].notnull()].shape[0] < 1:\n raise ValueError(\"Dataframe column {name!r} only has NaN rows.\".format(name=name))\n\n if df.index.name == \"ds\":\n df.index.name = None\n df = df.sort_values(\"ds\")\n df = df.reset_index(drop=True)\n return df\n\n\ndef check_dataframe(df, check_y=True, covariates=None, regressors=None, events=None):\n \"\"\"Performs basic data sanity checks and ordering\n\n Prepare dataframe for fitting or predicting.\n Args:\n df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds'\n check_y (bool): if df must have series values\n set to True if training or predicting with autoregression\n covariates (list or dict): covariate column names\n regressors (list or dict): regressor column names\n events (list or dict): event column names\n\n Returns:\n pd.DataFrame or dict of pd.DataFrame\n \"\"\"\n if isinstance(df, pd.DataFrame):\n checked_df = check_single_dataframe(df, check_y, covariates, regressors, events)\n elif isinstance(df, dict):\n checked_df = {}\n for key, df_i in df.items():\n checked_df[key] = check_single_dataframe(df_i, check_y, covariates, regressors, events)\n else:\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n return checked_df\n\n\ndef crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct=0.0):\n \"\"\"Splits data in k folds for crossvalidation.\n\n Args:\n df (pd.DataFrame): data\n n_lags (int): identical to NeuralProphet\n n_forecasts (int): identical to NeuralProphet\n k (int): number of CV folds\n fold_pct (float): percentage of overall samples to be in each fold\n fold_overlap_pct (float): percentage of overlap between the validation folds.\n default: 0.0\n\n Returns:\n list of k tuples [(df_train, df_val), ...] 
where:\n df_train (pd.DataFrame): training data\n df_val (pd.DataFrame): validation data\n \"\"\"\n if n_lags == 0:\n assert n_forecasts == 1\n total_samples = len(df) - n_lags + 2 - (2 * n_forecasts)\n samples_fold = max(1, int(fold_pct * total_samples))\n samples_overlap = int(fold_overlap_pct * samples_fold)\n assert samples_overlap < samples_fold\n min_train = total_samples - samples_fold - (k - 1) * (samples_fold - samples_overlap)\n assert min_train >= samples_fold\n folds = []\n df_fold = df.copy(deep=True)\n for i in range(k, 0, -1):\n df_train, df_val = split_df(df_fold, n_lags, n_forecasts, valid_p=samples_fold, inputs_overbleed=True)\n folds.append((df_train, df_val))\n split_idx = len(df_fold) - samples_fold + samples_overlap\n df_fold = df_fold.iloc[:split_idx].reset_index(drop=True)\n folds = folds[::-1]\n return folds\n\n\ndef double_crossvalidation_split_df(df, n_lags, n_forecasts, k, valid_pct, test_pct):\n \"\"\"Splits data in two sets of k folds for crossvalidation on validation and test data.\n\n Args:\n df (pd.DataFrame): data\n n_lags (int): identical to NeuralProphet\n n_forecasts (int): identical to NeuralProphet\n k (int): number of CV folds\n valid_pct (float): percentage of overall samples to be in validation\n test_pct (float): percentage of overall samples to be in test\n\n Returns:\n tuple of folds_val, folds_test, where each are same as crossvalidation_split_df returns\n \"\"\"\n fold_pct_test = float(test_pct) / k\n folds_test = crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct=fold_pct_test, fold_overlap_pct=0.0)\n df_train = folds_test[0][0]\n fold_pct_val = float(valid_pct) / k / (1.0 - test_pct)\n folds_val = crossvalidation_split_df(df_train, n_lags, n_forecasts, k, fold_pct=fold_pct_val, fold_overlap_pct=0.0)\n return folds_val, folds_test\n\n\ndef _split_df(df, n_lags, n_forecasts, valid_p, inputs_overbleed):\n \"\"\"Splits timeseries df into train and validation sets.\n\n Prevents overbleed of targets. Overbleed of inputs can be configured. In case of global modeling the split could be either local or global.\n\n Args:\n df (pd.DataFrame): data\n n_lags (int): identical to NeuralProphet\n n_forecasts (int): identical to NeuralProphet\n valid_p (float, int): fraction (0,1) of data to use for holdout validation set,\n or number of validation samples >1\n inputs_overbleed (bool): Whether to allow last training targets to be first validation inputs (never targets)\n\n Returns:\n df_train (pd.DataFrame): training data\n df_val (pd.DataFrame): validation data\n \"\"\"\n n_samples = len(df) - n_lags + 2 - (2 * n_forecasts)\n n_samples = n_samples if inputs_overbleed else n_samples - n_lags\n if 0.0 < valid_p < 1.0:\n n_valid = max(1, int(n_samples * valid_p))\n else:\n assert valid_p >= 1\n assert type(valid_p) == int\n n_valid = valid_p\n n_train = n_samples - n_valid\n assert n_train >= 1\n\n split_idx_train = n_train + n_lags + n_forecasts - 1\n split_idx_val = split_idx_train - n_lags if inputs_overbleed else split_idx_train\n df_train = df.copy(deep=True).iloc[:split_idx_train].reset_index(drop=True)\n df_val = df.copy(deep=True).iloc[split_idx_val:].reset_index(drop=True)\n log.debug(\"{} n_train, {} n_eval\".format(n_train, n_samples - n_train))\n return df_train, df_val\n\n\ndef find_time_threshold(df_dict, n_lags, valid_p, inputs_overbleed):\n \"\"\"Find time threshold for dividing list of timeseries into train and validation sets.\n Prevents overbleed of targets. 
Overbleed of inputs can be configured.\n\n Args:\n df_dict (dict): dict of data\n n_lags (int): identical to NeuralProphet\n valid_p (float): fraction (0,1) of data to use for holdout validation set\n inputs_overbleed (bool): Whether to allow last training targets to be first validation inputs (never targets)\n\n Returns:\n threshold_time_stamp (str): time stamp in which list of dataframe will be split into train and validation sets.\n \"\"\"\n if not 0 < valid_p < 1:\n log.error(\"Please type a valid value for valid_p (for global modeling it should be between 0 and 1.0)\")\n df_joint, _ = join_dataframes(df_dict)\n df_joint = df_joint.sort_values(\"ds\")\n df_joint = df_joint.reset_index(drop=True)\n n_samples = len(df_joint)\n n_samples = n_samples if inputs_overbleed else n_samples - n_lags\n n_valid = max(1, int(n_samples * valid_p))\n n_train = n_samples - n_valid\n threshold_time_stamp = df_joint.loc[n_train, \"ds\"]\n log.debug(\"Time threshold: \", threshold_time_stamp)\n return threshold_time_stamp\n\n\ndef split_considering_timestamp(df_dict, n_lags, n_forecasts, inputs_overbleed, threshold_time_stamp):\n \"\"\"Splits timeseries into train and validation sets according to given threshold_time_stamp.\n Args:\n df_dict(dict): dict of data\n n_lags (int): identical to NeuralProphet\n n_forecasts (int): identical to NeuralProphet\n inputs_overbleed (bool): Whether to allow last training targets to be first validation inputs (never targets)\n threshold_time_stamp (str): time stamp that defines splitting of data\n\n Returns:\n df_train (pd.DataFrame, list or dict): training data\n df_val (pd.DataFrame, list or dict): validation data\n \"\"\"\n df_train = {}\n df_val = {}\n for key in df_dict:\n if df_dict[key][\"ds\"].max() < threshold_time_stamp:\n df_train[key] = df_dict[key].copy(deep=True).reset_index(drop=True)\n elif df_dict[key][\"ds\"].min() > threshold_time_stamp:\n df_val[key] = df_dict[key].copy(deep=True).reset_index(drop=True)\n else:\n df = df_dict[key].copy(deep=True)\n n_train = len(df[df[\"ds\"] < threshold_time_stamp])\n split_idx_train = n_train + n_lags + n_forecasts - 1\n split_idx_val = split_idx_train - n_lags if inputs_overbleed else split_idx_train\n df_train[key] = df.copy(deep=True).iloc[:split_idx_train].reset_index(drop=True)\n df_val[key] = df.copy(deep=True).iloc[split_idx_val:].reset_index(drop=True)\n return df_train, df_val\n\n\ndef split_df(df, n_lags, n_forecasts, valid_p=0.2, inputs_overbleed=True, local_split=False):\n \"\"\"Splits timeseries df into train and validation sets.\n\n Prevents overbleed of targets. Overbleed of inputs can be configured. 
In case of global modeling the split could be either local or global.\n\n Args:\n df (pd.DataFrame, dict): dataframe or dict of dataframes containing column 'ds', 'y' with all data\n n_lags (int): identical to NeuralProphet\n n_forecasts (int): identical to NeuralProphet\n valid_p (float, int): fraction (0,1) of data to use for holdout validation set,\n or number of validation samples >1\n inputs_overbleed (bool): Whether to allow last training targets to be first validation inputs (never targets)\n local_normalization (bool): when set to true each episode from list of dataframes will be considered locally (in case of Global modeling) - in this case a dict of dataframes should be the input\n Returns:\n df_train (pd.DataFrame,dict):training data\n df_val (pd.DataFrame,dict): validation data\n \"\"\"\n if isinstance(df, pd.DataFrame):\n df_is_dict = False\n df_dict = {\"__df__\": df}\n elif isinstance(df, dict):\n df_is_dict = True\n df_dict = df\n else:\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n df_train = {}\n df_val = {}\n if local_split:\n for key in df_dict:\n df_train[key], df_val[key] = _split_df(df_dict[key], n_lags, n_forecasts, valid_p, inputs_overbleed)\n else:\n if len(df_dict) == 1:\n for df_name, df_i in df_dict.items():\n df_train[df_name], df_val[df_name] = _split_df(df_i, n_lags, n_forecasts, valid_p, inputs_overbleed)\n else:\n # Split data according to time threshold defined by the valid_p\n threshold_time_stamp = find_time_threshold(df_dict, n_lags, valid_p, inputs_overbleed)\n df_train, df_val = split_considering_timestamp(\n df_dict, n_lags, n_forecasts, inputs_overbleed, threshold_time_stamp\n )\n if not df_is_dict:\n df_train, df_val = df_train[\"__df__\"], df_val[\"__df__\"]\n return df_train, df_val\n\n\ndef make_future_df(\n df_columns, last_date, periods, freq, events_config=None, events_df=None, regressor_config=None, regressors_df=None\n):\n \"\"\"Extends df periods number steps into future.\n\n Args:\n df_columns (pandas DataFrame): Dataframe columns\n last_date: (pandas Datetime): last history date\n periods (int): number of future steps to predict\n freq (str): Data step sizes. 
Frequency of data recording,\n Any valid frequency for pd.date_range, such as 'D' or 'M'\n events_config (OrderedDict): User specified events configs\n events_df (pd.DataFrame): containing column 'ds' and 'event'\n regressor_config (OrderedDict): configuration for user specified regressors,\n regressors_df (pd.DataFrame): containing column 'ds' and one column for each of the external regressors\n Returns:\n df2 (pd.DataFrame): input df with 'ds' extended into future, and 'y' set to None\n \"\"\"\n future_dates = pd.date_range(start=last_date, periods=periods + 1, freq=freq) # An extra in case we include start\n future_dates = future_dates[future_dates > last_date] # Drop start if equals last_date\n future_dates = future_dates[:periods] # Return correct number of periods\n future_df = pd.DataFrame({\"ds\": future_dates})\n # set the events features\n if events_config is not None:\n future_df = convert_events_to_features(future_df, events_config=events_config, events_df=events_df)\n # set the regressors features\n if regressor_config is not None:\n for regressor in regressors_df:\n # Todo: iterate over regressor_config instead\n future_df[regressor] = regressors_df[regressor]\n for column in df_columns:\n if column not in future_df.columns:\n if column != \"t\" and column != \"y_scaled\":\n future_df[column] = None\n future_df.reset_index(drop=True, inplace=True)\n return future_df\n\n\ndef convert_events_to_features(df, events_config, events_df):\n \"\"\"\n Converts events information into binary features of the df\n\n Args:\n df (pandas DataFrame): Dataframe with columns 'ds' datestamps and 'y' time series values\n events_config (OrderedDict): User specified events configs\n events_df (pd.DataFrame): containing column 'ds' and 'event'\n\n Returns:\n df (pd.DataFrame): input df with columns for user_specified features\n \"\"\"\n\n for event in events_config.keys():\n event_feature = pd.Series([0.0] * df.shape[0])\n dates = events_df[events_df.event == event].ds\n event_feature[df.ds.isin(dates)] = 1.0\n df[event] = event_feature\n return df\n\n\ndef add_missing_dates_nan(df, freq):\n \"\"\"Fills missing datetimes in 'ds', with NaN for all other columns\n\n Args:\n df (pd.Dataframe): with column 'ds' datetimes\n freq (str):Data step sizes. 
Frequency of data recording,\n Any valid frequency for pd.date_range, such as 'D' or 'M'\n\n Returns:\n dataframe without date-gaps but nan-values\n \"\"\"\n if df[\"ds\"].dtype == np.int64:\n df.loc[:, \"ds\"] = df.loc[:, \"ds\"].astype(str)\n df.loc[:, \"ds\"] = pd.to_datetime(df.loc[:, \"ds\"])\n\n data_len = len(df)\n r = pd.date_range(start=df[\"ds\"].min(), end=df[\"ds\"].max(), freq=freq)\n df_all = df.set_index(\"ds\").reindex(r).rename_axis(\"ds\").reset_index()\n num_added = len(df_all) - data_len\n return df_all, num_added\n\n\ndef fill_linear_then_rolling_avg(series, limit_linear, rolling):\n \"\"\"Adds missing dates, fills missing values with linear imputation or trend.\n\n Args:\n series (pd.Series): series with nan to be filled in.\n limit_linear (int): maximum number of missing values to impute.\n Note: because imputation is done in both directions, this value is effectively doubled.\n rolling (int): maximal number of missing values to impute.\n Note: window width is rolling + 2*limit_linear\n\n Returns:\n filled df\n \"\"\"\n # impute small gaps linearly:\n series = series.interpolate(method=\"linear\", limit=limit_linear, limit_direction=\"both\")\n # fill remaining gaps with rolling avg\n is_na = pd.isna(series)\n rolling_avg = series.rolling(rolling + 2 * limit_linear, min_periods=2 * limit_linear, center=True).mean()\n series.loc[is_na] = rolling_avg[is_na]\n remaining_na = sum(series.isnull())\n return series, remaining_na\n\n\ndef get_freq_dist(ds_col):\n \"\"\"Get frequency distribution of 'ds' column\n Args:\n ds_col(pd.DataFrame): 'ds' column of dataframe\n\n Returns:\n tuple with numeric delta values (ms) and distribution of frequency counts\n \"\"\"\n converted_ds = pd.to_datetime(ds_col).view(dtype=np.int64)\n diff_ds = np.unique(converted_ds.diff(), return_counts=True)\n return diff_ds\n\n\ndef convert_str_to_num_freq(freq_str):\n \"\"\"Convert frequency tags (str) into numeric delta in ms\n\n Args:\n freq_str(str): frequency tag\n\n Returns:\n frequency numeric delta in ms\n \"\"\"\n if freq_str is None:\n freq_num = 0\n else:\n aux_ts = pd.DataFrame(pd.date_range(\"1994-01-01\", periods=100, freq=freq_str))\n frequencies, distribution = get_freq_dist(aux_ts[0])\n freq_num = frequencies[np.argmax(distribution)]\n # if freq_str == \"B\" or freq_str == \"BH\": # exception - Business day and Business hour\n # freq_num = freq_num + 0.777\n return freq_num\n\n\ndef convert_num_to_str_freq(freq_num, initial_time_stamp):\n \"\"\"Convert numeric frequencies into frequency tags (str)\n\n Args:\n freq_num(int): numeric values of delta in ms\n initial_time_stamp(str): initial time stamp of data\n\n Returns:\n frequency tag (str)\n \"\"\"\n aux_ts = pd.date_range(initial_time_stamp, periods=100, freq=pd.to_timedelta(freq_num))\n freq_str = pd.infer_freq(aux_ts)\n return freq_str\n\n\ndef get_dist_considering_two_freqs(dist):\n \"\"\"Add occasions of the two most common frequencies\n\n Note: useful for the frequency exceptions (i.e. 
'M','Y','Q','B', and 'BH').\n\n Args:\n dist (list): list of occasions of frequencies\n\n Returns:\n sum of the two most common frequencies occasions\n \"\"\"\n # get distribution considering the two most common frequencies - useful for monthly and business day\n f1 = dist.max()\n dist = np.delete(dist, np.argmax(dist))\n f2 = dist.max()\n return f1 + f2\n\n\ndef _infer_frequency(df, freq, min_freq_percentage=0.7):\n \"\"\"Automatically infers frequency of dataframe or list of dataframes.\n\n Args:\n df (pd.DataFrame or list of pd.DataFrame): data\n freq (str): Data step sizes. Frequency of data recording,\n Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.\n min_freq_percentage (float): threshold for defining major frequency of data\n default: 0.7\n\n Returns:\n Valid frequency tag according to major frequency.\n\n \"\"\"\n frequencies, distribution = get_freq_dist(df[\"ds\"])\n # exception - monthly df (31 days freq or 30 days freq)\n if frequencies[np.argmax(distribution)] == 2.6784e15 or frequencies[np.argmax(distribution)] == 2.592e15:\n dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df[\"ds\"])\n num_freq = 2.6784e15\n inferred_freq = \"MS\" if pd.to_datetime(df[\"ds\"][0]).day < 15 else \"M\"\n # exception - yearly df (365 days freq or 366 days freq)\n elif frequencies[np.argmax(distribution)] == 3.1536e16 or frequencies[np.argmax(distribution)] == 3.16224e16:\n dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df[\"ds\"])\n num_freq = 3.1536e16\n inferred_freq = \"YS\" if pd.to_datetime(df[\"ds\"][0]).day < 15 else \"Y\"\n # exception - quaterly df (most common == 92 days - 3rd,4th quarters and second most common == 91 days 2nd quarter and 1st quarter in leap year)\n elif (\n frequencies[np.argmax(distribution)] == 7.9488e15\n and frequencies[np.argsort(distribution, axis=0)[-2]] == 7.8624e15\n ):\n dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df[\"ds\"])\n num_freq = 7.9488e15\n inferred_freq = \"QS\" if pd.to_datetime(df[\"ds\"][0]).day < 15 else \"Q\"\n # exception - Business day (most common == day delta and second most common == 3 days delta and second most common is at least 12% of the deltas)\n elif (\n frequencies[np.argmax(distribution)] == 8.64e13\n and frequencies[np.argsort(distribution, axis=0)[-2]] == 2.592e14\n and distribution[np.argsort(distribution, axis=0)[-2]] / len(df[\"ds\"]) >= 0.12\n ):\n dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df[\"ds\"])\n num_freq = 8.64e13\n inferred_freq = \"B\"\n # exception - Business hour (most common == hour delta and second most common == 17 hours delta and second most common is at least 8% of the deltas)\n elif (\n frequencies[np.argmax(distribution)] == 3.6e12\n and frequencies[np.argsort(distribution, axis=0)[-2]] == 6.12e13\n and distribution[np.argsort(distribution, axis=0)[-2]] / len(df[\"ds\"]) >= 0.08\n ):\n dominant_freq_percentage = get_dist_considering_two_freqs(distribution) / len(df[\"ds\"])\n num_freq = 3.6e12\n inferred_freq = \"BH\"\n else:\n dominant_freq_percentage = distribution.max() / len(df[\"ds\"])\n num_freq = frequencies[np.argmax(distribution)] # get value of most common diff\n inferred_freq = convert_num_to_str_freq(num_freq, df[\"ds\"].iloc[0])\n\n log.info(\n \"Major frequency {} corresponds to {}% of the data.\".format(\n inferred_freq, np.round(dominant_freq_percentage * 100, 3)\n )\n )\n ideal_freq_exists = 
True if dominant_freq_percentage >= min_freq_percentage else False\n if ideal_freq_exists:\n # if major freq exists\n if freq == \"auto\": # automatically set df freq to inferred freq\n freq_str = inferred_freq\n log.info(\"Dataframe freq automatically defined as {}\".format(freq_str))\n else:\n freq_str = freq\n if convert_str_to_num_freq(freq) != convert_str_to_num_freq(\n inferred_freq\n ): # check if given freq is the major\n log.warning(\"Defined frequency {} is different than major frequency {}\".format(freq_str, inferred_freq))\n else:\n log.info(\"Defined frequency is equal to major frequency - {}\".format(freq_str))\n else:\n # if ideal freq does not exist\n if freq == \"auto\":\n log.warning(\n \"The auto-frequency feature is not able to detect the following frequencies: SM, BM, CBM, SMS, BMS, CBMS, BQ, BQS, BA, or, BAS. If the frequency of the dataframe is any of the mentioned please define it manually.\"\n )\n raise ValueError(\"Detected multiple frequencies in the timeseries please pre-process data.\")\n else:\n freq_str = freq\n log.warning(\n \"Dataframe has multiple frequencies. It will be resampled according to given freq {}. Ignore message if actual frequency is any of the following: SM, BM, CBM, SMS, BMS, CBMS, BQ, BQS, BA, or, BAS.\".format(\n freq\n )\n )\n return freq_str\n\n\ndef infer_frequency(df, freq, n_lags, min_freq_percentage=0.7):\n \"\"\"Automatically infers frequency of dataframe or list of dataframes.\n\n Args:\n df (pd.DataFrame,dict): data\n freq (str): Data step sizes. Frequency of data recording,\n Any valid frequency for pd.date_range, such as '5min', 'D', 'MS' or 'auto' (default) to automatically set frequency.\n n_lags (int): identical to NeuralProphet\n min_freq_percentage (float): threshold for defining major frequency of data\n default: 0.7\n\n Returns:\n Valid frequency tag according to major frequency.\n\n \"\"\"\n\n df_dict, received_unnamed_df = prep_copy_df_dict(df)\n freq_df = list()\n for key in df_dict:\n freq_df.append(_infer_frequency(df_dict[key], freq, min_freq_percentage))\n if len(set(freq_df)) != 1 and n_lags > 0:\n raise ValueError(\n \"One or more dataframes present different major frequencies, please make sure all dataframes present the same major frequency for auto-regression\"\n )\n elif len(set(freq_df)) != 1 and n_lags == 0:\n # The most common freq is set as the main one (but it does not really matter for Prophet approach)\n freq_str = max(set(freq_df), key=freq_df.count)\n log.warning(\"One or more major frequencies are different - setting main frequency as {}\".format(freq_str))\n else:\n freq_str = freq_df[0]\n return freq_str\n\n\ndef compare_dict_keys(dict_1, dict_2, name_dict_1, name_dict_2):\n df_names_1, df_names_2 = list(dict_1.keys()), list(dict_2.keys())\n if len(df_names_1) != len(df_names_2):\n raise ValueError(\n \"Please, make sure {} and {} dicts have the same number of terms\".format(name_dict_1, name_dict_2)\n )\n missing_names = [name for name in df_names_2 if name not in df_names_1]\n if len(missing_names) > 0:\n raise ValueError(\" Key(s) {} not valid - missing from {} dict keys\".format(missing_names, name_dict_1))\n log.debug(\"{} and {} dicts are compatible\".format(name_dict_1, name_dict_2))\n" ]
[ [ "numpy.quantile", "numpy.min", "numpy.mean", "pandas.concat", "numpy.issubdtype", "numpy.max", "pandas.DataFrame", "numpy.argmax", "pandas.to_datetime", "numpy.round", "numpy.std", "numpy.argsort", "pandas.to_numeric", "pandas.infer_freq", "numpy.isinf", "pandas.isna", "pandas.date_range", "pandas.to_timedelta", "pandas.Series", "numpy.unique" ] ]
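The data-parameter utilities in the entry above center on a "soft" shift/scale rule: shift by the series minimum and scale by the spread up to the 95th percentile, falling back to the full range when that spread is zero. The following is a hedged, minimal standalone sketch of that rule only; the module's own ShiftScale container, logger, and quantile interpolation mode are assumed rather than imported, and the toy series is invented for illustration.

from collections import namedtuple
import numpy as np

# Stand-in for the module's ShiftScale container (assumed, not imported).
ShiftScale = namedtuple("ShiftScale", ["shift", "scale"])

def soft_params(array):
    # "soft" rule from get_normalization_params above: shift by the minimum,
    # scale by the spread up to the 95th percentile (full range if that is zero).
    lowest = np.min(array)
    q95 = np.quantile(array, 0.95)  # the original requests "higher" interpolation
    width = q95 - lowest
    if np.isclose(width, 0.0):
        width = np.max(array) - lowest
    return ShiftScale(shift=lowest, scale=width)

y = np.array([10.0, 12.0, 11.0, 15.0, 14.0, 13.0, 50.0])  # toy series with one outlier
params = soft_params(y)
print(params)
print((y - params.shift) / params.scale)  # the same sub/div step normalize() applies per column
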
bfilippi/pandas-highcharts
[ "bf449b7db8b6966bcf95a0280bf2e4518f3e2419" ]
[ "pandas_highcharts/core.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pandas\nimport copy\n\n\n_pd2hc_kind = {\n \"bar\": \"column\",\n \"barh\": \"bar\",\n \"area\": \"area\",\n \"line\": \"line\",\n \"pie\": \"pie\"\n}\n\n\ndef pd2hc_kind(kind):\n if kind not in _pd2hc_kind:\n raise ValueError(\"%(kind)s plots are not yet supported\" % locals())\n return _pd2hc_kind[kind]\n\n_pd2hc_linestyle = {\n \"-\": \"Solid\",\n \"--\": \"Dash\",\n \"-.\": \"DashDot\",\n \":\": \"Dot\"\n}\n\n\ndef pd2hc_linestyle(linestyle):\n if linestyle not in _pd2hc_linestyle:\n raise ValueError(\"%(linestyle)s linestyles are not yet supported\" % locals())\n return _pd2hc_linestyle[linestyle]\n\n\ndef json_encode(obj):\n return pandas.io.json.dumps(obj)\n\n\ndef serialize(df, output_type=\"javascript\", chart_type=\"default\", *args, **kwargs):\n def serialize_chart(df, output, *args, **kwargs):\n output[\"chart\"] = {}\n if 'render_to' in kwargs:\n output['chart']['renderTo'] = kwargs['render_to']\n if \"figsize\" in kwargs:\n output[\"chart\"][\"width\"] = kwargs[\"figsize\"][0]\n output[\"chart\"][\"height\"] = kwargs[\"figsize\"][1]\n if \"kind\" in kwargs:\n output[\"chart\"][\"type\"] = pd2hc_kind(kwargs[\"kind\"])\n if kwargs.get('polar'):\n output['chart']['polar'] = True\n\n def serialize_colors(df, output, *args, **kwargs):\n pass\n\n def serialize_credits(df, output, *args, **kwargs):\n pass\n\n def serialize_data(df, output, *args, **kwargs):\n pass\n\n def serialize_drilldown(df, output, *args, **kwargs):\n pass\n\n def serialize_exporting(df, output, *args, **kwargs):\n pass\n\n def serialize_labels(df, output, *args, **kwargs):\n pass\n\n def serialize_legend(df, output, *args, **kwargs):\n output[\"legend\"] = {\n \"enabled\": kwargs.get(\"legend\", True)\n }\n\n def serialize_loading(df, output, *args, **kwargs):\n pass\n\n def serialize_navigation(df, output, *args, **kwargs):\n pass\n\n def serialize_noData(df, output, *args, **kwargs):\n pass\n\n def serialize_pane(df, output, *args, **kwargs):\n pass\n\n def serialize_plotOptions(df, output, *args, **kwargs):\n pass\n\n def serialize_series(df, output, *args, **kwargs):\n def is_secondary(c, **kwargs):\n return c in kwargs.get(\"secondary_y\", [])\n if kwargs.get('sort_columns'):\n df = df.sort_index()\n series = df.to_dict('series')\n output[\"series\"] = []\n for name, data in series.items():\n if df[name].dtype.kind in \"biufc\":\n sec = is_secondary(name, **kwargs)\n d = {\n \"name\": name if not sec or not kwargs.get(\"mark_right\", True) else name + \" (right)\",\n \"yAxis\": int(sec),\n \"data\": list(zip(df.index, data.values.tolist()))\n }\n if kwargs.get('polar'):\n d['data'] = [v for k, v in d['data']]\n if kwargs.get(\"kind\") == \"area\" and kwargs.get(\"stacked\", True):\n d[\"stacking\"] = 'normal'\n if kwargs.get(\"style\"):\n d[\"dashStyle\"] = pd2hc_linestyle(kwargs[\"style\"].get(name, \"-\"))\n output[\"series\"].append(d)\n output['series'].sort(key=lambda s: s['name'])\n\n def serialize_subtitle(df, output, *args, **kwargs):\n pass\n\n def serialize_title(df, output, *args, **kwargs):\n if \"title\" in kwargs:\n output[\"title\"] = {\"text\": kwargs[\"title\"]}\n\n def serialize_tooltip(df, output, *args, **kwargs):\n if 'tooltip' in kwargs:\n output['tooltip'] = kwargs['tooltip']\n\n def serialize_xAxis(df, output, *args, **kwargs):\n output[\"xAxis\"] = {}\n if df.index.name:\n output[\"xAxis\"][\"title\"] = {\"text\": df.index.name}\n if df.index.dtype.kind in \"M\":\n output[\"xAxis\"][\"type\"] = \"datetime\"\n if df.index.dtype.kind == 'O':\n 
output['xAxis']['categories'] = sorted(list(df.index)) if kwargs.get('sort_columns') else list(df.index)\n if kwargs.get(\"grid\"):\n output[\"xAxis\"][\"gridLineWidth\"] = 1\n output[\"xAxis\"][\"gridLineDashStyle\"] = \"Dot\"\n if kwargs.get(\"loglog\") or kwargs.get(\"logx\"):\n output[\"xAxis\"][\"type\"] = 'logarithmic'\n if \"xlim\" in kwargs:\n output[\"xAxis\"][\"min\"] = kwargs[\"xlim\"][0]\n output[\"xAxis\"][\"max\"] = kwargs[\"xlim\"][1]\n if \"rot\" in kwargs:\n output[\"xAxis\"][\"labels\"] = {\"rotation\": kwargs[\"rot\"]}\n if \"fontsize\" in kwargs:\n output[\"xAxis\"].setdefault(\"labels\", {})[\"style\"] = {\"fontSize\": kwargs[\"fontsize\"]}\n if \"xticks\" in kwargs:\n output[\"xAxis\"][\"tickPositions\"] = kwargs[\"xticks\"]\n\n def serialize_yAxis(df, output, *args, **kwargs):\n yAxis = {}\n if kwargs.get(\"grid\"):\n yAxis[\"gridLineWidth\"] = 1\n yAxis[\"gridLineDashStyle\"] = \"Dot\"\n if kwargs.get(\"loglog\") or kwargs.get(\"logy\"):\n yAxis[\"type\"] = 'logarithmic'\n if \"ylim\" in kwargs:\n yAxis[\"min\"] = kwargs[\"ylim\"][0]\n yAxis[\"max\"] = kwargs[\"ylim\"][1]\n if \"rot\" in kwargs:\n yAxis[\"labels\"] = {\"rotation\": kwargs[\"rot\"]}\n if \"fontsize\" in kwargs:\n yAxis.setdefault(\"labels\", {})[\"style\"] = {\"fontSize\": kwargs[\"fontsize\"]}\n if \"yticks\" in kwargs:\n yAxis[\"tickPositions\"] = kwargs[\"yticks\"]\n output[\"yAxis\"] = [yAxis]\n if kwargs.get(\"secondary_y\"):\n yAxis2 = copy.deepcopy(yAxis)\n yAxis2[\"opposite\"] = True\n output[\"yAxis\"].append(yAxis2)\n\n def serialize_zoom(df, output, *args, **kwargs):\n if \"zoom\" in kwargs:\n if kwargs[\"zoom\"] not in (\"x\", \"y\", \"xy\"):\n raise ValueError(\"zoom must be in ('x', 'y', 'xy')\")\n output[\"chart\"][\"zoomType\"] = kwargs[\"zoom\"]\n\n output = {}\n df_copy = copy.deepcopy(df)\n if \"x\" in kwargs:\n df_copy.index = df_copy.pop(kwargs[\"x\"])\n if kwargs.get(\"use_index\", True) is False:\n df_copy = df_copy.reset_index()\n if \"y\" in kwargs:\n df_copy = pandas.DataFrame(df_copy, columns=kwargs[\"y\"])\n serialize_chart(df_copy, output, *args, **kwargs)\n serialize_colors(df_copy, output, *args, **kwargs)\n serialize_credits(df_copy, output, *args, **kwargs)\n serialize_data(df_copy, output, *args, **kwargs)\n serialize_drilldown(df_copy, output, *args, **kwargs)\n serialize_exporting(df_copy, output, *args, **kwargs)\n serialize_labels(df_copy, output, *args, **kwargs)\n serialize_legend(df_copy, output, *args, **kwargs)\n serialize_loading(df_copy, output, *args, **kwargs)\n serialize_navigation(df_copy, output, *args, **kwargs)\n serialize_noData(df_copy, output, *args, **kwargs)\n serialize_pane(df_copy, output, *args, **kwargs)\n serialize_plotOptions(df_copy, output, *args, **kwargs)\n serialize_series(df_copy, output, *args, **kwargs)\n serialize_subtitle(df_copy, output, *args, **kwargs)\n serialize_title(df_copy, output, *args, **kwargs)\n serialize_tooltip(df_copy, output, *args, **kwargs)\n serialize_xAxis(df_copy, output, *args, **kwargs)\n serialize_yAxis(df_copy, output, *args, **kwargs)\n serialize_zoom(df_copy, output, *args, **kwargs)\n if output_type == \"dict\":\n return output\n if output_type == \"json\":\n return json_encode(output)\n if chart_type == \"stock\":\n return \"new Highcharts.StockChart(%s);\" % json_encode(output)\n return \"new Highcharts.Chart(%s);\" % json_encode(output)\n" ]
[ [ "pandas.DataFrame", "pandas.io.json.dumps" ] ]
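For context on the serialize function shown in pandas_highcharts/core.py above, here is a small hedged usage sketch. It assumes the package is installed and importable as pandas_highcharts; the DataFrame contents and option values (column names, chart id, title) are invented for illustration.

import pandas
from pandas_highcharts.core import serialize  # assumes the package is installed

# Invented example data: one categorical x column and one numeric series.
df = pandas.DataFrame({
    "month": ["Jan", "Feb", "Mar"],
    "sales": [10, 15, 12],
})

# kind="bar" is mapped to Highcharts' "column" type by pd2hc_kind above.
chart = serialize(df, render_to="my_chart", kind="bar", title="Monthly sales",
                  x="month", output_type="dict")
print(chart["chart"])   # {'renderTo': 'my_chart', 'type': 'column'}
print(chart["title"])   # {'text': 'Monthly sales'}

With output_type left at its default, the same call instead returns a "new Highcharts.Chart(...)" JavaScript snippet, as the final branch of serialize above shows.
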
JKingKong/mmdetection
[ "cfa22397194c592c25bd19e2f9f2f60f1ea699d3", "cfa22397194c592c25bd19e2f9f2f60f1ea699d3" ]
[ "violin_picture.py", "mmdet/models/detectors/two_stage.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nfig, axes = plt.subplots(figsize=(10, 10))\n\n\nall_data = [np.random.normal(0, std, 10) for std in range(9, 10)]\nall_data = [np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])]\naxes.violinplot(all_data,\n showmeans=False,\n showmedians=True\n )\naxes.set_title('violin plot')\n\n# adding horizontal grid lines\n\naxes.yaxis.grid(True)\nt = [y + 1 for y in range(len(all_data))]\naxes.set_xticks([y + 1 for y in range(len(all_data))], )\n\n\nplt.setp(axes, xticks=[y + 1 for y in range(len(all_data))],\n xticklabels=['correct'],\n )\n\nplt.show()", "import torch\nimport torch.nn as nn\n\nfrom mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom .. import builder\nfrom ..registry import DETECTORS\nfrom .base import BaseDetector\nfrom .test_mixins import BBoxTestMixin, MaskTestMixin, RPNTestMixin\n\n\n@DETECTORS.register_module\nclass TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,\n MaskTestMixin):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n shared_head=None,\n rpn_head=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n mode_name=None\n ):\n super(TwoStageDetector, self).__init__()\n\n self.mode_name = mode_name\n # 上来一波build,这里和之前的build没什么区别,在注册器里面去除对应的type类型,然后返回一个对象,以后就可以肆意调用里面的功能了\n # 这里build了backbone, neck, shared_head, rpn_head,bbox_head,mask_head(如果有)\n # backbone,主干网络用的啥,譬如resnet50, resnext101之类的\n # neck 一般是FPN,需要指定很多参数,譬如用哪些feature map,之后会详细说\n # rpn_head继承了anchor_head,是rpn的核心功能\n self.backbone = builder.build_backbone(backbone)\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if rpn_head is not None:\n self.rpn_head = builder.build_head(rpn_head)\n\n if bbox_head is not None:\n self.bbox_roi_extractor = builder.build_roi_extractor(\n bbox_roi_extractor)\n self.bbox_head = builder.build_head(bbox_head)\n\n if mask_head is not None:\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = builder.build_roi_extractor(\n mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = builder.build_head(mask_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n def init_weights(self, pretrained=None):\n super(TwoStageDetector, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_bbox:\n self.bbox_roi_extractor.init_weights()\n self.bbox_head.init_weights()\n if self.with_mask:\n self.mask_head.init_weights()\n if not self.share_roi_extractor:\n self.mask_roi_extractor.init_weights()\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n # 前向backbone 和 neck的函数\n \"\"\"\n x = self.backbone(img) # bcakbone\n if 
self.with_neck:\n x = self.neck(x) # neck\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(device=img.device)\n # bbox head\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n outs = outs + (cls_score, bbox_pred)\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], mask_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n mask_pred = self.mask_head(mask_feats)\n outs = outs + (mask_pred, )\n return outs\n\n def forward_train(self, # 核心双阶段检测器的流程\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format(left top point, right bottom point).\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n #  前向 backbone 和 neck\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n # rpn_head在上面__init__函数里面build了,返回了rpn_head对象,\n # 但是因为都继承了nn.Module(实现了__call__),可以直接用实例名字调用里面的forward函数,从而进行了前向传播\n\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas,\n self.train_cfg.rpn)\n # 这里loss是rpn_head里面的实现了,因为rpn_head继承了anchor_head,\n # 所以用了父类anchor_head实现的loss,其实就是anchor的那一套,返回是一个字典\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_metas, proposal_cfg)\n # 得到proposal,把anchor转化为对应的框的信息,然后NMS再取top-N个候选框\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n '''\n ============\n proposal_list\n2\ntorch.Size([2000, 5])\n******\ntorch.Size([2000, 5])\n============\n============\n '''\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = bbox_assigner.assign(proposal_list[i],\n gt_bboxes[i],\n gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n if self.with_bbox:\n rois = bbox2roi([res.bboxes for res in sampling_results])\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n # bbox_targets: 长度为4的tuple,元素是tensor\n '''\n bbox_targets:长度为4\n 四个元素分别的维度\n labels: torch.Size([1024]) \n label_weights: torch.Size([1024])\n bbox_targets torch.Size([1024, 4])\n bbox_weights torch.Size([1024, 4])\n '''\n # tensor维度为1维与tensor长度和cls_score,bbox_pred一致,值为0或1,用来表示预测是否命中真实框\n bbox_targets = self.bbox_head.get_target(sampling_results,\n gt_bboxes, gt_labels,\n self.train_cfg.rcnn)\n loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,\n *bbox_targets)\n losses.update(loss_bbox)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n pos_rois = bbox2roi(\n [res.pos_bboxes for res in sampling_results])\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], pos_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_feats[pos_inds]\n\n if mask_feats.shape[0] > 0:\n mask_pred = self.mask_head(mask_feats)\n mask_targets = self.mask_head.get_target(\n sampling_results, gt_masks, self.train_cfg.rcnn)\n pos_labels = torch.cat(\n [res.pos_gt_labels 
for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_pred, mask_targets,\n pos_labels)\n losses.update(loss_mask)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.async_test_rpn(x, img_meta,\n self.test_cfg.rpn)\n else:\n proposal_list = proposals\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_meta,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get('mask'))\n return bbox_results, segm_results\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.simple_test_rpn(x, img_metas,\n self.test_cfg.rpn)\n else:\n proposal_list = proposals\n\n # 调用mmdet/models/detectors/test_mixins.py 里的simple_test_mask()\n # 获取框(x,y,w,h,置信度)和类标签\n # 科学计数法\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg.rcnn, rescale=rescale,\n mode_name=self.mode_name,save_mode=True,\n )\n\n # 返回一个列表,不是tensor\n # 用浮点数表示,且是一个列表\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n # import sys\n # print()\n # print(\"===================****************=====================\")\n # print(\"--- current function from \", sys._getframe().f_code.co_filename)\n # print(\"--- current function is      \", sys._getframe().f_code.co_name)\n # print()\n # print(\"--- called from file           \", sys._getframe().f_back.f_code.co_filename)\n # print(\"--- called by function      \", sys._getframe().f_back.f_code.co_name)\n # print(\"--- called at line               \", sys._getframe().f_back.f_lineno)\n # print(\"===================****************=====================\")\n # print()\n # print()\n # print(\"--------------------------------two_stage.py------------------------------------------------------\")\n # print(\"===det_bboxes:\",det_bboxes.shape)\n # print(\"===det_labels:\",det_labels.shape)\n # print(\"--------------------------------------------------------------------------------------\")\n # print()\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return bbox_results, segm_results\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n proposal_list = self.aug_test_rpn(\n self.extract_feats(imgs), img_metas, self.test_cfg.rpn)\n det_bboxes, det_labels = self.aug_test_bboxes(\n self.extract_feats(imgs), img_metas, proposal_list,\n self.test_cfg.rcnn)\n\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if 
self.with_mask:\n segm_results = self.aug_test_mask(\n self.extract_feats(imgs), img_metas, det_bboxes, det_labels)\n return bbox_results, segm_results\n else:\n return bbox_results\n" ]
[ [ "matplotlib.pyplot.show", "numpy.array", "numpy.random.normal", "matplotlib.pyplot.subplots" ], [ "torch.zeros", "torch.cat", "torch.randn", "torch.ones" ] ]
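The two-stage detector above converts per-image proposals into RoIs with bbox2roi before pooling (rois = bbox2roi([proposals])). The sketch below is not mmdet's implementation; it is an assumed, simplified stand-in in plain PyTorch that illustrates the shape bookkeeping such a conversion performs: each image's (N, 4) proposal boxes gain a leading image-index column so pooled features can be attributed to the correct image.

import torch

def boxes_to_rois(bbox_list):
    # For each image, prepend a column holding the image index, then stack.
    rois = []
    for img_id, bboxes in enumerate(bbox_list):
        idx = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois.append(torch.cat([idx, bboxes[:, :4]], dim=1))
    return torch.cat(rois, dim=0)

proposals_img0 = torch.rand(3, 4)  # toy proposals for image 0
proposals_img1 = torch.rand(2, 4)  # toy proposals for image 1
rois = boxes_to_rois([proposals_img0, proposals_img1])
print(rois.shape)  # torch.Size([5, 5]); column 0 is the image index
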
condmat/simple-dmrg
[ "f80db9cb75b99d5627440174a888cd5868a07f28" ]
[ "simple_dmrg_02_finite_system.py" ]
[ "#!/usr/bin/env python\n#\n# Simple DMRG tutorial. This code integrates the following concepts:\n# - Infinite system algorithm\n# - Finite system algorithm\n#\n# Copyright 2013 James R. Garrison and Ryan V. Mishmash.\n# Open source under the MIT license. Source code at\n# <https://github.com/simple-dmrg/simple-dmrg/>\n\n# This code will run under any version of Python >= 2.6. The following line\n# provides consistency between python2 and python3.\nfrom __future__ import print_function, division\n\n# numpy and scipy imports\nimport numpy as np\nfrom scipy.sparse import kron, identity\nfrom scipy.sparse.linalg import eigsh # Lanczos routine from ARPACK\n\n# We will use python's \"namedtuple\" to represent the Block and EnlargedBlock\n# objects\nfrom collections import namedtuple\n\nBlock = namedtuple(\"Block\", [\"length\", \"basis_size\", \"operator_dict\"])\nEnlargedBlock = namedtuple(\"EnlargedBlock\", [\"length\", \"basis_size\", \"operator_dict\"])\n\ndef is_valid_block(block):\n for op in block.operator_dict.values():\n if op.shape[0] != block.basis_size or op.shape[1] != block.basis_size:\n return False\n return True\n\n# This function should test the same exact things, so there is no need to\n# repeat its definition.\nis_valid_enlarged_block = is_valid_block\n\n# Model-specific code for the Heisenberg XXZ chain\nmodel_d = 2 # single-site basis size\n\nSz1 = np.array([[0.5, 0], [0, -0.5]], dtype='d') # single-site S^z\nSp1 = np.array([[0, 1], [0, 0]], dtype='d') # single-site S^+\n\nH1 = np.array([[0, 0], [0, 0]], dtype='d') # single-site portion of H is zero\n\ndef H2(Sz1, Sp1, Sz2, Sp2): # two-site part of H\n \"\"\"Given the operators S^z and S^+ on two sites in different Hilbert spaces\n (e.g. two blocks), returns a Kronecker product representing the\n corresponding two-site term in the Hamiltonian that joins the two sites.\n \"\"\"\n J = Jz = 1.\n return (\n (J / 2) * (kron(Sp1, Sp2.conjugate().transpose()) + kron(Sp1.conjugate().transpose(), Sp2)) +\n Jz * kron(Sz1, Sz2)\n )\n\n# conn refers to the connection operator, that is, the operator on the edge of\n# the block, on the interior of the chain. We need to be able to represent S^z\n# and S^+ on that site in the current basis in order to grow the chain.\ninitial_block = Block(length=1, basis_size=model_d, operator_dict={\n \"H\": H1,\n \"conn_Sz\": Sz1,\n \"conn_Sp\": Sp1,\n})\n\ndef enlarge_block(block):\n \"\"\"This function enlarges the provided Block by a single site, returning an\n EnlargedBlock.\n \"\"\"\n mblock = block.basis_size\n o = block.operator_dict\n\n # Create the new operators for the enlarged block. Our basis becomes a\n # Kronecker product of the Block basis and the single-site basis. NOTE:\n # `kron` uses the tensor product convention making blocks of the second\n # array scaled by the first. 
As such, we adopt this convention for\n # Kronecker products throughout the code.\n enlarged_operator_dict = {\n \"H\": kron(o[\"H\"], identity(model_d)) + kron(identity(mblock), H1) + H2(o[\"conn_Sz\"], o[\"conn_Sp\"], Sz1, Sp1),\n \"conn_Sz\": kron(identity(mblock), Sz1),\n \"conn_Sp\": kron(identity(mblock), Sp1),\n }\n\n return EnlargedBlock(length=(block.length + 1),\n basis_size=(block.basis_size * model_d),\n operator_dict=enlarged_operator_dict)\n\ndef rotate_and_truncate(operator, transformation_matrix):\n \"\"\"Transforms the operator to the new (possibly truncated) basis given by\n `transformation_matrix`.\n \"\"\"\n return transformation_matrix.conjugate().transpose().dot(operator.dot(transformation_matrix))\n\ndef single_dmrg_step(sys, env, m):\n \"\"\"Perform a single DMRG step using `sys` as the system and `env` as the\n environment, keeping a maximum of `m` states in the new basis.\n \"\"\"\n assert is_valid_block(sys)\n assert is_valid_block(env)\n\n # Enlarge each block by a single site.\n sys_enl = enlarge_block(sys)\n if sys is env: # no need to recalculate a second time\n env_enl = sys_enl\n else:\n env_enl = enlarge_block(env)\n\n assert is_valid_enlarged_block(sys_enl)\n assert is_valid_enlarged_block(env_enl)\n\n # Construct the full superblock Hamiltonian.\n m_sys_enl = sys_enl.basis_size\n m_env_enl = env_enl.basis_size\n sys_enl_op = sys_enl.operator_dict\n env_enl_op = env_enl.operator_dict\n superblock_hamiltonian = kron(sys_enl_op[\"H\"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op[\"H\"]) + \\\n H2(sys_enl_op[\"conn_Sz\"], sys_enl_op[\"conn_Sp\"], env_enl_op[\"conn_Sz\"], env_enl_op[\"conn_Sp\"])\n\n # Call ARPACK to find the superblock ground state. (\"SA\" means find the\n # \"smallest in amplitude\" eigenvalue.)\n (energy,), psi0 = eigsh(superblock_hamiltonian, k=1, which=\"SA\")\n\n # Construct the reduced density matrix of the system by tracing out the\n # environment\n #\n # We want to make the (sys, env) indices correspond to (row, column) of a\n # matrix, respectively. 
Since the environment (column) index updates most\n # quickly in our Kronecker product structure, psi0 is thus row-major (\"C\n # style\").\n psi0 = psi0.reshape([sys_enl.basis_size, -1], order=\"C\")\n rho = np.dot(psi0, psi0.conjugate().transpose())\n\n # Diagonalize the reduced density matrix and sort the eigenvectors by\n # eigenvalue.\n w, v = np.linalg.eigh(rho)\n possible_eigenstates = []\n for eval, evec in zip(w, v.transpose()):\n possible_eigenstates.append((eval, evec))\n possible_eigenstates.sort(reverse=True, key=lambda x: x[0]) # largest eigenvalue first\n\n # Build the transformation matrix from the `m` overall most significant\n # eigenvectors.\n my_m = min(len(possible_eigenstates), m)\n transformation_matrix = np.zeros((sys_enl.basis_size, my_m), dtype='d', order='F')\n for i, (eval, evec) in enumerate(possible_eigenstates[:my_m]):\n transformation_matrix[:, i] = evec\n\n truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])\n print(\"truncation error\", truncation_error)\n\n # Rotate and truncate each operator.\n new_operator_dict = {}\n for name, op in sys_enl.operator_dict.items():\n new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)\n\n newblock = Block(length=sys_enl.length,\n basis_size=my_m,\n operator_dict=new_operator_dict)\n\n return newblock, energy\n\ndef graphic(sys_block, env_block, sys_label=\"l\"):\n \"\"\"Returns a graphical representation of the DMRG step we are about to\n perform, using '=' to represent the system sites, '-' to represent the\n environment sites, and '**' to represent the two intermediate sites.\n \"\"\"\n assert sys_label in (\"l\", \"r\")\n graphic = (\"=\" * sys_block.length) + \"**\" + (\"-\" * env_block.length)\n if sys_label == \"r\":\n # The system should be on the right and the environment should be on\n # the left, so reverse the graphic.\n graphic = graphic[::-1]\n return graphic\n\ndef infinite_system_algorithm(L, m):\n block = initial_block\n # Repeatedly enlarge the system by performing a single DMRG step, using a\n # reflection of the current block as the environment.\n while 2 * block.length < L:\n print(graphic(block, block))\n block, energy = single_dmrg_step(block, block, m=m)\n print(\"E/L =\", energy / (block.length * 2))\n\ndef finite_system_algorithm(L, m_warmup, m_sweep_list):\n assert L % 2 == 0 # require that L is an even number\n\n # To keep things simple, this dictionary is not actually saved to disk, but\n # we use it to represent persistent storage.\n block_disk = {} # \"disk\" storage for Block objects\n\n # Use the infinite system algorithm to build up to desired size. Each time\n # we construct a block, we save it for future reference as both a left\n # (\"l\") and right (\"r\") block, as the infinite system algorithm assumes the\n # environment is a mirror image of the system.\n block = initial_block\n block_disk[\"l\", block.length] = block\n block_disk[\"r\", block.length] = block\n while 2 * block.length < L:\n # Perform a single DMRG step and save the new Block to \"disk\"\n print(graphic(block, block))\n block, energy = single_dmrg_step(block, block, m=m_warmup)\n print(\"E/L =\", energy / (block.length * 2))\n block_disk[\"l\", block.length] = block\n block_disk[\"r\", block.length] = block\n\n # Now that the system is built up to its full size, we perform sweeps using\n # the finite system algorithm. 
At first the left block will act as the\n # system, growing at the expense of the right block (the environment), but\n # once we come to the end of the chain these roles will be reversed.\n sys_label, env_label = \"l\", \"r\"\n sys_block = block; del block # rename the variable\n for m in m_sweep_list:\n while True:\n # Load the appropriate environment block from \"disk\"\n env_block = block_disk[env_label, L - sys_block.length - 2]\n if env_block.length == 1:\n # We've come to the end of the chain, so we reverse course.\n sys_block, env_block = env_block, sys_block\n sys_label, env_label = env_label, sys_label\n\n # Perform a single DMRG step.\n print(graphic(sys_block, env_block, sys_label))\n sys_block, energy = single_dmrg_step(sys_block, env_block, m=m)\n\n print(\"E/L =\", energy / L)\n\n # Save the block from this step to disk.\n block_disk[sys_label, sys_block.length] = sys_block\n\n # Check whether we just completed a full sweep.\n if sys_label == \"l\" and 2 * sys_block.length == L:\n break # escape from the \"while True\" loop\n\nif __name__ == \"__main__\":\n np.set_printoptions(precision=10, suppress=True, threshold=10000, linewidth=300)\n\n #infinite_system_algorithm(L=100, m=20)\n finite_system_algorithm(L=20, m_warmup=10, m_sweep_list=[10, 20, 30, 40, 40])\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.set_printoptions", "numpy.linalg.eigh", "scipy.sparse.identity", "scipy.sparse.linalg.eigsh", "scipy.sparse.kron" ] ]
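As a quick self-contained check of the two-site Hamiltonian H2 used throughout simple_dmrg_02_finite_system.py above: for two bare spin-1/2 sites with J = Jz = 1, the spectrum is one singlet at -0.75 and a triplet at +0.25. A minimal sketch, assuming only numpy and scipy, follows.

import numpy as np
from scipy.sparse import kron

Sz = np.array([[0.5, 0.0], [0.0, -0.5]])  # single-site S^z, as above
Sp = np.array([[0.0, 1.0], [0.0, 0.0]])   # single-site S^+, as above

def H2(Sz1, Sp1, Sz2, Sp2, J=1.0, Jz=1.0):
    # Same two-site Kronecker-product term as in the entry above.
    return ((J / 2) * (kron(Sp1, Sp2.conjugate().transpose())
                       + kron(Sp1.conjugate().transpose(), Sp2))
            + Jz * kron(Sz1, Sz2))

H = H2(Sz, Sp, Sz, Sp).toarray()
print(np.linalg.eigvalsh(H))  # [-0.75, 0.25, 0.25, 0.25]
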
Henistein/tinygrad
[ "e3be28c825634fa92225a99ea712c2ea58d4dc6a" ]
[ "test/test_train.py" ]
[ "import os\nimport unittest\nimport time\nimport tinygrad.optim as optim\nimport numpy as np\nfrom tinygrad.tensor import Tensor\nfrom extra.training import train\nfrom extra.utils import get_parameters\nfrom models.efficientnet import EfficientNet\nfrom models.transformer import Transformer\nfrom models.resnet import ResNet18, ResNet34, ResNet50\n\nBS = int(os.getenv(\"BS\", \"4\"))\n\ndef train_one_step(model,X,Y):\n params = get_parameters(model)\n pcount = 0\n for p in params:\n pcount += np.prod(p.shape)\n optimizer = optim.Adam(params, lr=0.001)\n print(\"stepping %r with %.1fM params bs %d\" % (type(model), pcount/1e6, BS))\n st = time.time()\n train(model, X, Y, optimizer, steps=1, BS=BS)\n et = time.time()-st\n print(\"done in %.2f ms\" % (et*1000.))\n\nclass TestTrain(unittest.TestCase):\n def test_efficientnet(self):\n model = EfficientNet(0)\n X = np.zeros((BS,3,224,224), dtype=np.float32)\n Y = np.zeros((BS), dtype=np.int32)\n train_one_step(model,X,Y)\n\n def test_transformer(self):\n # this should be small GPT-2, but the param count is wrong\n model = Transformer(syms=10, maxlen=6, layers=12, embed_dim=768, num_heads=12)\n X = np.zeros((BS,6), dtype=np.float32)\n Y = np.zeros((BS,6), dtype=np.int32)\n train_one_step(model,X,Y)\n\n def test_resnet(self):\n X = np.zeros((BS, 3, 224, 224), dtype=np.float32)\n Y = np.zeros((BS), dtype=np.int32)\n for resnet_v in [ResNet18, ResNet34, ResNet50]:\n model = resnet_v(num_classes=1000, pretrained=True)\n train_one_step(model, X, Y)\n\n def test_bert(self):\n # TODO: write this\n pass\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.prod", "numpy.zeros" ] ]
Fredpwol/pygnet
[ "02315c95ef6ee884bd55220a215c4bdd4f035317" ]
[ "graphnet/_vis/decorators.py" ]
[ "import numpy as np \n\n\n\ndef preproccess_plot(func):\n \"\"\"\n creates coordinate in the plot for nodes. \n \"\"\"\n def wrapper(graph, ax, n, weighted, shrinkA, shrinkB, layout, polygon_radius, attr, *args, **kwargs):\n space = np.linspace(0,1,n+1)\n wrapper.scale = 100 // (n+1)\n size = wrapper.scale + 10\n wrapper.nodes = graph.get_nodes\n x = []\n y = []\n if layout == \"random\":\n np.random.shuffle(wrapper.nodes)\n for i in range(0, len(space)-1):\n point = (space[i]+space[i+1]) / 2\n x.append(point)\n y = [np.random.random() for _ in range(n)]\n elif layout == \"polygon\":\n for i in range(n):\n x.append(polygon_radius*np.cos(2*np.pi*i/n))\n y.append(polygon_radius*np.sin(2*np.pi*i/n))\n for i, node in enumerate(wrapper.nodes):\n wrapper.points[node] = (x[i], y[i]) \n\n func(graph, ax, n, weighted, shrinkA, shrinkB, layout, polygon_radius, attr, *args, **kwargs)\n for i, node in enumerate(wrapper.nodes):\n ax.plot(x[i], y[i], \"o\",markersize=size + size * node.radius, color=node.color)\n\n for node in wrapper.points:\n #\n x, y = wrapper.points[node]\n value = eval('node.%s'%attr)\n ax.annotate(value, (x, y))\n wrapper.scale = 0\n wrapper.points = dict()\n wrapper.nodes = []\n return wrapper\n " ]
[ [ "numpy.sin", "numpy.random.shuffle", "numpy.cos", "numpy.random.random", "numpy.linspace" ] ]
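The plotting decorator above supports a "polygon" layout that places the n nodes at equal angles on a circle of radius polygon_radius. Below is a minimal standalone sketch of just that placement; the node count, radius, and plotting details are illustrative, and the decorator's graph/axes wiring is omitted.

import numpy as np
import matplotlib.pyplot as plt

def polygon_layout(n, radius=1.0):
    # n points spaced evenly on a circle, as in the "polygon" branch above.
    angles = 2 * np.pi * np.arange(n) / n
    return radius * np.cos(angles), radius * np.sin(angles)

x, y = polygon_layout(6, radius=2.0)  # illustrative node count and radius
fig, ax = plt.subplots()
ax.plot(x, y, "o", markersize=12)
for i, (xi, yi) in enumerate(zip(x, y)):
    ax.annotate(str(i), (xi, yi))
ax.set_aspect("equal")
plt.show()
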
Lucciola111/stream_autoencoder_windowing
[ "5456b07bd20220c987598db2cdb832d8195e1575", "5456b07bd20220c987598db2cdb832d8195e1575" ]
[ "TrainingFunctions/DetectDriftADWINOnline.py", "Evaluation/Generate_withIterations_TimeComplexityAnalysis.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom skmultiflow.drift_detection.adwin import ADWIN\nfrom Utility_Functions.ComputeErrorPerDim import compute_error_per_dim\nfrom TrainingFunctions.BuildAutoencoder import build_autoencoder\n\n\ndef detect_drift_adwin_online(\n test_X, idx, dist, adwin, autoencoder, encoder, decoder, drift_epochs, drift_batch_size, early_stopping,\n delta_adwin,\n all_losses, final_losses, model=False, test_y=False,\n fit_new_ae_after_drift=False, fit_after_drift=False,\n initialize_new_adwin_after_drift=False, feed_adwin_with_refreshed_re_after_drift=False):\n \"\"\"\n\n Parameters\n ----------\n test_X: Data stream with all test data\n idx: Index of outer loop\n dist: List of reconstruction errors of new batch\n adwin: ADWIN change detector\n autoencoder: Trained autoencoder\n encoder: Encoder which belongs to autoencoder\n decoder: Decoder which belongs to autoencoder\n drift_epochs: epochs for training after drift was detected\n drift_batch_size: batch size for training after drift was detected\n early_stopping: determine whether early stopping callback should be used\n delta_adwin: Delta value for ADWIN\n all_losses: Array with losses of each gradient decent\n final_losses: Array with losses of each last training/update epoch\n model: The model if algorithm should be validated with classifier\n test_y: The test labels if algorithm should be validated with classifier\n fit_new_ae_after_drift: Whether a new autoencoder should be trained after drift\n fit_after_drift: Whether autoencoder should be updated by fitting again after drift on ADWIN window\n initialize_new_adwin_after_drift: Whether ADWIN should be initialized after drift\n feed_adwin_with_refreshed_re_after_drift: Whether ADWIN should be refilled after drift\n\n Returns the widths of ADWIN and the detected drift points and further necessary parameters for algorithm\n -------\n\n \"\"\"\n # 1. Initialize arrays\n widths = []\n fusioned_refreshed_dist_adwin = []\n drift_decisions = [False] * len(dist)\n errors_per_dimension = []\n # all_errors_per_dimension = []\n weights_copy = False\n\n # 2. Adding stream elements to ADWIN and verifying if drift occurred\n for local_idx in range(len(dist)):\n global_idx = idx + local_idx\n\n # 3. Adding stream elements to ADWIN and verifying if drift occurred\n current_dist = dist[local_idx]\n adwin.add_element(current_dist)\n if adwin.detected_change():\n # 4. Save drift point, error per dimension, and weights of AE\n # Save drift point\n print(f\"Change in index {global_idx} for stream value {dist[local_idx]}\")\n drift_decisions[local_idx] = True\n # Save reconstruction error per dimension of drift point\n error_per_dimension = compute_error_per_dim(point=global_idx, data_stream_test_x=test_X, encoder=encoder,\n decoder=decoder)\n errors_per_dimension.append(error_per_dimension)\n # Save weights of current autoencoder to detect \"where\"\n weights_copy = autoencoder.get_weights()\n\n # 5. 
Test-then-Train: Define ADWIN window as new train data stream\n window_train_X = test_X[(global_idx - adwin.width): global_idx]\n # 5.1 A new autoencoder should be trained after drift\n if fit_new_ae_after_drift:\n autoencoder, encoder, decoder = build_autoencoder(\n n_dimensions=autoencoder.input_shape[1], size_encoder=autoencoder.layers[1].output_shape[1])\n\n # 5.2 Update autoencoder by fitting (again) after drift on ADWIN window\n if fit_after_drift or fit_new_ae_after_drift:\n # Callback will stop training when there is no improvement in loss for three consecutive epochs\n callback = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)] if early_stopping else None\n autoencoder.fit(window_train_X, window_train_X,\n epochs=drift_epochs,\n batch_size=drift_batch_size,\n shuffle=True, verbose=0,\n callbacks=callback\n )\n all_losses.extend(autoencoder.history.history['loss'])\n final_losses.append(autoencoder.history.history['loss'][-1])\n\n # 6. Retrain validation model on ADWIN window\n if model and test_y.any():\n window_train_y = test_y[(global_idx - adwin.width): global_idx]\n model.fit(window_train_X, window_train_y)\n\n # 7. Calculate refreshed reconstruction error for current ADWIN window\n # Apply updated autoencoder to current ADWIN window\n encoded_refreshed = encoder.predict(window_train_X)\n decoded_refreshed = decoder.predict(encoded_refreshed)\n # Calculate refreshed reconstruction error(s) of elements in current ADWIN window\n refreshed_dist_adwin = np.linalg.norm(window_train_X - decoded_refreshed, axis=1)\n fusioned_refreshed_dist_adwin[-len(refreshed_dist_adwin):] = refreshed_dist_adwin\n\n # 8. Initialize ADWIN again\n if initialize_new_adwin_after_drift:\n adwin = ADWIN(delta=delta_adwin)\n # 9. Feed ADWIN with refreshed reconstruction errors\n if feed_adwin_with_refreshed_re_after_drift:\n for i in refreshed_dist_adwin:\n adwin.add_element(i)\n\n # 9. Update dist of current tumbling window with refreshed dist\n # Apply updated autoencoder to further elements in tumbling window\n remaining_tw_X = test_X[global_idx:(idx + len(dist))]\n encoded_remaining_tw = encoder.predict(remaining_tw_X)\n decoded_remaining_tw = decoder.predict(encoded_remaining_tw)\n # Calculate refreshed reconstruction error(s) of elements in current ADWIN window\n refreshed_dist_tw = np.linalg.norm(remaining_tw_X - decoded_remaining_tw, axis=1)\n dist[local_idx:] = refreshed_dist_tw\n\n # Append for every instance (also non-drift) the width of ADWIN and the reconstruction error per dimension\n widths.append(adwin.width)\n\n return adwin, widths, drift_decisions, errors_per_dimension, weights_copy, fusioned_refreshed_dist_adwin, \\\n all_losses, final_losses, autoencoder, encoder, decoder\n", "import numpy as np\nimport pandas as pd\nfrom EvaluationFunctions.LoadFrameworkDesignsFilenames import load_framework_designs_filenames\nfrom EvaluationFunctions.LoadCompetitorsFilenames import load_competitors_filenames\nfrom EvaluationFunctions.Load_withIterations_Results import load_with_iterations_results\nfrom Evaluation.Plot_TimeComplexity import plot_time_complexity\n\n# 0. Read in file names of experiment\nexperiments = [\"10Dim\", \"50Dim\", \"100Dim\", \"500Dim\", \"1000Dim\"]\ncompetitors = True\n\ntimes_per_example = []\ndf_times_per_example = pd.DataFrame()\nfor experiment in experiments:\n time_per_example_experiment = {}\n # 1. 
Read in File names\n if competitors:\n path, dataset, result_file_names = load_competitors_filenames(experiment=experiment)\n\n file_names = [\"FILE_SAW_NewAE\", \"FILE_SAW_RetrainAE\", \"FILE_Baseline_ADWIN10\", \"FILE_Baseline_ADWIN10-initialized\",\n \"FILE_Competitor_IBDD\", \"FILE_Competitor_D3\"]\n result_folders = [\"SAW_Autoencoder_ADWIN_Training\", \"SAW_Autoencoder_ADWIN_Training\", \"Baseline_MultipleADWINS\",\n \"Baseline_MultipleADWINS\", \"Competitor_IBDD\", \"Competitor_D3\"]\n experiment_names = [\"SAW (NAE-IAW)\", \"SAW (RAE-IAW)\", \"ADWIN-10\", \"ADWIN-10i\", \"IBDD\", \"D3\"]\n else:\n path, dataset, result_file_names = load_framework_designs_filenames(experiment=experiment)\n\n file_names = [\"FILE_TrainNewAE_KeepADWIN\", \"FILE_TrainNewAE_InitializeADWIN\",\n \"FILE_TrainNewAE_InitializeAndFeedADWIN\",\n \"FILE_RetrainAE_KeepADWIN\", \"FILE_RetrainAE_InitializeADWIN\", \"FILE_RetrainAE_InitializeAndFeedADWIN\"]\n result_folders = [\"SAW_Autoencoder_ADWIN_Training\"] * 6\n experiment_names = [\"NAE-KAW\", \"NAE-IAW\", \"NAE-RAW\", \"RAE-KAW\", \"RAE-IAW\", \"RAE-RAW\"]\n\n # 2. Read in Files and generate evaluation metrics\n for experiment_idx in range(len(file_names)):\n if result_file_names[file_names[experiment_idx]] != '-':\n evaluation_results = load_with_iterations_results(\n file_name=result_file_names[file_names[experiment_idx]], result_folder=result_folders[experiment_idx])\n\n time_per_example = np.round(np.mean(evaluation_results['Time per Example']), 4)\n time_per_example_experiment[experiment_names[experiment_idx]] = time_per_example\n else:\n time_per_example_experiment[experiment_names[experiment_idx]] = 0\n\n # Append accuracies of experiment to list of all experiments\n times_per_example.append(time_per_example_experiment)\n\n# Create data frame\ntimes_per_example_table = pd.DataFrame(data=times_per_example, index=experiments)\n\ntimes_per_example_table.to_csv('EvaluationFiles/Competitors/EXPERIMENT_'\n + str(experiment) + '_TIMECOMPLEXITY_EVALUATION_Competitors.csv')\n\nif competitors:\n plot_file_name = \"Figure_5_TimeComplexity_Competitors\"\nelse:\n plot_file_name = \"Figure_5_TimeComplexity_FrameworkDesign\"\n\nplot_time_complexity(data=times_per_example_table, competitors=competitors, plot_file_name=plot_file_name, latex_font=True)\n\n" ]
[ [ "numpy.linalg.norm", "tensorflow.keras.callbacks.EarlyStopping" ], [ "pandas.DataFrame", "numpy.mean" ] ]
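The first file in the row above (TrainingFunctions/DetectDriftADWINOnline.py) feeds autoencoder reconstruction errors into scikit-multiflow's ADWIN detector and treats a detected change as concept drift. A minimal sketch of that core loop follows, using only the ADWIN calls that appear in the stored code (`ADWIN(delta=...)`, `add_element`, `detected_change`, `width`); the synthetic error stream and the chosen delta value are illustrative assumptions.

```python
import numpy as np
from skmultiflow.drift_detection.adwin import ADWIN

# Synthetic reconstruction-error stream: low errors, then a jump that simulates drift.
rng = np.random.default_rng(0)
errors = np.concatenate([rng.normal(0.05, 0.01, 500),
                         rng.normal(0.30, 0.05, 500)])

adwin = ADWIN(delta=0.002)  # delta plays the role of delta_adwin in the stored code
for idx, err in enumerate(errors):
    adwin.add_element(err)
    if adwin.detected_change():
        print(f"Drift detected at index {idx}, ADWIN window width = {adwin.width}")
```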
hekun520/MEC_offloading
[ "42b17c4172f10ae15d13cc1c30f1389904be647f" ]
[ "simulation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n MEC_offloading.simulation\n ~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Simulation for the MEC_offloading\n\n :copyright: (c) 2018 by Giorgos Mitsis.\n :license: MIT License, see LICENSE for more details.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom parameters import *\nfrom helper_functions import *\nfrom game_functions import *\nfrom server_selection_functions import *\nfrom metrics import *\nfrom plots import *\nfrom create_plots import *\n\nimport time\nimport itertools\nimport dill\n\n# Keep only three decimal places when printing numbers\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\n# Generate all cases\ncases_setup = {\n 'users': ['homo','hetero'],\n 'servers': ['homo','hetero','one-dominant','two-dominant']\n }\n\nkeys, values = zip(*cases_setup.items())\n\n# Select which case to run\ncases = [{\"users\": \"hetero\", \"servers\": \"hetero\"}]\n# cases = [dict(zip(keys, v)) for v in itertools.product(*values)]\n\nfor repetition in range(1000):\n print(\"Repetition no: \" + str(repetition+1))\n\n results = {}\n for case in cases:\n\n if LOAD_SAVED_PARAMETERS == True:\n print(\"Loading parameters\")\n infile = \"saved_runs/parameters/\" + case[\"users\"] + \"_\" + case[\"servers\"] + \"_lr_\" + \"0.20\"\n\n with open(infile, 'rb') as in_strm:\n params = dill.load(in_strm)\n else:\n # Set random parameter in order to generate the same parameters\n print(\"Generating new parameters\")\n np.random.seed(13)\n params = set_parameters(case)\n\n U = params['U']\n S = params['S']\n fs = params['fs']\n c = params['c']\n b_max = params['b_max']\n\n start = time.time()\n\n # Initialize empty arrays for results\n all_server_selected = all_bytes_offloaded = all_user_utility = np.empty((0,U), int)\n all_bytes_to_server = all_prices = all_c = all_fs = all_relative_price = all_server_welfare = all_Rs = all_congestion = all_penetration = np.empty((0,S), int)\n all_probabilities = [[] for i in range(U)]\n\n # Get the initial values for probabilities and prices\n probabilities, prices = initialize(**params)\n\n for i in range(U):\n all_probabilities[i].append(probabilities[i])\n\n if CONSTANT_PRICING:\n # Set constant price if needed\n constant_price = np.array([1.96, 1.88, 1.94, 1.78, 1.92])\n prices = constant_price\n\n # Repeat until every user is sure on the selected server\n while not all_users_sure(probabilities):\n # Each user selects a server to which he will offload computation\n server_selected = server_selection(probabilities, **params)\n # Add the selected servers as a row in the matrix\n all_server_selected = np.append(all_server_selected, [server_selected], axis=0)\n\n # Game starts in order to converge to the optimum values of data offloading\n # Repeat until convergence for both users and servers\n\n if CONSTANT_OFFLOADING:\n b_old = np.ones(U) * 0.586 * b_max\n else:\n b_old = np.ones(U)\n\n prices_old = np.ones(S)\n\n converged = False\n while not converged:\n # Users play a game to converge to the Nash Equilibrium\n if CONSTANT_OFFLOADING:\n b = b_old\n else:\n b = play_offloading_game(server_selected, b_old, prices_old, **params)\n\n if CONSTANT_PRICING:\n # Servers set their next price as they had initally set\n prices = constant_price\n else:\n # Servers update their prices based on the users' offloading of data\n prices = play_pricing_game(server_selected, b, **params)\n\n # Check if game has converged\n converged = game_converged(b,b_old,prices,prices_old, **params)\n\n b_old = b\n prices_old = 
prices\n\n all_bytes_offloaded = np.append(all_bytes_offloaded, [b], axis=0)\n\n # Find all bytes that are offloaded to each server\n bytes_to_server = np.bincount(server_selected, b, minlength=S)\n all_bytes_to_server = np.append(all_bytes_to_server, [bytes_to_server], axis=0)\n\n all_prices = np.append(all_prices, [prices], axis=0)\n\n all_fs = np.append(all_fs, [fs], axis=0)\n all_c = np.append(all_c, [c], axis=0)\n\n # Calculate the welfare of the servers\n server_welfare = calculate_server_welfare(prices, bytes_to_server, **params)\n all_server_welfare = np.append(all_server_welfare, [server_welfare], axis=0)\n\n # Calculate the perceived utility of the users\n user_utility = calculate_user_utility(b, server_selected, prices, **params)\n all_user_utility = np.append(all_user_utility, [user_utility], axis=0)\n\n # Calculate the competitiveness of each server\n Rs,relative_price,congestion,penetration = calculate_competitiveness(all_bytes_to_server, all_fs, all_prices, **params)\n\n all_Rs = np.append(all_Rs, [Rs], axis=0)\n all_congestion = np.append(all_congestion, [congestion], axis=0)\n all_penetration = np.append(all_penetration, [penetration], axis=0)\n all_relative_price = np.append(all_relative_price, [relative_price], axis=0)\n\n # Update the probabilities\n probabilities = update_probabilities(Rs, probabilities, server_selected, b, **params)\n\n for i in range(U):\n all_probabilities[i].append(probabilities[i])\n\n for i in range(len(all_probabilities)):\n all_probabilities[i] = np.array(all_probabilities[i])\n all_probabilities = np.array(all_probabilities)\n\n end = time.time()\n running_time = end - start\n print(\"Time of simulation:\")\n print(running_time)\n\n # Keep results in a dictionary in order to save and plot them\n key = case[\"users\"] + \"_\" + case[\"servers\"]\n results[key] = {\n \"all_bytes_offloaded\": all_bytes_offloaded,\n \"all_server_selected\": all_server_selected,\n \"all_prices\": all_prices,\n \"all_bytes_to_server\": all_bytes_to_server,\n \"all_server_welfare\": all_server_welfare,\n \"all_user_utility\": all_user_utility,\n \"all_Rs\": all_Rs,\n \"all_relative_price\": all_relative_price,\n \"all_congestion\": all_congestion,\n \"all_penetration\": all_penetration,\n \"all_fs\": all_fs,\n \"all_c\": all_c,\n \"all_probabilities\": all_probabilities,\n \"running_time\": running_time\n }\n\n # Save parameters and results\n if SAVE_PARAMETERS == True:\n if CONSTANT_PRICING == True:\n outfile = \"saved_runs/parameters/\" + case[\"users\"] + \"_\" + case[\"servers\"] + \"_lr_\" + \"{0:.2f}\".format(params[\"learning_rate\"]) + \"_constant-pricing\"\n else:\n outfile = \"saved_runs/parameters/\" + case[\"users\"] + \"_\" + case[\"servers\"] + \"_lr_\" + \"{0:.2f}\".format(params[\"learning_rate\"])\n\n with open(outfile, 'wb') as fp:\n dill.dump(params, fp)\n\n if SAVE_RESULTS == True:\n if CONSTANT_PRICING == True:\n outfile = 'saved_runs/results/individual/' + case[\"users\"] + \"_\" + case[\"servers\"] + \"_lr_\" + \"{0:.2f}\".format(params[\"learning_rate\"]) + \"_constant-pricing\" + \"_rep_\" + str(repetition+1)\n else:\n outfile = 'saved_runs/results/individual/' + case[\"users\"] + \"_\" + case[\"servers\"] + \"_lr_\" + \"{0:.2f}\".format(params[\"learning_rate\"]) + \"_rep_\" + str(repetition+1)\n\n with open(outfile , 'wb') as fp:\n dill.dump(results[key], fp)\n\n# Create the plots\n# create_plots(results, cases, params)\n" ]
[ [ "numpy.bincount", "numpy.array", "numpy.empty", "numpy.random.seed", "numpy.ones", "numpy.append" ] ]
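In the MEC_offloading simulation above, the bytes each server receives are aggregated with `np.bincount(server_selected, b, minlength=S)`. The short sketch below isolates that aggregation step with hypothetical user choices and byte counts, to show how the weighted bincount produces the per-server totals.

```python
import numpy as np

S = 5                                             # number of servers (illustrative)
server_selected = np.array([0, 2, 2, 4, 1, 0])    # hypothetical per-user server choices
b = np.array([1.2, 0.8, 1.5, 0.3, 2.0, 0.7])      # hypothetical bytes offloaded per user

# Sum the offloaded bytes per chosen server, analogous to bytes_to_server above.
bytes_to_server = np.bincount(server_selected, weights=b, minlength=S)
print(bytes_to_server)  # [1.9 2.  2.3 0.  0.3]
```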
TalhaUsuf/RetinaNet_W9_form
[ "2a66bba1de96bebd679775b841d95ac7dcfcbbbe", "2a66bba1de96bebd679775b841d95ac7dcfcbbbe", "2a66bba1de96bebd679775b841d95ac7dcfcbbbe" ]
[ "detectron_/engine/train_loop.py", "tests/data/test_transforms.py", "detectron_/modeling/roi_heads/rotated_fast_rcnn.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport logging\nimport numpy as np\nimport time\nimport weakref\nfrom typing import List, Mapping, Optional\nimport torch\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\n\nimport detectron.utils.comm as comm\nfrom detectron.utils.events import EventStorage, get_event_storage\nfrom detectron.utils.logger import _log_api_usage\n\n__all__ = [\"HookBase\", \"TrainerBase\", \"SimpleTrainer\", \"AMPTrainer\"]\n\n\nclass HookBase:\n \"\"\"\n Base class for hooks that can be registered with :class:`TrainerBase`.\n\n Each hook can implement 4 methods. The way they are called is demonstrated\n in the following snippet:\n ::\n hook.before_train()\n for iter in range(start_iter, max_iter):\n hook.before_step()\n trainer.run_step()\n hook.after_step()\n iter += 1\n hook.after_train()\n\n Notes:\n 1. In the hook method, users can access ``self.trainer`` to access more\n properties about the context (e.g., model, current iteration, or config\n if using :class:`DefaultTrainer`).\n\n 2. A hook that does something in :meth:`before_step` can often be\n implemented equivalently in :meth:`after_step`.\n If the hook takes non-trivial time, it is strongly recommended to\n implement the hook in :meth:`after_step` instead of :meth:`before_step`.\n The convention is that :meth:`before_step` should only take negligible time.\n\n Following this convention will allow hooks that do care about the difference\n between :meth:`before_step` and :meth:`after_step` (e.g., timer) to\n function properly.\n\n \"\"\"\n\n trainer: \"TrainerBase\" = None\n \"\"\"\n A weak reference to the trainer object. Set by the trainer when the hook is registered.\n \"\"\"\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n pass\n\n def after_train(self):\n \"\"\"\n Called after the last iteration.\n \"\"\"\n pass\n\n def before_step(self):\n \"\"\"\n Called before each iteration.\n \"\"\"\n pass\n\n def after_step(self):\n \"\"\"\n Called after each iteration.\n \"\"\"\n pass\n\n def state_dict(self):\n \"\"\"\n Hooks are stateless by default, but can be made checkpointable by\n implementing `state_dict` and `load_state_dict`.\n \"\"\"\n return {}\n\n\nclass TrainerBase:\n \"\"\"\n Base class for iterative trainer with hooks.\n\n The only assumption we made here is: the training runs in a loop.\n A subclass can implement what the loop is.\n We made no assumptions about the existence of dataloader, optimizer, model, etc.\n\n Attributes:\n iter(int): the current iteration.\n\n start_iter(int): The iteration to start with.\n By convention the minimum possible value is 0.\n\n max_iter(int): The iteration to end training.\n\n storage(EventStorage): An EventStorage that's opened during the course of training.\n \"\"\"\n\n def __init__(self) -> None:\n self._hooks: List[HookBase] = []\n self.iter: int = 0\n self.start_iter: int = 0\n self.max_iter: int\n self.storage: EventStorage\n _log_api_usage(\"trainer.\" + self.__class__.__name__)\n\n def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:\n \"\"\"\n Register hooks to the trainer. 
The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n \"\"\"\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)\n\n def train(self, start_iter: int, max_iter: int):\n \"\"\"\n Args:\n start_iter, max_iter (int): See docs above\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n\n self.iter = self.start_iter = start_iter\n self.max_iter = max_iter\n\n with EventStorage(start_iter) as self.storage:\n try:\n self.before_train()\n for self.iter in range(start_iter, max_iter):\n self.before_step()\n self.run_step()\n self.after_step()\n # self.iter == max_iter can be used by `after_train` to\n # tell whether the training successfully finished or failed\n # due to exceptions.\n self.iter += 1\n except Exception:\n logger.exception(\"Exception during training:\")\n raise\n finally:\n self.after_train()\n\n def before_train(self):\n for h in self._hooks:\n h.before_train()\n\n def after_train(self):\n self.storage.iter = self.iter\n for h in self._hooks:\n h.after_train()\n\n def before_step(self):\n # Maintain the invariant that storage.iter == trainer.iter\n # for the entire execution of each step\n self.storage.iter = self.iter\n\n for h in self._hooks:\n h.before_step()\n\n def after_step(self):\n for h in self._hooks:\n h.after_step()\n\n def run_step(self):\n raise NotImplementedError\n\n def state_dict(self):\n ret = {\"iteration\": self.iter}\n hooks_state = {}\n for h in self._hooks:\n sd = h.state_dict()\n if sd:\n name = type(h).__qualname__\n if name in hooks_state:\n # TODO handle repetitive stateful hooks\n continue\n hooks_state[name] = sd\n if hooks_state:\n ret[\"hooks\"] = hooks_state\n return ret\n\n def load_state_dict(self, state_dict):\n logger = logging.getLogger(__name__)\n self.iter = state_dict[\"iteration\"]\n for key, value in state_dict.get(\"hooks\", {}).items():\n for h in self._hooks:\n try:\n name = type(h).__qualname__\n except AttributeError:\n continue\n if name == key:\n h.load_state_dict(value)\n break\n else:\n logger.warning(f\"Cannot find the hook '{key}', its state_dict is ignored.\")\n\n\nclass SimpleTrainer(TrainerBase):\n \"\"\"\n A simple trainer for the most common type of task:\n single-cost single-optimizer single-data-source iterative optimization,\n optionally using data-parallelism.\n It assumes that every step, you:\n\n 1. Compute the loss with a data from the data_loader.\n 2. Compute the gradients with the above loss.\n 3. Update the model with the optimizer.\n\n All other tasks during training (checkpointing, logging, evaluation, LR schedule)\n are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.\n\n If you want to do anything fancier than this,\n either subclass TrainerBase and implement your own `run_step`,\n or write your own training loop.\n \"\"\"\n\n def __init__(self, model, data_loader, optimizer):\n \"\"\"\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. 
Contains data to be used to call model.\n optimizer: a torch optimizer.\n \"\"\"\n super().__init__()\n\n \"\"\"\n We set the model to training mode in the trainer.\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n \"\"\"\n model.train()\n\n self.model = model\n self.data_loader = data_loader\n self._data_loader_iter = iter(data_loader)\n self.optimizer = optimizer\n\n def run_step(self):\n \"\"\"\n Implement the standard training logic described above.\n \"\"\"\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If you want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If you want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n if isinstance(loss_dict, torch.Tensor):\n losses = loss_dict\n loss_dict = {\"total_loss\": loss_dict}\n else:\n losses = sum(loss_dict.values())\n\n \"\"\"\n If you need to accumulate gradients or do something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n self._write_metrics(loss_dict, data_time)\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method. But it is\n suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4\n \"\"\"\n self.optimizer.step()\n\n def _write_metrics(\n self,\n loss_dict: Mapping[str, torch.Tensor],\n data_time: float,\n prefix: str = \"\",\n ) -> None:\n SimpleTrainer.write_metrics(loss_dict, data_time, prefix)\n\n @staticmethod\n def write_metrics(\n loss_dict: Mapping[str, torch.Tensor],\n data_time: float,\n prefix: str = \"\",\n ) -> None:\n \"\"\"\n Args:\n loss_dict (dict): dict of scalar losses\n data_time (float): time taken by the dataloader iteration\n prefix (str): prefix for logging keys\n \"\"\"\n metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}\n metrics_dict[\"data_time\"] = data_time\n\n # Gather metrics among all workers for logging\n # This assumes we do DDP-style training, which is currently the only\n # supported method in detectron2.\n all_metrics_dict = comm.gather(metrics_dict)\n\n if comm.is_main_process():\n storage = get_event_storage()\n\n # data_time among workers can have high variance. 
The actual latency\n # caused by data_time is the maximum among workers.\n data_time = np.max([x.pop(\"data_time\") for x in all_metrics_dict])\n storage.put_scalar(\"data_time\", data_time)\n\n # average the rest metrics\n metrics_dict = {\n k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()\n }\n total_losses_reduced = sum(metrics_dict.values())\n if not np.isfinite(total_losses_reduced):\n raise FloatingPointError(\n f\"Loss became infinite or NaN at iteration={storage.iter}!\\n\"\n f\"loss_dict = {metrics_dict}\"\n )\n\n storage.put_scalar(\"{}total_loss\".format(prefix), total_losses_reduced)\n if len(metrics_dict) > 1:\n storage.put_scalars(**metrics_dict)\n\n def state_dict(self):\n ret = super().state_dict()\n ret[\"optimizer\"] = self.optimizer.state_dict()\n return ret\n\n def load_state_dict(self, state_dict):\n super().load_state_dict(state_dict)\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\n\n\nclass AMPTrainer(SimpleTrainer):\n \"\"\"\n Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision\n in the training loop.\n \"\"\"\n\n def __init__(self, model, data_loader, optimizer, grad_scaler=None):\n \"\"\"\n Args:\n model, data_loader, optimizer: same as in :class:`SimpleTrainer`.\n grad_scaler: torch GradScaler to automatically scale gradients.\n \"\"\"\n unsupported = \"AMPTrainer does not support single-process multi-device training!\"\n if isinstance(model, DistributedDataParallel):\n assert not (model.device_ids and len(model.device_ids) > 1), unsupported\n assert not isinstance(model, DataParallel), unsupported\n\n super().__init__(model, data_loader, optimizer)\n\n if grad_scaler is None:\n from torch.cuda.amp import GradScaler\n\n grad_scaler = GradScaler()\n self.grad_scaler = grad_scaler\n\n def run_step(self):\n \"\"\"\n Implement the AMP training logic.\n \"\"\"\n assert self.model.training, \"[AMPTrainer] model was changed to eval mode!\"\n assert torch.cuda.is_available(), \"[AMPTrainer] CUDA is required for AMP training!\"\n from torch.cuda.amp import autocast\n\n start = time.perf_counter()\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n with autocast():\n loss_dict = self.model(data)\n if isinstance(loss_dict, torch.Tensor):\n losses = loss_dict\n loss_dict = {\"total_loss\": loss_dict}\n else:\n losses = sum(loss_dict.values())\n\n self.optimizer.zero_grad()\n self.grad_scaler.scale(losses).backward()\n\n self._write_metrics(loss_dict, data_time)\n\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n\n def state_dict(self):\n ret = super().state_dict()\n ret[\"grad_scaler\"] = self.grad_scaler.state_dict()\n return ret\n\n def load_state_dict(self, state_dict):\n super().load_state_dict(state_dict)\n self.grad_scaler.load_state_dict(state_dict[\"grad_scaler\"])\n", "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n\nimport logging\nimport numpy as np\nimport unittest\nfrom unittest import mock\nfrom PIL import Image, ImageOps\n\nfrom detectron.config import get_cfg\nfrom detectron.data import detection_utils\nfrom detectron.data import transforms as T\nfrom detectron.utils.logger import setup_logger\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestTransforms(unittest.TestCase):\n def setUp(self):\n setup_logger()\n\n def test_apply_rotated_boxes(self):\n np.random.seed(125)\n cfg = get_cfg()\n is_train = True\n augs = detection_utils.build_augmentation(cfg, is_train)\n image = np.random.rand(200, 300)\n image, transforms = T.apply_augmentations(augs, image)\n image_shape = image.shape[:2] # h, w\n assert image_shape == (800, 1200)\n annotation = {\"bbox\": [179, 97, 62, 40, -56]}\n\n boxes = np.array([annotation[\"bbox\"]], dtype=np.float64) # boxes.shape = (1, 5)\n transformed_bbox = transforms.apply_rotated_box(boxes)[0]\n\n expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)\n err_msg = \"transformed_bbox = {}, expected {}\".format(transformed_bbox, expected_bbox)\n assert np.allclose(transformed_bbox, expected_bbox), err_msg\n\n def test_resize_and_crop(self):\n np.random.seed(125)\n min_scale = 0.2\n max_scale = 2.0\n target_height = 1100\n target_width = 1000\n resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width)\n fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width))\n hflip_aug = T.RandomFlip()\n augs = [resize_aug, fixed_size_crop_aug, hflip_aug]\n original_image = np.random.rand(900, 800)\n image, transforms = T.apply_augmentations(augs, original_image)\n image_shape = image.shape[:2] # h, w\n self.assertEqual((1100, 1000), image_shape)\n\n boxes = np.array(\n [[91, 46, 144, 111], [523, 251, 614, 295]],\n dtype=np.float64,\n )\n transformed_bboxs = transforms.apply_box(boxes)\n expected_bboxs = np.array(\n [\n [895.42, 33.42666667, 933.91125, 80.66],\n [554.0825, 182.39333333, 620.17125, 214.36666667],\n ],\n dtype=np.float64,\n )\n err_msg = \"transformed_bbox = {}, expected {}\".format(transformed_bboxs, expected_bboxs)\n self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)\n\n polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]])\n transformed_polygons = transforms.apply_polygons([polygon])\n expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]])\n self.assertEqual(1, len(transformed_polygons))\n err_msg = \"transformed_polygon = {}, expected {}\".format(\n transformed_polygons[0], expected_polygon\n )\n self.assertTrue(np.allclose(transformed_polygons[0], expected_polygon), err_msg)\n\n def test_apply_rotated_boxes_unequal_scaling_factor(self):\n np.random.seed(125)\n h, w = 400, 200\n newh, neww = 800, 800\n image = np.random.rand(h, w)\n augs = []\n augs.append(T.Resize(shape=(newh, neww)))\n image, transforms = T.apply_augmentations(augs, image)\n image_shape = image.shape[:2] # h, w\n assert image_shape == (newh, neww)\n\n boxes = np.array(\n [\n [150, 100, 40, 20, 0],\n [150, 100, 40, 20, 30],\n [150, 100, 40, 20, 90],\n [150, 100, 40, 20, -90],\n ],\n dtype=np.float64,\n )\n transformed_boxes = transforms.apply_rotated_box(boxes)\n\n expected_bboxes = np.array(\n [\n [600, 200, 160, 40, 0],\n [600, 200, 144.22205102, 52.91502622, 49.10660535],\n [600, 200, 80, 80, 90],\n [600, 200, 80, 80, -90],\n ],\n dtype=np.float64,\n )\n err_msg = \"transformed_boxes = {}, expected {}\".format(transformed_boxes, expected_bboxes)\n assert 
np.allclose(transformed_boxes, expected_bboxes), err_msg\n\n def test_print_augmentation(self):\n t = T.RandomCrop(\"relative\", (100, 100))\n self.assertEqual(str(t), \"RandomCrop(crop_type='relative', crop_size=(100, 100))\")\n\n t0 = T.RandomFlip(prob=0.5)\n self.assertEqual(str(t0), \"RandomFlip(prob=0.5)\")\n\n t1 = T.RandomFlip()\n self.assertEqual(str(t1), \"RandomFlip()\")\n\n t = T.AugmentationList([t0, t1])\n self.assertEqual(str(t), f\"AugmentationList[{t0}, {t1}]\")\n\n def test_random_apply_prob_out_of_range_check(self):\n test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False}\n\n for given_probability, is_valid in test_probabilities.items():\n if not is_valid:\n self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability)\n else:\n T.RandomApply(T.NoOpTransform(), prob=given_probability)\n\n def test_random_apply_wrapping_aug_probability_occured_evaluation(self):\n transform_mock = mock.MagicMock(name=\"MockTransform\", spec=T.Augmentation)\n image_mock = mock.MagicMock(name=\"MockImage\")\n random_apply = T.RandomApply(transform_mock, prob=0.001)\n\n with mock.patch.object(random_apply, \"_rand_range\", return_value=0.0001):\n transform = random_apply.get_transform(image_mock)\n transform_mock.get_transform.assert_called_once_with(image_mock)\n self.assertIsNot(transform, transform_mock)\n\n def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self):\n transform_mock = mock.MagicMock(name=\"MockTransform\", spec=T.Transform)\n image_mock = mock.MagicMock(name=\"MockImage\")\n random_apply = T.RandomApply(transform_mock, prob=0.001)\n\n with mock.patch.object(random_apply, \"_rand_range\", return_value=0.0001):\n transform = random_apply.get_transform(image_mock)\n self.assertIs(transform, transform_mock)\n\n def test_random_apply_probability_not_occured_evaluation(self):\n transform_mock = mock.MagicMock(name=\"MockTransform\", spec=T.Augmentation)\n image_mock = mock.MagicMock(name=\"MockImage\")\n random_apply = T.RandomApply(transform_mock, prob=0.001)\n\n with mock.patch.object(random_apply, \"_rand_range\", return_value=0.9):\n transform = random_apply.get_transform(image_mock)\n transform_mock.get_transform.assert_not_called()\n self.assertIsInstance(transform, T.NoOpTransform)\n\n def test_augmentation_input_args(self):\n input_shape = (100, 100)\n output_shape = (50, 50)\n\n # define two augmentations with different args\n class TG1(T.Augmentation):\n def get_transform(self, image, sem_seg):\n return T.ResizeTransform(\n input_shape[0], input_shape[1], output_shape[0], output_shape[1]\n )\n\n class TG2(T.Augmentation):\n def get_transform(self, image):\n assert image.shape[:2] == output_shape # check that TG1 is applied\n return T.HFlipTransform(output_shape[1])\n\n image = np.random.rand(*input_shape).astype(\"float32\")\n sem_seg = (np.random.rand(*input_shape) < 0.5).astype(\"uint8\")\n inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args\n tfms = inputs.apply_augmentations([TG1(), TG2()])\n self.assertIsInstance(tfms[0], T.ResizeTransform)\n self.assertIsInstance(tfms[1], T.HFlipTransform)\n self.assertTrue(inputs.image.shape[:2] == output_shape)\n self.assertTrue(inputs.sem_seg.shape[:2] == output_shape)\n\n class TG3(T.Augmentation):\n def get_transform(self, image, nonexist):\n pass\n\n with self.assertRaises(AttributeError):\n inputs.apply_augmentations([TG3()])\n\n def test_augmentation_list(self):\n input_shape = (100, 100)\n image = 
np.random.rand(*input_shape).astype(\"float32\")\n sem_seg = (np.random.rand(*input_shape) < 0.5).astype(\"uint8\")\n inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args\n\n augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])\n _ = T.AugmentationList([augs, T.Resize(30)])(inputs)\n # 3 in latest fvcore (flattened transformlist), 2 in older\n # self.assertEqual(len(tfms), 3)\n\n def test_color_transforms(self):\n rand_img = np.random.random((100, 100, 3)) * 255\n rand_img = rand_img.astype(\"uint8\")\n\n # Test no-op\n noop_transform = T.ColorTransform(lambda img: img)\n self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img)))\n\n # Test a ImageOps operation\n magnitude = np.random.randint(0, 256)\n solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude))\n expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude)\n self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img)))\n\n def test_resize_transform(self):\n input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]\n output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]\n for in_shape, out_shape in zip(input_shapes, output_shapes):\n in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)\n tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1])\n out_img = tfm.apply_image(in_img)\n self.assertTrue(out_img.shape == out_shape)\n\n def test_extent_transform(self):\n input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]\n src_rect = (20, 20, 80, 80)\n output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]\n for in_shape, out_shape in zip(input_shapes, output_shapes):\n in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)\n tfm = T.ExtentTransform(src_rect, out_shape[:2])\n out_img = tfm.apply_image(in_img)\n self.assertTrue(out_img.shape == out_shape)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\nimport logging\nimport numpy as np\nimport torch\n\nfrom detectron.config import configurable\nfrom detectron.layers import ShapeSpec, batched_nms_rotated\nfrom detectron.structures import Instances, RotatedBoxes, pairwise_iou_rotated\nfrom detectron.utils.events import get_event_storage\n\nfrom ..box_regression import Box2BoxTransformRotated\nfrom ..poolers import ROIPooler\nfrom ..proposal_generator.proposal_utils import add_ground_truth_to_proposals\nfrom .box_head import build_box_head\nfrom .fast_rcnn import FastRCNNOutputLayers\nfrom .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nShape shorthand in this module:\n\n N: number of images in the minibatch\n R: number of ROIs, combined over all images, in the minibatch\n Ri: number of ROIs in image i\n K: number of foreground classes. 
E.g.,there are 80 foreground classes in COCO.\n\nNaming convention:\n\n deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box\n transform (see :class:`box_regression.Box2BoxTransformRotated`).\n\n pred_class_logits: predicted class scores in [-inf, +inf]; use\n softmax(pred_class_logits) to estimate P(class).\n\n gt_classes: ground-truth classification labels in [0, K], where [0, K) represent\n foreground object classes and K represents the background class.\n\n pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals\n to detection box predictions.\n\n gt_proposal_deltas: ground-truth rotated box2box transform deltas\n\"\"\"\n\n\ndef fast_rcnn_inference_rotated(\n boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image\n):\n \"\"\"\n Call `fast_rcnn_inference_single_image_rotated` for all images.\n\n Args:\n boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic\n boxes for each image. Element i has shape (Ri, K * 5) if doing\n class-specific regression, or (Ri, 5) if doing class-agnostic\n regression, where Ri is the number of predicted objects for image i.\n This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.\n scores (list[Tensor]): A list of Tensors of predicted class scores for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of predicted objects\n for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.\n image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.\n score_thresh (float): Only return detections with a confidence score exceeding this\n threshold.\n nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].\n topk_per_image (int): The number of top scoring detections to return. Set < 0 to return\n all detections.\n\n Returns:\n instances: (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the topk most confidence detections.\n kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates\n the corresponding boxes/scores index in [0, Ri) from the input, for image i.\n \"\"\"\n result_per_image = [\n fast_rcnn_inference_single_image_rotated(\n boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image\n )\n for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)\n ]\n return [x[0] for x in result_per_image], [x[1] for x in result_per_image]\n\n\ndef fast_rcnn_inference_single_image_rotated(\n boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image\n):\n \"\"\"\n Single-image inference. 
Return rotated bounding-box detection results by thresholding\n on scores and applying rotated non-maximum suppression (Rotated NMS).\n\n Args:\n Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes\n per image.\n\n Returns:\n Same as `fast_rcnn_inference_rotated`, but for only one image.\n \"\"\"\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)\n if not valid_mask.all():\n boxes = boxes[valid_mask]\n scores = scores[valid_mask]\n\n B = 5 # box dimension\n scores = scores[:, :-1]\n num_bbox_reg_classes = boxes.shape[1] // B\n # Convert to Boxes to use the `clip` function ...\n boxes = RotatedBoxes(boxes.reshape(-1, B))\n boxes.clip(image_shape)\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B\n # Filter results based on detection scores\n filter_mask = scores > score_thresh # R x K\n # R' x 2. First column contains indices of the R predictions;\n # Second column contains indices of classes.\n filter_inds = filter_mask.nonzero()\n if num_bbox_reg_classes == 1:\n boxes = boxes[filter_inds[:, 0], 0]\n else:\n boxes = boxes[filter_mask]\n scores = scores[filter_mask]\n\n # Apply per-class Rotated NMS\n keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)\n if topk_per_image >= 0:\n keep = keep[:topk_per_image]\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]\n\n result = Instances(image_shape)\n result.pred_boxes = RotatedBoxes(boxes)\n result.scores = scores\n result.pred_classes = filter_inds[:, 1]\n\n return result, filter_inds[:, 0]\n\n\nclass RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):\n \"\"\"\n Two linear layers for predicting Rotated Fast R-CNN outputs.\n \"\"\"\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n args = super().from_config(cfg, input_shape)\n args[\"box2box_transform\"] = Box2BoxTransformRotated(\n weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS\n )\n return args\n\n def inference(self, predictions, proposals):\n \"\"\"\n Returns:\n list[Instances]: same as `fast_rcnn_inference_rotated`.\n list[Tensor]: same as `fast_rcnn_inference_rotated`.\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n\n return fast_rcnn_inference_rotated(\n boxes,\n scores,\n image_shapes,\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n\n@ROI_HEADS_REGISTRY.register()\nclass RROIHeads(StandardROIHeads):\n \"\"\"\n This class is used by Rotated Fast R-CNN to detect rotated boxes.\n For now, it only supports box predictions but not mask or keypoints.\n \"\"\"\n\n @configurable\n def __init__(self, **kwargs):\n \"\"\"\n NOTE: this interface is experimental.\n \"\"\"\n super().__init__(**kwargs)\n assert (\n not self.mask_on and not self.keypoint_on\n ), \"Mask/Keypoints not supported in Rotated ROIHeads.\"\n assert not self.train_on_pred_boxes, \"train_on_pred_boxes not implemented for RROIHeads!\"\n\n @classmethod\n def _init_box_head(cls, cfg, input_shape):\n # fmt: off\n in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n # fmt: on\n assert pooler_type in [\"ROIAlignRotated\"], pooler_type\n # assume all channel counts are equal\n in_channels = 
[input_shape[f].channels for f in in_features][0]\n\n box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n box_head = build_box_head(\n cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)\n )\n # This line is the only difference v.s. StandardROIHeads\n box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape)\n return {\n \"box_in_features\": in_features,\n \"box_pooler\": box_pooler,\n \"box_head\": box_head,\n \"box_predictor\": box_predictor,\n }\n\n @torch.no_grad()\n def label_and_sample_proposals(self, proposals, targets):\n \"\"\"\n Prepare some proposals to be used to train the RROI heads.\n It performs box matching between `proposals` and `targets`, and assigns\n training labels to the proposals.\n It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,\n with a fraction of positives that is no larger than `self.positive_sample_fraction.\n\n Args:\n See :meth:`StandardROIHeads.forward`\n\n Returns:\n list[Instances]: length `N` list of `Instances`s containing the proposals\n sampled for training. Each `Instances` has the following fields:\n - proposal_boxes: the rotated proposal boxes\n - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to\n (this is only meaningful if the proposal has a label > 0; if label = 0\n then the ground-truth box is random)\n - gt_classes: the ground-truth classification lable for each proposal\n \"\"\"\n if self.proposal_append_gt:\n proposals = add_ground_truth_to_proposals(targets, proposals)\n\n proposals_with_gt = []\n\n num_fg_samples = []\n num_bg_samples = []\n for proposals_per_image, targets_per_image in zip(proposals, targets):\n has_gt = len(targets_per_image) > 0\n match_quality_matrix = pairwise_iou_rotated(\n targets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n )\n matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)\n sampled_idxs, gt_classes = self._sample_proposals(\n matched_idxs, matched_labels, targets_per_image.gt_classes\n )\n\n proposals_per_image = proposals_per_image[sampled_idxs]\n proposals_per_image.gt_classes = gt_classes\n\n if has_gt:\n sampled_targets = matched_idxs[sampled_idxs]\n proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]\n\n num_bg_samples.append((gt_classes == self.num_classes).sum().item())\n num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n proposals_with_gt.append(proposals_per_image)\n\n # Log the number of fg/bg samples that are selected for training ROI heads\n storage = get_event_storage()\n storage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n storage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n return proposals_with_gt\n" ]
[ [ "torch.cuda.amp.autocast", "numpy.mean", "torch.cuda.is_available", "numpy.isfinite", "torch.cuda.amp.GradScaler" ], [ "numpy.array", "numpy.random.rand", "numpy.random.seed", "numpy.allclose", "numpy.random.randint", "numpy.random.random" ], [ "torch.isfinite", "torch.no_grad", "numpy.mean" ] ]
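The first file in the row above (detectron_/engine/train_loop.py) documents a hook contract in which the trainer calls `before_step`, `run_step`, and `after_step` on every iteration. The toy sketch below mirrors that contract in plain Python only; the class names and the extra iteration argument are simplifications for illustration and do not match detectron's actual hook signatures.

```python
import time


class TimingHook:
    """Toy hook: times each step via the before_step/after_step pair."""

    def before_step(self, it):
        self._t0 = time.perf_counter()

    def after_step(self, it):
        print(f"iter {it}: {time.perf_counter() - self._t0:.4f}s")


class ToyTrainer:
    def __init__(self, hooks):
        self.hooks = hooks

    def run_step(self, it):
        time.sleep(0.01)  # stand-in for forward/backward/optimizer work

    def train(self, max_iter):
        for it in range(max_iter):
            for h in self.hooks:
                h.before_step(it)
            self.run_step(it)
            for h in self.hooks:
                h.after_step(it)


ToyTrainer([TimingHook()]).train(3)
```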
boffomarco/hrp
[ "7017d358b8b53c289d0859f7dc61ca0d134843ed" ]
[ "am_driver_safe/src/Full_batch_EKF.py" ]
[ "#!/usr/bin/env python3\n\n# From https://github.com/AtsushiSakai/PythonRobotics/blob/master/Localization/extended_kalman_filter/extended_kalman_filter.py\n\nimport math\nfrom math import sin, cos, pi\n\nimport rospy\nimport tf\nfrom std_msgs.msg import Header\nfrom geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3, PoseWithCovariance, PoseWithCovarianceStamped, TwistWithCovariance\nfrom sensor_msgs.msg import NavSatFix, Imu\nfrom am_driver.msg import WheelEncoder\nfrom nav_msgs.msg import Odometry\n\nimport math\n\nimport threading\n\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.transform import Rotation as Rot\n\nimport numpy as np\n\nimport pymap3d as pm\n\n# import the random module\nimport random\n\n\nclass Full_EKF():\n\n def __init__(self):\n print(\"Initialising Full_EKF\")\n\n # Define name of the Node\n rospy.init_node(\"Full_EKF\", anonymous=True)\n\n # Define the self.lock to allow multi-threading\n self.lock = threading.Lock()\n\n # Get the current time\n now = rospy.get_time()\n\n # Define set of topics to subscribe to\n\n rospy.Subscriber('cmd_vel', Twist, self.Control)\n self.control_measure = False\n self.control_t = now\n self.control_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n rospy.Subscriber('wheel_encoder', WheelEncoder, self.Encoder)\n self.encoder_measure = False\n self.encoder_t = now\n self.encoder_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n #rospy.Subscriber('imu_left/imu/data', IMU, self.ImuLeft)\n self.imu_left_measure = False\n self.imu_left_t = now\n self.imu_left_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n #rospy.Subscriber('imu_right/imu/data', IMU, self.ImuRight)\n self.imu_right_measure = False\n self.imu_right_t = now\n self.imu_right_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n rospy.Subscriber('GPSfix', NavSatFix, self.GPS)\n self.gps_measure = False\n self.gps_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n #rospy.Subscriber('VisualOdometry', Odometry, self.VisualOdometry)\n self.visual_odometry_measure = False\n self.visual_odometry_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\n # Define set of topics to publish\n\n self.odom_control_pub = rospy.Publisher('Odom_Control', Odometry, queue_size=20)\n\n self.odom_encoder_pub = rospy.Publisher('Odom_Encoder', Odometry, queue_size=20)\n\n #self.odom_imu_l_pub = rospy.Publisher('Odom_IMU_L', Odometry, queue_size=20)\n\n #self.odom_imu_r_pub = rospy.Publisher('Odom_IMU_R', Odometry, queue_size=20)\n\n self.odom_gps_pub = rospy.Publisher('Odom_GPS', Odometry, queue_size=20)\n\n #self.odom_pred_ekf_pub = rospy.Publisher('Odom_Pred_EKF', Odometry, queue_size=20)\n\n self.odom_full_ekf_pub = rospy.Publisher('Odom_Full_EKF', Odometry, queue_size=20)\n\n\n # Kalman states\n self.x_t = 0.0\n self.y_t = 0.0\n self.yaw_t = -0.22 # Manually set to follow the GPS\n self.x_dot_t = 0.0\n self.yaw_dot_t = 0.0\n self.x_dot2_t = 0.0\n\n # State-Vector\n self.X_t = np.array([self.x_t, self.y_t, self.yaw_t,\n self.x_dot_t, self.yaw_dot_t, self.x_dot2_t])\n # Filter Covariance Matrix\n self.P_t = np.eye(6)\n\n\n # Initialise Measurements Vector\n self.Z = np.array([])\n # Initialise Measurements Covariance Matrix\n self.R = np.array([])\n # Initialise Measurements Matrix\n self.H = np.zeros((6,0))\n # Initialise Measurements Jacobian Matrix\n self.J_H = np.zeros((6,0))\n\n\n\n # Prediction step with only the kinematic model\n def Predict(self, dt):\n\n # State-Transition Matrix\n A = np.array([ 
[1.0, 0.0, 0.0, cos(self.X_t[2])*dt, 0.0, cos(self.X_t[2])*(dt**2)/2],\n [0.0, 1.0, 0.0, sin(self.X_t[2])*dt, 0.0, sin(self.X_t[2])*(dt**2)/2],\n [0.0, 0.0, 1.0, 0.0, dt, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n\n # Noise Variance\n sigma_noise = 0.01\n\n # Noise Matrix\n W = np.array([ random.gauss(mu = 0, sigma = sigma_noise),\n random.gauss(mu = 0, sigma = sigma_noise),\n random.gauss(mu = 0, sigma = sigma_noise)/10,\n random.gauss(mu = 0, sigma = sigma_noise)/10,\n random.gauss(mu = 0, sigma = sigma_noise)/100,\n random.gauss(mu = 0, sigma = sigma_noise)/100])\n\n # Jacobian of Transition Matrix\n J_A = np.array([[1.0, 0.0, 0.0, -sin(self.X_t[2])*dt, 0.0, -sin(self.X_t[2])*(dt**2)/2],\n [0.0, 1.0, 0.0, cos(self.X_t[2])*dt, 0.0, cos(self.X_t[2])*(dt**2)/2],\n [0.0, 0.0, 1.0, 0.0, dt, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n\n # Prediction Covariance\n Q = np.array([ [sigma_noise, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, sigma_noise, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, sigma_noise, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, sigma_noise/10, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, sigma_noise/10, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, sigma_noise/100]])\n\n\n # Prediction State\n self.X_Pred = A @ self.X_t + W\n\n # Prediction Covariance Matrix\n self.P_Pred = J_A @ self.P_t @ J_A.T + Q # ??? + A@Q@A.T ???\n self.P_Pred = (self.P_Pred + self.P_Pred.T) / 2 # Ensure that it is symmetric\n\n # Prediction step without measurement updates\n def UpdateNoMeasures(self):\n self.X_t = self.X_Pred\n self.P_t = self.P_Pred\n\n print(\"UpdateNoMeasures \" + str(self.X_t))\n\n\n # Update step with the measurements\n def Update(self):\n # Check if there are more updates\n if(self.control_measure or self.encoder_measure or self.imu_left_measure or self.imu_right_measure or self.gps_measure):\n\n # Make sure the execution is safe\n self.lock.acquire()\n try:\n # Reset Measurements check\n self.control_measure = self.encoder_measure = self.imu_left_measure = self.imu_right_measure = self.gps_measure = False\n\n # Transpose matrices after their creation\n Update_H = self.H.T\n Update_J_H = self.J_H.T\n Update_R = np.diag(self.R)\n # Store measurements vector for update\n Update_Z = self.Z\n\n\n # Initialise Measurements Vector\n self.Z = np.array([])\n # Initialise Measurements Covariance Matrix\n self.R = np.array([])\n # Initialise Measurements Matrix\n self.H = np.zeros((6,0))\n # Initialise Measurements Jacobian Matrix\n self.J_H = np.zeros((6,0))\n\n finally:\n self.lock.release() # release self.lock, no matter what\n\n\n\n # Predicted using measurements matrix\n Z_pred = Update_H @ self.X_Pred\n # Innovation\n Y = Update_Z - Z_pred\n # Innovation Covariance\n S = Update_J_H @ self.P_Pred @ Update_J_H.T + Update_R\n # Kalman Gain\n K = self.P_Pred @ Update_J_H.T @ np.linalg.pinv(S) # Pseudo-Inverse of S to avoid Singularity\n # State Update\n self.X_t = self.X_Pred + K @ Y\n # Covariance Update\n self.P_t = (np.eye(6) - K @ Update_J_H) @ self.P_Pred\n # Joseph form Covariance Update equation -> Ensure Positive Semi-Definite\n self.P_t = (np.eye(6) - K @ Update_J_H) @ self.P_Pred @ (np.eye(6) - K @ Update_J_H).T + K @ Update_R @ K.T\n # Ensure P is symmetric\n self.P_t = (self.P_t + self.P_t.T) / 2\n\n\n\n\n print(\"Update \" + str(self.X_t))\n else:\n # Keep just the prediction if no new measurements have been received\n self.UpdateNoMeasures()\n\n # Send the Update to Ros\n header = 
Header()\n header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work\n header.frame_id = \"odom\"\n\n # since all odometry is 6DOF we'll need a quaternion created from yaw\n odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_t[2])\n\n # next, we'll publish the pose message over ROS\n pose = Pose(Point(self.X_t[0], self.X_t[1], 0.), Quaternion(*odom_quat))\n\n pose_covariance = [0] * 36\n pose_covariance[0] = self.P_t[0][0]\n pose_covariance[1] = self.P_t[0][1]\n pose_covariance[5] = self.P_t[0][2]\n pose_covariance[6] = self.P_t[1][0]\n pose_covariance[7] = self.P_t[1][1]\n pose_covariance[11] = self.P_t[1][2]\n pose_covariance[30] = self.P_t[2][0]\n pose_covariance[31] = self.P_t[2][1]\n pose_covariance[35] = self.P_t[2][2]\n\n pose_ekf = PoseWithCovariance(pose, pose_covariance)\n\n # next, we'll publish the pose message over ROS\n twist = Twist(Vector3(self.X_t[3], self.X_t[4], 0.),Vector3(0.0, 0.0, self.X_t[5]))\n\n twist_covariance = [0] * 36\n twist_covariance[0] = self.P_t[3][3]\n twist_covariance[1] = self.P_t[3][4]\n twist_covariance[5] = self.P_t[3][5]\n twist_covariance[6] = self.P_t[4][3]\n twist_covariance[7] = self.P_t[4][4]\n twist_covariance[11] = self.P_t[4][5]\n twist_covariance[30] = self.P_t[5][3]\n twist_covariance[31] = self.P_t[5][4]\n twist_covariance[35] = self.P_t[5][5]\n\n twist_ekf = TwistWithCovariance(twist, twist_covariance)\n\n odom_ekf = Odometry(header, \"base_link\", pose_ekf, twist_ekf)\n\n # publish the message\n self.odom_full_ekf_pub.publish(odom_ekf)\n\n\n def Control(self, cmd_vel):\n\n now = rospy.get_time()\n\n dt = now - self.control_t\n\n self.control_t = now\n\n z_x_dot = cmd_vel.linear.x\n z_yaw_dot = cmd_vel.angular.z\n\n\n z_x_dot_cov = 0.001\n z_yaw_dot_cov = 0.01\n\n # Make sure the execution is safe\n self.lock.acquire()\n try:\n self.Z = np.append(self.Z, np.array([z_x_dot, z_yaw_dot]))\n self.R = np.append(self.R, np.array([z_x_dot_cov,z_yaw_dot_cov]))\n\n self.H = np.column_stack([self.H, np.array([0,0,0,1,0,0]), np.array([0,0,0,0,1,0])])\n self.J_H = np.column_stack([self.J_H, np.array([0,0,0,1,0,0]), np.array([0,0,0,0,1,0])])\n\n self.control_measure = True\n finally:\n self.lock.release() # release self.lock, no matter what\n\n\n self.control_state = np.array([z_x_dot, z_yaw_dot])\n print(\"Control \" + str(self.control_state))\n\n # Send the Update to Ros\n header = Header()\n header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work\n header.frame_id = \"odom\"\n\n # since all odometry is 6DOF we'll need a quaternion created from yaw\n odom_quat = tf.transformations.quaternion_from_euler(0, 0, self.X_t[2])\n\n # next, we'll publish the pose message over ROS\n pose = Pose(Point(self.X_t[0], self.X_t[1], 0.), Quaternion(*odom_quat))\n\n pose_covariance = [0] * 36\n\n pose_control = PoseWithCovariance(pose, pose_covariance)\n\n twist_covariance = [0] * 36\n twist_covariance[0] = z_x_dot_cov\n twist_covariance[35] = z_yaw_dot_cov\n\n twist_control = TwistWithCovariance(cmd_vel, twist_covariance)\n\n odom_control = Odometry(header, \"base_link\", pose_control, twist_control)\n\n # publish the message\n self.odom_control_pub.publish(odom_control)\n\n\n def Encoder(self, wheel_encoder):\n\n # Automower parameters\n base_width = 0.464500 # Original measurement\n base_width = 0.435 # Measured the internal side with stick -> 0.435\n wheel_diameter = 0.245\n #self.wheel_diameter = 0.24 # Measured the inner side with stick -> 0.238\n 
wheel_pulses_per_turn = 349\n wheel_meter_per_tick = (2.0 * math.pi * wheel_diameter / 2.0) / wheel_pulses_per_turn\n\n\n lastLeftPulses = self.encoder_state[4]\n lastRightPulses = self.encoder_state[5]\n\n leftPulses = wheel_encoder.rwheelAccum\n rightPulses = wheel_encoder.lwheelAccum\n\n deltaLeftPulses = leftPulses - lastLeftPulses\n deltaRightPulses = rightPulses - lastRightPulses\n\n if(lastLeftPulses and lastRightPulses): #and deltaLeftPulses and deltaRightPulses):\n\n leftDist = - deltaLeftPulses * wheel_meter_per_tick\n rightDist = deltaRightPulses * wheel_meter_per_tick\n\n delta_d = ( rightDist + leftDist ) / 2\n\n delta_yaw = ( rightDist - leftDist ) / base_width\n\n delta_x = delta_d * math.cos( self.X_t[2] )\n delta_y = delta_d * math.sin( self.X_t[2] )\n\n\n z_x = self.X_t[0] - delta_x\n z_y = self.X_t[1] - delta_y\n z_yaw = self.X_t[2] + delta_yaw\n\n\n z_cov = 0.1\n z_yaw_cov = np.deg2rad(10)\n\n\n # Make sure the execution is safe\n self.lock.acquire()\n try:\n self.Z = np.append(self.Z, np.array([z_x, z_y, z_yaw]))\n self.R = np.append(self.R, np.array([z_cov,z_cov,z_yaw_cov]))\n\n self.H = np.column_stack([self.H, np.array([1,0,0,0,0,0]), np.array([0,1,0,0,0,0]), np.array([0,0,1,0,0,0])])\n self.J_H = np.column_stack([self.J_H, np.array([1,0,0,0,0,0]), np.array([0,1,0,0,0,0]), np.array([0,0,1,0,0,0])])\n\n self.encoder_measure = True\n finally:\n self.lock.release() # release self.lock, no matter what\n\n\n self.encoder_state = np.array([z_x, z_y, z_yaw, 0.0, lastLeftPulses, lastRightPulses])\n print(\"Encoder \" + str(self.encoder_state))\n\n\n # Send the Update to Ros\n header = Header()\n header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work\n header.frame_id = \"odom\"\n\n # since all odometry is 6DOF we'll need a quaternion created from yaw\n odom_quat = tf.transformations.quaternion_from_euler(0, 0, z_yaw)\n\n # next, we'll publish the pose message over ROS\n pose = Pose(Point(z_x, z_y, 0.), Quaternion(*odom_quat))\n\n pose_covariance = [0] * 36\n pose_covariance[0] = z_cov\n pose_covariance[7] = z_cov\n pose_covariance[35] = z_yaw_cov\n\n pose_encoder = PoseWithCovariance(pose, pose_covariance)\n\n odom_encoder = Odometry(header, \"base_link\", pose_encoder, TwistWithCovariance())\n\n # publish the message\n self.odom_encoder_pub.publish(odom_encoder)\n\n\n # Store prev values\n self.encoder_state[4] = leftPulses\n self.encoder_state[5] = rightPulses\n\n\n\n\n def GPS(self, GPSfix):\n\n lat_mean = 59.406820 # Manually set based on test\n long_mean = 17.940523 # Manually set based on test\n\n gps_e , gps_n, gps_u = pm.geodetic2enu(GPSfix.latitude,GPSfix.longitude,0,lat_mean,long_mean,0)\n\n #delta_z_yaw = math.atan2((- gps_e - self.gps_state[1]),( gps_n - self.gps_state[0]))\n #print(delta_z_yaw)\n\n #z_yaw = delta_z_yaw\n z_x = gps_n\n z_y = - gps_e\n\n z_cov = GPSfix.position_covariance[0] # Original value of covariance from Automower\n z_cov = z_cov / 4.5 # Scale value of HDOP (Averaging among n&e covariances and removing 1.5*1.5 scale)\n z_cov = z_cov / 100 # Trying to lower the cov\n #z_yaw_cov = np.deg2rad(10) / 10 # Trying to lower the cov dividing by 10\n\n # Make sure the execution is safe\n self.lock.acquire()\n try:\n self.Z = np.append(self.Z, np.array([z_x, z_y]))\n self.R = np.append(self.R, np.array([z_cov,z_cov]))\n\n self.H = np.column_stack([self.H, np.array([1,0,0,0,0,0]), np.array([0,1,0,0,0,0])])\n self.J_H = np.column_stack([self.J_H, np.array([1,0,0,0,0,0]), np.array([0,1,0,0,0,0])])\n\n 
self.gps_measure = True\n finally:\n self.lock.release() # release self.lock, no matter what\n\n self.gps_state = np.array([z_x,z_y])\n print(\" GPS \" + str(self.gps_state))\n\n # Send the Update to Ros\n header = Header()\n header.stamp = rospy.Time.now() # Note you need to call rospy.init_node() before this will work\n header.frame_id = \"odom\"\n\n # since all odometry is 6DOF we'll need a quaternion created from yaw\n odom_quat = tf.transformations.quaternion_from_euler(0, 0, 0)\n\n # next, we'll publish the pose message over ROS\n pose = Pose(Point(z_x,z_y, 0.), Quaternion(*odom_quat))\n\n pose_covariance = [0] * 36\n pose_covariance[0] = z_cov\n pose_covariance[7] = z_cov\n pose_covariance[35] = 0\n\n pose_gps = PoseWithCovariance(pose, pose_covariance)\n\n odom_gps = Odometry(header, \"base_link\", pose_gps, TwistWithCovariance())\n\n # publish the message\n self.odom_gps_pub.publish(odom_gps)\n\n\n\ndef plot_covariance_ellipse(xEst, PEst): # pragma: no cover\n Pxy = PEst[0:2, 0:2]\n eigval, eigvec = np.linalg.eig(Pxy)\n\n if eigval[0] >= eigval[1]:\n bigind = 0\n smallind = 1\n else:\n bigind = 1\n smallind = 0\n\n t = np.arange(0, 2 * math.pi + 0.1, 0.1)\n a = math.sqrt(eigval[bigind])\n b = math.sqrt(eigval[smallind])\n x = [a * math.cos(it) for it in t]\n y = [b * math.sin(it) for it in t]\n angle = math.atan2(eigvec[1, bigind], eigvec[0, bigind])\n rot = Rot.from_euler('z', angle).as_matrix()[0:2, 0:2]\n fx = rot @ (np.array([x, y]))\n px = np.array(fx[0, :] + xEst[0]).flatten()\n py = np.array(fx[1, :] + xEst[1]).flatten()\n plt.plot(px, py, \"--r\")\n\n\n\n\n\nif __name__ == '__main__':\n\n print(\"Start Full_EKF\")\n\n full_ekf = None\n\n try:\n # Initialise the Kalman Filter\n full_ekf = Full_EKF()\n # Wait for the updates (Variable dt)\n #rospy.spin()\n # Continuosly try to get updates and run Prediction and Update (Constant dt)\n hertz = 250\n rate = rospy.Rate(hertz) # 250hz - highest frequency of the sensors (IMU)\n\n # Get the time\n start = rospy.get_time()\n\n # Wait for the system to start running\n while start == 0:\n start = rospy.get_time()\n # Sleep before next iteration\n rate.sleep()\n\n # State Vector [x y theta v omega a]'\n X = np.zeros((2, 1))\n\n Z = np.zeros((2, 1))\n\n # history\n hX = X\n hZ = Z\n\n # Start with the fusion\n end = start\n while not rospy.is_shutdown():\n # Update dt at each iteration\n start = rospy.get_time()\n dt = start - end\n print(\"KALMAN - \" + str(start) + \" \" + str(dt))\n # Prediction step\n full_ekf.Predict(dt)\n # Update step\n full_ekf.Update()\n # Reset time\n end = start\n\n # store data history\n hX = np.hstack((hX, np.array([full_ekf.X_t[0],full_ekf.X_t[1]]).reshape(2,1)))\n hZ = np.hstack((hZ, np.array([full_ekf.gps_state[0],full_ekf.gps_state[1]]).reshape(2,1)))\n\n plt.cla()\n # for stopping simulation with the esc key.\n plt.gcf().canvas.mpl_connect('key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n plt.plot(hX[0, :].flatten(),\n hX[1, :].flatten(), \"-b\")\n plt.plot(hZ[0, :],\n hZ[1, :], \".g\")\n plot_covariance_ellipse(full_ekf.X_t, full_ekf.P_t)\n plt.axis(\"equal\")\n plt.grid(True)\n plt.pause(1/hertz)\n\n #time.sleep(1/hertz)\n # Sleep before next iteration\n #rate.sleep()\n\n except rospy.ROSInterruptException:\n\n pass\n\n print(full_ekf.X_t)\n print(full_ekf.P_t)\n print(\"End Full_EKF\")\n" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "numpy.linalg.pinv", "matplotlib.pyplot.cla", "numpy.eye", "scipy.spatial.transform.Rotation.from_euler", "numpy.arange", "numpy.linalg.eig", "matplotlib.pyplot.pause", "matplotlib.pyplot.gcf", "numpy.deg2rad", "numpy.diag", "matplotlib.pyplot.axis" ] ]
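A minimal standalone sketch of the dead-reckoning step used by the Encoder callback in the code cell above. The constants mirror the values in that callback, while the function name and the sample tick counts are illustrative assumptions; the original callback additionally negates the left-wheel distance and subtracts the resulting position deltas, presumably to match its platform's encoder orientation.

import math

# Differential-drive dead reckoning, restating the wheel-odometry update above.
BASE_WIDTH = 0.435        # wheel separation [m], as measured in the callback
WHEEL_DIAMETER = 0.245    # [m]
PULSES_PER_TURN = 349
METER_PER_TICK = (math.pi * WHEEL_DIAMETER) / PULSES_PER_TURN

def dead_reckon(x, y, yaw, d_left_ticks, d_right_ticks):
    """Advance a planar pose (x, y, yaw) given wheel-encoder tick deltas."""
    left = d_left_ticks * METER_PER_TICK
    right = d_right_ticks * METER_PER_TICK
    delta_d = (right + left) / 2.0           # distance travelled by the base
    delta_yaw = (right - left) / BASE_WIDTH  # change in heading
    return (x + delta_d * math.cos(yaw),
            y + delta_d * math.sin(yaw),
            yaw + delta_yaw)

# Example input: 100 left ticks and 120 right ticks -> forward motion with a slight left turn.
print(dead_reckon(0.0, 0.0, 0.0, 100, 120))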
AustinJAdams/statsmodels
[ "9271ced806b807a4dd325238df38b60f1aa363e2", "e6632b6466dc7eb7062df0f26a6888da0e67e347" ]
[ "examples/python/glm.py", "statsmodels/tsa/statespace/mlemodel.py" ]
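The first file listed above, examples/python/glm.py, is the statsmodels GLM example script reproduced in the next cell, and the second, statsmodels/tsa/statespace/mlemodel.py, is the state-space MLEModel base class. For orientation, the fitting pattern the example exercises condenses to a few calls; the sketch below is not part of the row and uses only functions that appear verbatim in the cell that follows.

import statsmodels.api as sm

# Star98 Binomial GLM, condensed from examples/python/glm.py below.
data = sm.datasets.star98.load()
data.exog = sm.add_constant(data.exog, prepend=False)

glm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())
res = glm_binom.fit()
print(res.summary())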
[ "# coding: utf-8\n\n# DO NOT EDIT\n# Autogenerated from the notebook glm.ipynb.\n# Edit the notebook and then sync the output with this file.\n#\n# flake8: noqa\n# DO NOT EDIT\n\n# # Generalized Linear Models\n\nimport numpy as np\nimport statsmodels.api as sm\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\n\n# ## GLM: Binomial response data\n#\n# ### Load data\n#\n# In this example, we use the Star98 dataset which was taken with\n# permission\n# from Jeff Gill (2000) Generalized linear models: A unified approach.\n# Codebook\n# information can be obtained by typing:\n\nprint(sm.datasets.star98.NOTE)\n\n# Load the data and add a constant to the exogenous (independent)\n# variables:\n\ndata = sm.datasets.star98.load()\ndata.exog = sm.add_constant(data.exog, prepend=False)\n\n# The dependent variable is N by 2 (Success: NABOVE, Failure: NBELOW):\n\nprint(data.endog[:5, :])\n\n# The independent variables include all the other variables described\n# above, as\n# well as the interaction terms:\n\nprint(data.exog[:2, :])\n\n# ### Fit and summary\n\nglm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())\nres = glm_binom.fit()\nprint(res.summary())\n\n# ### Quantities of interest\n\nprint('Total number of trials:', data.endog[0].sum())\nprint('Parameters: ', res.params)\nprint('T-values: ', res.tvalues)\n\n# First differences: We hold all explanatory variables constant at their\n# means and manipulate the percentage of low income households to assess its\n# impact on the response variables:\n\nmeans = data.exog.mean(axis=0)\nmeans25 = means.copy()\nmeans25[0] = stats.scoreatpercentile(data.exog[:, 0], 25)\nmeans75 = means.copy()\nmeans75[0] = lowinc_75per = stats.scoreatpercentile(data.exog[:, 0], 75)\nresp_25 = res.predict(means25)\nresp_75 = res.predict(means75)\ndiff = resp_75 - resp_25\n\n# The interquartile first difference for the percentage of low income\n# households in a school district is:\n\nprint(\"%2.4f%%\" % (diff * 100))\n\n# ### Plots\n#\n# We extract information that will be used to draw some interesting\n# plots:\n\nnobs = res.nobs\ny = data.endog[:, 0] / data.endog.sum(1)\nyhat = res.mu\n\n# Plot yhat vs y:\n\nfrom statsmodels.graphics.api import abline_plot\n\nfig, ax = plt.subplots()\nax.scatter(yhat, y)\nline_fit = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()\nabline_plot(model_results=line_fit, ax=ax)\n\nax.set_title('Model Fit Plot')\nax.set_ylabel('Observed values')\nax.set_xlabel('Fitted values')\n\n# Plot yhat vs. Pearson residuals:\n\nfig, ax = plt.subplots()\n\nax.scatter(yhat, res.resid_pearson)\nax.hlines(0, 0, 1)\nax.set_xlim(0, 1)\nax.set_title('Residual Dependence Plot')\nax.set_ylabel('Pearson Residuals')\nax.set_xlabel('Fitted values')\n\n# Histogram of standardized deviance residuals:\n\nfrom scipy import stats\n\nfig, ax = plt.subplots()\n\nresid = res.resid_deviance.copy()\nresid_std = stats.zscore(resid)\nax.hist(resid_std, bins=25)\nax.set_title('Histogram of standardized deviance residuals')\n\n# QQ Plot of Deviance Residuals:\n\nfrom statsmodels import graphics\ngraphics.gofplots.qqplot(resid, line='r')\n\n# ## GLM: Gamma for proportional count response\n#\n# ### Load data\n#\n# In the example above, we printed the ``NOTE`` attribute to learn about\n# the\n# Star98 dataset. Statsmodels datasets ships with other useful\n# information. 
For\n# example:\n\nprint(sm.datasets.scotland.DESCRLONG)\n\n# Load the data and add a constant to the exogenous variables:\n\ndata2 = sm.datasets.scotland.load()\ndata2.exog = sm.add_constant(data2.exog, prepend=False)\nprint(data2.exog[:5, :])\nprint(data2.endog[:5])\n\n# ### Fit and summary\n\nglm_gamma = sm.GLM(data2.endog, data2.exog, family=sm.families.Gamma())\nglm_results = glm_gamma.fit()\nprint(glm_results.summary())\n\n# ## GLM: Gaussian distribution with a noncanonical link\n#\n# ### Artificial data\n\nnobs2 = 100\nx = np.arange(nobs2)\nnp.random.seed(54321)\nX = np.column_stack((x, x**2))\nX = sm.add_constant(X, prepend=False)\nlny = np.exp(-(.03 * x + .0001 * x**2 - 1.0)) + .001 * np.random.rand(nobs2)\n\n# ### Fit and summary\n\ngauss_log = sm.GLM(lny, X, family=sm.families.Gaussian(sm.families.links.log))\ngauss_log_results = gauss_log.fit()\nprint(gauss_log_results.summary())\n", "# -*- coding: utf-8 -*-\n\"\"\"\nState Space Model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nimport contextlib\nimport warnings\n\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\n\nfrom statsmodels.tools.tools import pinv_extended, Bunch\nfrom statsmodels.tools.sm_exceptions import PrecisionWarning\nfrom statsmodels.tools.numdiff import (_get_epsilon, approx_hess_cs,\n approx_fprime_cs, approx_fprime)\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.eval_measures import aic, bic, hqic\n\nimport statsmodels.base.wrapper as wrap\n\nimport statsmodels.genmod._prediction as pred\nfrom statsmodels.genmod.families.links import identity\n\nimport statsmodels.tsa.base.tsa_model as tsbase\n\nfrom .simulation_smoother import SimulationSmoother\nfrom .kalman_smoother import SmootherResults\nfrom .kalman_filter import INVERT_UNIVARIATE, SOLVE_LU, MEMORY_CONSERVE\nfrom .initialization import Initialization\nfrom .tools import prepare_exog, concat\n\n\ndef _handle_args(names, defaults, *args, **kwargs):\n output_args = []\n # We need to handle positional arguments in two ways, in case this was\n # called by a Scipy optimization routine\n if len(args) > 0:\n # the fit() method will pass a dictionary\n if isinstance(args[0], dict):\n flags = args[0]\n # otherwise, a user may have just used positional arguments...\n else:\n flags = dict(zip(names, args))\n for i in range(len(names)):\n output_args.append(flags.get(names[i], defaults[i]))\n\n for name, value in flags.items():\n if name in kwargs:\n raise TypeError(\"loglike() got multiple values for keyword\"\n \" argument '%s'\" % name)\n else:\n for i in range(len(names)):\n output_args.append(kwargs.pop(names[i], defaults[i]))\n\n return tuple(output_args) + (kwargs,)\n\n\nclass MLEModel(tsbase.TimeSeriesModel):\n r\"\"\"\n State space model for maximum likelihood estimation\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n k_states : int\n The dimension of the unobserved state process.\n exog : array_like, optional\n Array of exogenous regressors, shaped nobs x k. Default is no\n exogenous regressors.\n dates : array_like of datetime, optional\n An array-like object of datetime objects. If a Pandas object is given\n for endog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. 
This is optional if dates are given.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices or for Kalman filtering options. See `Representation`, and\n `KalmanFilter` for more details.\n\n Attributes\n ----------\n ssm : statsmodels.tsa.statespace.kalman_filter.KalmanFilter\n Underlying state space representation.\n\n Notes\n -----\n This class wraps the state space model with Kalman filtering to add in\n functionality for maximum likelihood estimation. In particular, it adds\n the concept of updating the state space representation based on a defined\n set of parameters, through the `update` method or `updater` attribute (see\n below for more details on which to use when), and it adds a `fit` method\n which uses a numerical optimizer to select the parameters that maximize\n the likelihood of the model.\n\n The `start_params` `update` method must be overridden in the\n child class (and the `transform` and `untransform` methods, if needed).\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults\n statsmodels.tsa.statespace.kalman_filter.KalmanFilter\n statsmodels.tsa.statespace.representation.Representation\n \"\"\"\n\n def __init__(self, endog, k_states, exog=None, dates=None, freq=None,\n **kwargs):\n # Initialize the model base\n super(MLEModel, self).__init__(endog=endog, exog=exog,\n dates=dates, freq=freq,\n missing='none')\n\n # Store kwargs to recreate model\n self._init_kwargs = kwargs\n\n # Prepared the endog array: C-ordered, shape=(nobs x k_endog)\n self.endog, self.exog = self.prepare_data()\n\n # Dimensions\n self.nobs = self.endog.shape[0]\n self.k_states = k_states\n\n # Initialize the state-space representation\n self.initialize_statespace(**kwargs)\n\n # Setup holder for fixed parameters\n self._has_fixed_params = False\n self._fixed_params = None\n self._params_index = None\n self._fixed_params_index = None\n self._free_params_index = None\n\n def prepare_data(self):\n \"\"\"\n Prepare data for use in the state space representation\n \"\"\"\n endog = np.array(self.data.orig_endog, order='C')\n exog = self.data.orig_exog\n if exog is not None:\n exog = np.array(exog)\n\n # Base class may allow 1-dim data, whereas we need 2-dim\n if endog.ndim == 1:\n endog.shape = (endog.shape[0], 1) # this will be C-contiguous\n\n return endog, exog\n\n def initialize_statespace(self, **kwargs):\n \"\"\"\n Initialize the state space representation\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments to pass to the state space class\n constructor.\n\n \"\"\"\n # (Now self.endog is C-ordered and in long format (nobs x k_endog). 
To\n # get F-ordered and in wide format just need to transpose)\n endog = self.endog.T\n\n # Instantiate the state space object\n self.ssm = SimulationSmoother(endog.shape[0], self.k_states,\n nobs=endog.shape[1], **kwargs)\n # Bind the data to the model\n self.ssm.bind(endog)\n\n # Other dimensions, now that `ssm` is available\n self.k_endog = self.ssm.k_endog\n\n def __setitem__(self, key, value):\n return self.ssm.__setitem__(key, value)\n\n def __getitem__(self, key):\n return self.ssm.__getitem__(key)\n\n def _get_init_kwds(self):\n # Get keywords based on model attributes\n kwds = super(MLEModel, self)._get_init_kwds()\n\n for key, value in kwds.items():\n if value is None and hasattr(self.ssm, key):\n kwds[key] = getattr(self.ssm, key)\n\n return kwds\n\n def clone(self, endog, exog=None, **kwargs):\n raise NotImplementedError\n\n def _clone_from_init_kwds(self, endog, exog=None, **kwargs):\n # Cannot make this the default, because there is extra work required\n # for subclasses to make _get_init_kwds useful.\n use_kwargs = self._get_init_kwds()\n use_kwargs.update(kwargs)\n return self.__class__(endog, exog=exog, **use_kwargs)\n\n def set_filter_method(self, filter_method=None, **kwargs):\n \"\"\"\n Set the filtering method\n\n The filtering method controls aspects of which Kalman filtering\n approach will be used.\n\n Parameters\n ----------\n filter_method : int, optional\n Bitmask value to set the filter method to. See notes for details.\n **kwargs\n Keyword arguments may be used to influence the filter method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_filter_method(filter_method, **kwargs)\n\n def set_inversion_method(self, inversion_method=None, **kwargs):\n \"\"\"\n Set the inversion method\n\n The Kalman filter may contain one matrix inversion: that of the\n forecast error covariance matrix. The inversion method controls how and\n if that inverse is performed.\n\n Parameters\n ----------\n inversion_method : int, optional\n Bitmask value to set the inversion method to. See notes for\n details.\n **kwargs\n Keyword arguments may be used to influence the inversion method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_inversion_method(inversion_method, **kwargs)\n\n def set_stability_method(self, stability_method=None, **kwargs):\n \"\"\"\n Set the numerical stability method\n\n The Kalman filter is a recursive algorithm that may in some cases\n suffer issues with numerical stability. The stability method controls\n what, if any, measures are taken to promote stability.\n\n Parameters\n ----------\n stability_method : int, optional\n Bitmask value to set the stability method to. See notes for\n details.\n **kwargs\n Keyword arguments may be used to influence the stability method by\n setting individual boolean flags. See notes for details.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_stability_method(stability_method, **kwargs)\n\n def set_conserve_memory(self, conserve_memory=None, **kwargs):\n \"\"\"\n Set the memory conservation method\n\n By default, the Kalman filter computes a number of intermediate\n matrices at each iteration. 
The memory conservation options control\n which of those matrices are stored.\n\n Parameters\n ----------\n conserve_memory : int, optional\n Bitmask value to set the memory conservation method to. See notes\n for details.\n **kwargs\n Keyword arguments may be used to influence the memory conservation\n method by setting individual boolean flags.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanFilter` class for details.\n \"\"\"\n self.ssm.set_conserve_memory(conserve_memory, **kwargs)\n\n def set_smoother_output(self, smoother_output=None, **kwargs):\n \"\"\"\n Set the smoother output\n\n The smoother can produce several types of results. The smoother output\n variable controls which are calculated and returned.\n\n Parameters\n ----------\n smoother_output : int, optional\n Bitmask value to set the smoother output to. See notes for details.\n **kwargs\n Keyword arguments may be used to influence the smoother output by\n setting individual boolean flags.\n\n Notes\n -----\n This method is rarely used. See the corresponding function in the\n `KalmanSmoother` class for details.\n \"\"\"\n self.ssm.set_smoother_output(smoother_output, **kwargs)\n\n def initialize_known(self, initial_state, initial_state_cov):\n \"\"\"Initialize known\"\"\"\n self.ssm.initialize_known(initial_state, initial_state_cov)\n\n def initialize_approximate_diffuse(self, variance=None):\n \"\"\"Initialize approximate diffuse\"\"\"\n self.ssm.initialize_approximate_diffuse(variance)\n\n def initialize_stationary(self):\n \"\"\"Initialize stationary\"\"\"\n self.ssm.initialize_stationary()\n\n @property\n def initialization(self):\n return self.ssm.initialization\n\n @initialization.setter\n def initialization(self, value):\n self.ssm.initialization = value\n\n @property\n def initial_variance(self):\n return self.ssm.initial_variance\n\n @initial_variance.setter\n def initial_variance(self, value):\n self.ssm.initial_variance = value\n\n @property\n def loglikelihood_burn(self):\n return self.ssm.loglikelihood_burn\n\n @loglikelihood_burn.setter\n def loglikelihood_burn(self, value):\n self.ssm.loglikelihood_burn = value\n\n @property\n def tolerance(self):\n return self.ssm.tolerance\n\n @tolerance.setter\n def tolerance(self, value):\n self.ssm.tolerance = value\n\n def _validate_can_fix_params(self, param_names):\n for param_name in param_names:\n if param_name not in self.param_names:\n raise ValueError('Invalid parameter name passed: \"%s\".'\n % param_name)\n\n @contextlib.contextmanager\n def fix_params(self, params):\n \"\"\"\n Fix parameters to specific values (context manager)\n\n Parameters\n ----------\n params : dict\n Dictionary describing the fixed parameter values, of the form\n `param_name: fixed_value`. 
See the `param_names` property for valid\n parameter names.\n\n Examples\n --------\n >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))\n >>> with mod.fix_params({'ar.L1': 0.5}):\n res = mod.fit()\n \"\"\"\n k_params = len(self.param_names)\n # Initialization (this is done here rather than in the constructor\n # because param_names may not be available at that point)\n if self._fixed_params is None:\n self._fixed_params = {}\n self._params_index = OrderedDict(\n zip(self.param_names, np.arange(k_params)))\n\n # Cache the current fixed parameters\n cache_fixed_params = self._fixed_params.copy()\n cache_has_fixed_params = self._has_fixed_params\n cache_fixed_params_index = self._fixed_params_index\n cache_free_params_index = self._free_params_index\n\n # Validate parameter names and values\n self._validate_can_fix_params(set(params.keys()))\n\n # Set the new fixed parameters, keeping the order as given by\n # param_names\n self._fixed_params.update(params)\n self._fixed_params = OrderedDict([\n (name, self._fixed_params[name]) for name in self.param_names\n if name in self._fixed_params])\n\n # Update associated values\n self._has_fixed_params = True\n self._fixed_params_index = [self._params_index[key]\n for key in self._fixed_params.keys()]\n self._free_params_index = list(\n set(np.arange(k_params)).difference(self._fixed_params_index))\n\n try:\n yield\n finally:\n # Reset the fixed parameters\n self._has_fixed_params = cache_has_fixed_params\n self._fixed_params = cache_fixed_params\n self._fixed_params_index = cache_fixed_params_index\n self._free_params_index = cache_free_params_index\n\n def fit(self, start_params=None, transformed=True, includes_fixed=False,\n cov_type=None, cov_kwds=None, method='lbfgs', maxiter=50,\n full_output=1, disp=5, callback=None, return_params=False,\n optim_score=None, optim_complex_step=None, optim_hessian=None,\n flags=None, low_memory=False, **kwargs):\n \"\"\"\n Fits the model by maximum likelihood via Kalman filter.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n If None, the default is given by Model.start_params.\n transformed : bool, optional\n Whether or not `start_params` is already transformed. Default is\n True.\n cov_type : str, optional\n The `cov_type` keyword governs the method for calculating the\n covariance matrix of parameter estimates. Can be one of:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'approx' for the observed information matrix estimator,\n calculated using a numerical approximation of the Hessian matrix.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. Intermediate calculations use the 'oim'\n method.\n - 'robust_approx' is the same as 'robust' except that the\n intermediate calculations use the 'approx' method.\n - 'none' for no covariance matrix calculation.\n\n Default is 'opg' unless memory conservation is used to avoid\n computing the loglikelihood values for each observation, in which\n case the default is 'oim'.\n cov_kwds : dict or None, optional\n A dictionary of arguments affecting covariance matrix computation.\n\n **opg, oim, approx, robust, robust_approx**\n\n - 'approx_complex_step' : bool, optional - If True, numerical\n approximations are computed using complex-step methods. 
If False,\n numerical approximations are computed using finite difference\n methods. Default is True.\n - 'approx_centered' : bool, optional - If True, numerical\n approximations computed using finite difference methods use a\n centered approximation. Default is False.\n method : str, optional\n The `method` determines which solver from `scipy.optimize`\n is used, and it can be chosen from among the following strings:\n\n - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead\n - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)\n - 'lbfgs' for limited-memory BFGS with optional box constraints\n - 'powell' for modified Powell's method\n - 'cg' for conjugate gradient\n - 'ncg' for Newton-conjugate gradient\n - 'basinhopping' for global basin-hopping solver\n\n The explicit arguments in `fit` are passed to the solver,\n with the exception of the basin-hopping solver. Each\n solver has several optional arguments that are not the same across\n solvers. See the notes section below (or scipy.optimize) for the\n available arguments and for the list of explicit arguments that the\n basin-hopping solver supports.\n maxiter : int, optional\n The maximum number of iterations to perform.\n full_output : bool, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : bool, optional\n Set to True to print convergence messages.\n callback : callable callback(xk), optional\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n return_params : bool, optional\n Whether or not to return only the array of maximizing parameters.\n Default is False.\n optim_score : {'harvey', 'approx'} or None, optional\n The method by which the score vector is calculated. 'harvey' uses\n the method from Harvey (1989), 'approx' uses either finite\n difference or complex step differentiation depending upon the\n value of `optim_complex_step`, and None uses the built-in gradient\n approximation of the optimizer. Default is None. This keyword is\n only relevant if the optimization method uses the score.\n optim_complex_step : bool, optional\n Whether or not to use complex step differentiation when\n approximating the score; if False, finite difference approximation\n is used. Default is True. This keyword is only relevant if\n `optim_score` is set to 'harvey' or 'approx'.\n optim_hessian : {'opg','oim','approx'}, optional\n The method by which the Hessian is numerically approximated. 'opg'\n uses outer product of gradients, 'oim' uses the information\n matrix formula from Harvey (1989), and 'approx' uses numerical\n approximation. This keyword is only relevant if the\n optimization method uses the Hessian matrix.\n low_memory : bool, optional\n If set to True, techniques are applied to substantially reduce\n memory usage. 
If used, some features of the results object will\n not be available (including smoothed results and in-sample\n prediction), although out-of-sample forecasting is possible.\n Default is False.\n **kwargs\n Additional keyword arguments to pass to the optimizer.\n\n Returns\n -------\n MLEResults\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n statsmodels.tsa.statespace.mlemodel.MLEResults\n \"\"\"\n if start_params is None:\n start_params = self.start_params\n transformed = True\n includes_fixed = True\n\n # Update the score method\n if optim_score is None and method == 'lbfgs':\n kwargs.setdefault('approx_grad', True)\n kwargs.setdefault('epsilon', 1e-5)\n elif optim_score is None:\n optim_score = 'approx'\n\n # Check for complex step differentiation\n if optim_complex_step is None:\n optim_complex_step = not self.ssm._complex_endog\n elif optim_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n # Standardize starting parameters\n start_params = self.handle_params(start_params, transformed=True,\n includes_fixed=includes_fixed)\n\n # Unconstrain the starting parameters\n if transformed:\n start_params = self.untransform_params(start_params)\n\n # Remove any fixed parameters\n if self._has_fixed_params:\n start_params = start_params[self._free_params_index]\n\n # If all parameters are fixed, we are done\n if self._has_fixed_params and len(start_params) == 0:\n mlefit = Bunch(params=[], mle_retvals=None,\n mle_settings=None)\n else:\n # Maximum likelihood estimation\n if flags is None:\n flags = {}\n flags.update({\n 'transformed': False,\n 'includes_fixed': False,\n 'score_method': optim_score,\n 'approx_complex_step': optim_complex_step\n })\n if optim_hessian is not None:\n flags['hessian_method'] = optim_hessian\n fargs = (flags,)\n mlefit = super(MLEModel, self).fit(start_params, method=method,\n fargs=fargs,\n maxiter=maxiter,\n full_output=full_output,\n disp=disp, callback=callback,\n skip_hessian=True, **kwargs)\n\n # Just return the fitted parameters if requested\n if return_params:\n return self.handle_params(mlefit.params, transformed=False,\n includes_fixed=False)\n # Otherwise construct the results class if desired\n else:\n # Handle memory conservation option\n if low_memory:\n conserve_memory = self.ssm.conserve_memory\n self.ssm.set_conserve_memory(MEMORY_CONSERVE)\n\n # Perform filtering / smoothing\n if (self.ssm.memory_no_predicted or self.ssm.memory_no_gain\n or self.ssm.memory_no_smoothing):\n func = self.filter\n else:\n func = self.smooth\n res = func(mlefit.params, transformed=False, includes_fixed=False,\n cov_type=cov_type, cov_kwds=cov_kwds)\n\n res.mlefit = mlefit\n res.mle_retvals = mlefit.mle_retvals\n res.mle_settings = mlefit.mle_settings\n\n # Reset memory conservation\n if low_memory:\n self.ssm.set_conserve_memory(conserve_memory)\n\n return res\n\n def fit_constrained(self, constraints, start_params=None, **fit_kwds):\n \"\"\"\n Fit the model with some parameters subject to equality constraints.\n\n Parameters\n ----------\n constraints : dict\n Dictionary of constraints, of the form `param_name: fixed_value`.\n See the `param_names` property for valid parameter names.\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n If None, the default is given by Model.start_params.\n **fit_kwds : keyword arguments\n fit_kwds are used in the optimization of the remaining parameters.\n\n 
Returns\n -------\n results : Results instance\n\n Examples\n --------\n >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))\n >>> res = mod.fit_constrained({'ar.L1': 0.5})\n\n \"\"\"\n with self.fix_params(constraints):\n res = self.fit(start_params, **fit_kwds)\n return res\n\n @property\n def _res_classes(self):\n return {'fit': (MLEResults, MLEResultsWrapper)}\n\n def _wrap_results(self, params, result, return_raw, cov_type=None,\n cov_kwds=None, results_class=None, wrapper_class=None):\n if not return_raw:\n # Wrap in a results object\n result_kwargs = {}\n if cov_type is not None:\n result_kwargs['cov_type'] = cov_type\n if cov_kwds is not None:\n result_kwargs['cov_kwds'] = cov_kwds\n\n if results_class is None:\n results_class = self._res_classes['fit'][0]\n if wrapper_class is None:\n wrapper_class = self._res_classes['fit'][1]\n\n res = results_class(self, params, result, **result_kwargs)\n result = wrapper_class(res)\n return result\n\n def filter(self, params, transformed=True, includes_fixed=False,\n complex_step=False, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, low_memory=False, **kwargs):\n \"\"\"\n Kalman filtering\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : bool,optional\n Whether or not to return only the state space output or a full\n results object. Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n low_memory : bool, optional\n If set to True, techniques are applied to substantially reduce\n memory usage. If used, some features of the results object will\n not be available (including in-sample prediction), although\n out-of-sample forecasting is possible. Default is False.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n # Save the parameter names\n self.data.param_names = self.param_names\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Handle memory conservation\n if low_memory:\n kwargs['conserve_memory'] = MEMORY_CONSERVE\n\n # Get the state space output\n result = self.ssm.filter(complex_step=complex_step, **kwargs)\n\n # Wrap in a results object\n return self._wrap_results(params, result, return_ssm, cov_type,\n cov_kwds, results_class,\n results_wrapper_class)\n\n def smooth(self, params, transformed=True, includes_fixed=False,\n complex_step=False, cov_type=None, cov_kwds=None,\n return_ssm=False, results_class=None,\n results_wrapper_class=None, **kwargs):\n \"\"\"\n Kalman smoothing\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : bool,optional\n Whether or not to return only the state space output or a full\n results object. 
Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n # Save the parameter names\n self.data.param_names = self.param_names\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Get the state space output\n result = self.ssm.smooth(complex_step=complex_step, **kwargs)\n\n # Wrap in a results object\n return self._wrap_results(params, result, return_ssm, cov_type,\n cov_kwds, results_class,\n results_wrapper_class)\n\n _loglike_param_names = ['transformed', 'includes_fixed', 'complex_step']\n _loglike_param_defaults = [True, False, False]\n\n def loglike(self, params, *args, **kwargs):\n \"\"\"\n Loglikelihood evaluation\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n [1]_ recommend maximizing the average likelihood to avoid scale issues;\n this is done automatically by the base Model fit method.\n\n References\n ----------\n .. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 1999.\n Statistical Algorithms for Models in State Space Using SsfPack 2.2.\n Econometrics Journal 2 (1): 107-60. doi:10.1111/1368-423X.00023.\n\n See Also\n --------\n update : modifies the internal state of the state space model to\n reflect new params\n \"\"\"\n transformed, includes_fixed, complex_step, kwargs = _handle_args(\n MLEModel._loglike_param_names, MLEModel._loglike_param_defaults,\n *args, **kwargs)\n\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n loglike = self.ssm.loglike(complex_step=complex_step, **kwargs)\n\n # Koopman, Shephard, and Doornik recommend maximizing the average\n # likelihood to avoid scale issues, but the averaging is done\n # automatically in the base model `fit` method\n return loglike\n\n def loglikeobs(self, params, transformed=True, includes_fixed=False,\n complex_step=False, **kwargs):\n \"\"\"\n Loglikelihood evaluation\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : bool, optional\n Whether or not `params` is already transformed. Default is True.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n [1]_ recommend maximizing the average likelihood to avoid scale issues;\n this is done automatically by the base Model fit method.\n\n References\n ----------\n .. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 
1999.\n Statistical Algorithms for Models in State Space Using SsfPack 2.2.\n Econometrics Journal 2 (1): 107-60. doi:10.1111/1368-423X.00023.\n\n See Also\n --------\n update : modifies the internal state of the Model to reflect new params\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=complex_step)\n\n return self.ssm.loglikeobs(complex_step=complex_step, **kwargs)\n\n def simulation_smoother(self, simulation_output=None, **kwargs):\n r\"\"\"\n Retrieve a simulation smoother for the state space model.\n\n Parameters\n ----------\n simulation_output : int, optional\n Determines which simulation smoother output is calculated.\n Default is all (including state and disturbances).\n **kwargs\n Additional keyword arguments, used to set the simulation output.\n See `set_simulation_output` for more details.\n\n Returns\n -------\n SimulationSmoothResults\n \"\"\"\n return self.ssm.simulation_smoother(\n simulation_output=simulation_output, **kwargs)\n\n def _forecasts_error_partial_derivatives(self, params, transformed=True,\n includes_fixed=False,\n approx_complex_step=None,\n approx_centered=False,\n res=None, **kwargs):\n params = np.array(params, ndmin=1)\n\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n\n # Get values at the params themselves\n if res is None:\n self.update(params, transformed=transformed,\n includes_fixed=includes_fixed,\n complex_step=approx_complex_step)\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n\n # Setup\n n = len(params)\n\n # Compute partial derivatives w.r.t. 
forecast error and forecast\n # error covariance\n partials_forecasts_error = (\n np.zeros((self.k_endog, self.nobs, n))\n )\n partials_forecasts_error_cov = (\n np.zeros((self.k_endog, self.k_endog, self.nobs, n))\n )\n if approx_complex_step:\n epsilon = _get_epsilon(params, 2, None, n)\n increments = np.identity(n) * 1j * epsilon\n\n for i, ih in enumerate(increments):\n self.update(params + ih, transformed=transformed,\n includes_fixed=includes_fixed,\n complex_step=True)\n _res = self.ssm.filter(complex_step=True, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n _res.forecasts_error.imag / epsilon[i]\n )\n\n partials_forecasts_error_cov[:, :, :, i] = (\n _res.forecasts_error_cov.imag / epsilon[i]\n )\n elif not approx_centered:\n epsilon = _get_epsilon(params, 2, None, n)\n ei = np.zeros((n,), float)\n for i in range(n):\n ei[i] = epsilon[i]\n self.update(params + ei, transformed=transformed,\n complex_step=False)\n _res = self.ssm.filter(complex_step=False, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n _res.forecasts_error - res.forecasts_error) / epsilon[i]\n\n partials_forecasts_error_cov[:, :, :, i] = (\n _res.forecasts_error_cov -\n res.forecasts_error_cov) / epsilon[i]\n ei[i] = 0.0\n else:\n epsilon = _get_epsilon(params, 3, None, n) / 2.\n ei = np.zeros((n,), float)\n for i in range(n):\n ei[i] = epsilon[i]\n\n self.update(params + ei, transformed=transformed,\n complex_step=False)\n _res1 = self.ssm.filter(complex_step=False, **kwargs)\n\n self.update(params - ei, transformed=transformed,\n complex_step=False)\n _res2 = self.ssm.filter(complex_step=False, **kwargs)\n\n partials_forecasts_error[:, :, i] = (\n (_res1.forecasts_error - _res2.forecasts_error) /\n (2 * epsilon[i]))\n\n partials_forecasts_error_cov[:, :, :, i] = (\n (_res1.forecasts_error_cov - _res2.forecasts_error_cov) /\n (2 * epsilon[i]))\n\n ei[i] = 0.0\n\n return partials_forecasts_error, partials_forecasts_error_cov\n\n def observed_information_matrix(self, params, transformed=True,\n includes_fixed=False,\n approx_complex_step=None,\n approx_centered=False, **kwargs):\n \"\"\"\n Observed information matrix\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n This method is from Harvey (1989), which shows that the information\n matrix only depends on terms from the gradient. This implementation is\n partially analytic and partially numeric approximation, therefore,\n because it uses the analytic formula for the information matrix, with\n numerically computed elements of the gradient.\n\n References\n ----------\n Harvey, Andrew C. 
1990.\n Forecasting, Structural Time Series Models and the Kalman Filter.\n Cambridge University Press.\n\n \"\"\"\n params = np.array(params, ndmin=1)\n\n # Setup\n n = len(params)\n\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n # Get values at the params themselves\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n self.update(params, transformed=True, includes_fixed=True,\n complex_step=approx_complex_step)\n # If we're using complex-step differentiation, then we cannot use\n # Cholesky factorization\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n dtype = self.ssm.dtype\n\n # Save this for inversion later\n inv_forecasts_error_cov = res.forecasts_error_cov.copy()\n\n partials_forecasts_error, partials_forecasts_error_cov = (\n self._forecasts_error_partial_derivatives(\n params, transformed=transformed, includes_fixed=includes_fixed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, res=res, **kwargs))\n\n # Compute the information matrix\n tmp = np.zeros((self.k_endog, self.k_endog, self.nobs, n), dtype=dtype)\n\n information_matrix = np.zeros((n, n), dtype=dtype)\n for t in range(self.ssm.loglikelihood_burn, self.nobs):\n inv_forecasts_error_cov[:, :, t] = (\n np.linalg.inv(res.forecasts_error_cov[:, :, t])\n )\n for i in range(n):\n tmp[:, :, t, i] = np.dot(\n inv_forecasts_error_cov[:, :, t],\n partials_forecasts_error_cov[:, :, t, i]\n )\n for i in range(n):\n for j in range(n):\n information_matrix[i, j] += (\n 0.5 * np.trace(np.dot(tmp[:, :, t, i],\n tmp[:, :, t, j]))\n )\n information_matrix[i, j] += np.inner(\n partials_forecasts_error[:, t, i],\n np.dot(inv_forecasts_error_cov[:, :, t],\n partials_forecasts_error[:, t, j])\n )\n return information_matrix / (self.nobs - self.ssm.loglikelihood_burn)\n\n def opg_information_matrix(self, params, transformed=True,\n includes_fixed=False, approx_complex_step=None,\n **kwargs):\n \"\"\"\n Outer product of gradients information matrix\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional arguments to the `loglikeobs` method.\n\n References\n ----------\n Berndt, Ernst R., Bronwyn Hall, Robert Hall, and Jerry Hausman. 1974.\n Estimation and Inference in Nonlinear Structural Models.\n NBER Chapters. 
National Bureau of Economic Research, Inc.\n\n \"\"\"\n # We cannot use complex-step differentiation with non-transformed\n # parameters\n if approx_complex_step is None:\n approx_complex_step = transformed\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the observed_information_matrix\"\n \" with untransformed parameters.\")\n\n score_obs = self.score_obs(params, transformed=transformed,\n includes_fixed=includes_fixed,\n approx_complex_step=approx_complex_step,\n **kwargs).transpose()\n return (\n np.inner(score_obs, score_obs) /\n (self.nobs - self.ssm.loglikelihood_burn)\n )\n\n def _score_complex_step(self, params, **kwargs):\n # the default epsilon can be too small\n # inversion_method = INVERT_UNIVARIATE | SOLVE_LU\n epsilon = _get_epsilon(params, 2., None, len(params))\n kwargs['transformed'] = True\n kwargs['complex_step'] = True\n return approx_fprime_cs(params, self.loglike, epsilon=epsilon,\n kwargs=kwargs)\n\n def _score_finite_difference(self, params, approx_centered=False,\n **kwargs):\n kwargs['transformed'] = True\n return approx_fprime(params, self.loglike, kwargs=kwargs,\n centered=approx_centered)\n\n def _score_harvey(self, params, approx_complex_step=True, **kwargs):\n score_obs = self._score_obs_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n return np.sum(score_obs, axis=0)\n\n def _score_obs_harvey(self, params, approx_complex_step=True,\n approx_centered=False, **kwargs):\n \"\"\"\n Score\n\n Parameters\n ----------\n params : array_like, optional\n Array of parameters at which to evaluate the loglikelihood\n function.\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n\n Notes\n -----\n This method is from Harvey (1989), section 3.4.5\n\n References\n ----------\n Harvey, Andrew C. 1990.\n Forecasting, Structural Time Series Models and the Kalman Filter.\n Cambridge University Press.\n\n \"\"\"\n params = np.array(params, ndmin=1)\n n = len(params)\n\n # Get values at the params themselves\n self.update(params, transformed=True, complex_step=approx_complex_step)\n if approx_complex_step:\n kwargs['inversion_method'] = INVERT_UNIVARIATE | SOLVE_LU\n res = self.ssm.filter(complex_step=approx_complex_step, **kwargs)\n\n # Get forecasts error partials\n partials_forecasts_error, partials_forecasts_error_cov = (\n self._forecasts_error_partial_derivatives(\n params, transformed=True,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, res=res, **kwargs))\n\n # Compute partial derivatives w.r.t. 
likelihood function\n partials = np.zeros((self.nobs, n))\n k_endog = self.k_endog\n for t in range(self.nobs):\n inv_forecasts_error_cov = np.linalg.inv(\n res.forecasts_error_cov[:, :, t])\n\n for i in range(n):\n partials[t, i] += np.trace(np.dot(\n np.dot(inv_forecasts_error_cov,\n partials_forecasts_error_cov[:, :, t, i]),\n (np.eye(k_endog) -\n np.dot(inv_forecasts_error_cov,\n np.outer(res.forecasts_error[:, t],\n res.forecasts_error[:, t])))))\n # 2 * dv / di * F^{-1} v_t\n # where x = F^{-1} v_t or F x = v\n partials[t, i] += 2 * np.dot(\n partials_forecasts_error[:, t, i],\n np.dot(inv_forecasts_error_cov, res.forecasts_error[:, t]))\n\n return -partials / 2.\n\n _score_param_names = ['transformed', 'includes_fixed', 'score_method',\n 'approx_complex_step', 'approx_centered']\n _score_param_defaults = [True, False, 'approx', None, False]\n\n def score(self, params, *args, **kwargs):\n \"\"\"\n Compute the score function at params.\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the score.\n *args\n Additional positional arguments to the `loglike` method.\n **kwargs\n Additional keyword arguments to the `loglike` method.\n\n Returns\n -------\n score : array\n Score, evaluated at `params`.\n\n Notes\n -----\n This is a numerical approximation, calculated using first-order complex\n step differentiation on the `loglike` method.\n\n Both args and kwargs are necessary because the optimizer from\n `fit` must call this function and only supports passing arguments via\n args (for example `scipy.optimize.fmin_l_bfgs`).\n \"\"\"\n (transformed, includes_fixed, method, approx_complex_step,\n approx_centered, kwargs) = (\n _handle_args(MLEModel._score_param_names,\n MLEModel._score_param_defaults, *args, **kwargs))\n # For fit() calls, the method is called 'score_method' (to distinguish\n # it from the method used for fit) but generally in kwargs the method\n # will just be called 'method'\n if 'method' in kwargs:\n method = kwargs.pop('method')\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n out = self.handle_params(\n params, transformed=transformed, includes_fixed=includes_fixed,\n return_jacobian=not transformed)\n if transformed:\n params = out\n else:\n params, transform_score = out\n\n if method == 'harvey':\n score = self._score_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n elif method == 'approx' and approx_complex_step:\n kwargs['includes_fixed'] = True\n score = self._score_complex_step(params, **kwargs)\n elif method == 'approx':\n kwargs['includes_fixed'] = True\n score = self._score_finite_difference(\n params, approx_centered=approx_centered, **kwargs)\n else:\n raise NotImplementedError('Invalid score method.')\n\n if not transformed:\n score = np.dot(transform_score, score)\n\n return score\n\n def score_obs(self, params, method='approx', transformed=True,\n includes_fixed=False, approx_complex_step=None,\n approx_centered=False, **kwargs):\n \"\"\"\n Compute the score per observation, evaluated at params\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the score.\n **kwargs\n Additional arguments to the `loglike` method.\n\n Returns\n -------\n score : array\n Score per observation, evaluated at `params`.\n\n Notes\n -----\n This is a numerical approximation, 
calculated using first-order complex\n step differentiation on the `loglikeobs` method.\n \"\"\"\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the score at each observation\"\n \" with untransformed parameters.\")\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n params = self.handle_params(params, transformed=True,\n includes_fixed=includes_fixed)\n kwargs['transformed'] = transformed\n kwargs['includes_fixed'] = True\n\n if method == 'harvey':\n score = self._score_obs_harvey(\n params, approx_complex_step=approx_complex_step, **kwargs)\n elif method == 'approx' and approx_complex_step:\n # the default epsilon can be too small\n epsilon = _get_epsilon(params, 2., None, len(params))\n kwargs['complex_step'] = True\n score = approx_fprime_cs(params, self.loglikeobs, epsilon=epsilon,\n kwargs=kwargs)\n elif method == 'approx':\n score = approx_fprime(params, self.loglikeobs, kwargs=kwargs,\n centered=approx_centered)\n else:\n raise NotImplementedError('Invalid scoreobs method.')\n\n return score\n\n _hessian_param_names = ['transformed', 'hessian_method',\n 'approx_complex_step', 'approx_centered']\n _hessian_param_defaults = [True, 'approx', None, False]\n\n def hessian(self, params, *args, **kwargs):\n r\"\"\"\n Hessian matrix of the likelihood function, evaluated at the given\n parameters\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the hessian.\n *args\n Additional positional arguments to the `loglike` method.\n **kwargs\n Additional keyword arguments to the `loglike` method.\n\n Returns\n -------\n hessian : array\n Hessian matrix evaluated at `params`\n\n Notes\n -----\n This is a numerical approximation.\n\n Both args and kwargs are necessary because the optimizer from\n `fit` must call this function and only supports passing arguments via\n args (for example `scipy.optimize.fmin_l_bfgs`).\n \"\"\"\n transformed, method, approx_complex_step, approx_centered, kwargs = (\n _handle_args(MLEModel._hessian_param_names,\n MLEModel._hessian_param_defaults,\n *args, **kwargs))\n # For fit() calls, the method is called 'hessian_method' (to\n # distinguish it from the method used for fit) but generally in kwargs\n # the method will just be called 'method'\n if 'method' in kwargs:\n method = kwargs.pop('method')\n\n if not transformed and approx_complex_step:\n raise ValueError(\"Cannot use complex-step approximations to\"\n \" calculate the hessian with untransformed\"\n \" parameters.\")\n\n if approx_complex_step is None:\n approx_complex_step = not self.ssm._complex_endog\n if approx_complex_step and self.ssm._complex_endog:\n raise ValueError('Cannot use complex step derivatives when data'\n ' or parameters are complex.')\n\n if method == 'oim':\n hessian = self._hessian_oim(\n params, transformed=transformed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, **kwargs)\n elif method == 'opg':\n hessian = self._hessian_opg(\n params, transformed=transformed,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered, **kwargs)\n elif method == 'approx' and approx_complex_step:\n hessian = self._hessian_complex_step(\n params, transformed=transformed, **kwargs)\n elif method == 'approx':\n hessian = 
self._hessian_finite_difference(\n params, transformed=transformed,\n approx_centered=approx_centered, **kwargs)\n else:\n raise NotImplementedError('Invalid Hessian calculation method.')\n return hessian\n\n def _hessian_oim(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed using the Harvey (1989) information matrix\n \"\"\"\n return -self.observed_information_matrix(params, **kwargs)\n\n def _hessian_opg(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed using the outer product of gradients\n information matrix\n \"\"\"\n return -self.opg_information_matrix(params, **kwargs)\n\n def _hessian_finite_difference(self, params, approx_centered=False,\n **kwargs):\n params = np.array(params, ndmin=1)\n\n warnings.warn('Calculation of the Hessian using finite differences'\n ' is usually subject to substantial approximation'\n ' errors.', PrecisionWarning)\n\n if not approx_centered:\n epsilon = _get_epsilon(params, 3, None, len(params))\n else:\n epsilon = _get_epsilon(params, 4, None, len(params)) / 2\n hessian = approx_fprime(params, self._score_finite_difference,\n epsilon=epsilon, kwargs=kwargs,\n centered=approx_centered)\n\n return hessian / (self.nobs - self.ssm.loglikelihood_burn)\n\n def _hessian_complex_step(self, params, **kwargs):\n \"\"\"\n Hessian matrix computed by second-order complex-step differentiation\n on the `loglike` function.\n \"\"\"\n # the default epsilon can be too small\n epsilon = _get_epsilon(params, 3., None, len(params))\n kwargs['transformed'] = True\n kwargs['complex_step'] = True\n hessian = approx_hess_cs(\n params, self.loglike, epsilon=epsilon, kwargs=kwargs)\n\n return hessian / (self.nobs - self.ssm.loglikelihood_burn)\n\n @property\n def start_params(self):\n \"\"\"\n (array) Starting parameters for maximum likelihood estimation.\n \"\"\"\n if hasattr(self, '_start_params'):\n return self._start_params\n else:\n raise NotImplementedError\n\n @property\n def param_names(self):\n \"\"\"\n (list of str) List of human readable parameter names (for parameters\n actually included in the model).\n \"\"\"\n if hasattr(self, '_param_names'):\n return self._param_names\n else:\n try:\n names = ['param.%d' % i for i in range(len(self.start_params))]\n except NotImplementedError:\n names = []\n return names\n\n def transform_jacobian(self, unconstrained, approx_centered=False):\n \"\"\"\n Jacobian matrix for the parameter transformation function\n\n Parameters\n ----------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer.\n\n Returns\n -------\n jacobian : array\n Jacobian matrix of the transformation, evaluated at `unconstrained`\n\n Notes\n -----\n This is a numerical approximation using finite differences. 
Note that\n in general complex step methods cannot be used because it is not\n guaranteed that the `transform_params` method is a real function (e.g.\n if Cholesky decomposition is used).\n\n See Also\n --------\n transform_params\n \"\"\"\n return approx_fprime(unconstrained, self.transform_params,\n centered=approx_centered)\n\n def transform_params(self, unconstrained):\n \"\"\"\n Transform unconstrained parameters used by the optimizer to constrained\n parameters used in likelihood evaluation\n\n Parameters\n ----------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer, to be\n transformed.\n\n Returns\n -------\n constrained : array_like\n Array of constrained parameters which may be used in likelihood\n evaluation.\n\n Notes\n -----\n This is a noop in the base class, subclasses should override where\n appropriate.\n \"\"\"\n return np.array(unconstrained, ndmin=1)\n\n def untransform_params(self, constrained):\n \"\"\"\n Transform constrained parameters used in likelihood evaluation\n to unconstrained parameters used by the optimizer\n\n Parameters\n ----------\n constrained : array_like\n Array of constrained parameters used in likelihood evaluation, to\n be transformed.\n\n Returns\n -------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer.\n\n Notes\n -----\n This is a noop in the base class, subclasses should override where\n appropriate.\n \"\"\"\n return np.array(constrained, ndmin=1)\n\n def handle_params(self, params, transformed=True, includes_fixed=False,\n return_jacobian=False):\n params = np.array(params, ndmin=1)\n\n if not includes_fixed and self._has_fixed_params:\n k_params = len(self.param_names)\n new_params = np.zeros(k_params, dtype=params.dtype) * np.nan\n new_params[self._free_params_index] = params\n params = new_params\n\n if not transformed:\n # It may be the case that the transformation relies on having\n # \"some\" (non-NaN) values for the fixed parameters, even if we will\n # not actually be transforming the fixed parameters (as they will)\n # be set below regardless\n if not includes_fixed and self._has_fixed_params:\n params[self._fixed_params_index] = (\n list(self._fixed_params.values()))\n\n if return_jacobian:\n transform_score = self.transform_jacobian(params)\n params = self.transform_params(params)\n\n if not includes_fixed and self._has_fixed_params:\n params[self._fixed_params_index] = (\n list(self._fixed_params.values()))\n\n return (params, transform_score) if return_jacobian else params\n\n def update(self, params, transformed=True, includes_fixed=False,\n complex_step=False):\n \"\"\"\n Update the parameters of the model\n\n Parameters\n ----------\n params : array_like\n Array of new parameters.\n transformed : bool, optional\n Whether or not `params` is already transformed. If set to False,\n `transform_params` is called. Default is True.\n\n Returns\n -------\n params : array_like\n Array of parameters.\n\n Notes\n -----\n Since Model is a base class, this method should be overridden by\n subclasses to perform actual updating steps.\n \"\"\"\n return self.handle_params(params=params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n def simulate(self, params, nsimulations, measurement_shocks=None,\n state_shocks=None, initial_state=None):\n r\"\"\"\n Simulate a new time series following the state space model\n\n Parameters\n ----------\n params : array_like\n Array of model parameters.\n nsimulations : int\n The number of observations to simulate. 
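        As an illustrative sketch (the data and the AR(1) parameter values
        below are hypothetical), simulating 100 observations from the model
        might look like:

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> mod = sm.tsa.SARIMAX(np.random.normal(size=100), order=(1, 0, 0))
        >>> sim = mod.simulate(params=[0.5, 1.0], nsimulations=100)
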
If the model is\n time-invariant this can be any number. If the model is\n time-varying, then this number must be less than or equal to the\n number\n measurement_shocks : array_like, optional\n If specified, these are the shocks to the measurement equation,\n :math:`\\varepsilon_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_endog`, where `k_endog` is the\n same as in the state space model.\n state_shocks : array_like, optional\n If specified, these are the shocks to the state equation,\n :math:`\\eta_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the\n same as in the state space model.\n initial_state : array_like, optional\n If specified, this is the state vector at time zero, which should\n be shaped (`k_states` x 1), where `k_states` is the same as in the\n state space model. If unspecified, but the model has been\n initialized, then that initialization is used. If unspecified and\n the model has not been initialized, then a vector of zeros is used.\n Note that this is not included in the returned `simulated_states`\n array.\n\n Returns\n -------\n simulated_obs : array\n An (nsimulations x k_endog) array of simulated observations.\n \"\"\"\n self.update(params)\n\n simulated_obs, simulated_states = self.ssm.simulate(\n nsimulations, measurement_shocks, state_shocks, initial_state)\n\n # Simulated obs is (nobs x k_endog); do not want to squeeze in\n # case of nsimulations = 1\n if simulated_obs.shape[1] == 1:\n simulated_obs = simulated_obs[:, 0]\n return simulated_obs\n\n def impulse_responses(self, params, steps=1, impulse=0,\n orthogonalized=False, cumulative=False, **kwargs):\n \"\"\"\n Impulse response function\n\n Parameters\n ----------\n params : array_like\n Array of model parameters.\n steps : int, optional\n The number of steps for which impulse responses are calculated.\n Default is 1. Note that the initial impulse is not counted as a\n step, so if `steps=1`, the output will have 2 entries.\n impulse : int or array_like\n If an integer, the state innovation to pulse; must be between 0\n and `k_posdef-1`. Alternatively, a custom impulse vector may be\n provided; must be shaped `k_posdef x 1`.\n orthogonalized : bool, optional\n Whether or not to perform impulse using orthogonalized innovations.\n Note that this will also affect custum `impulse` vectors. Default\n is False.\n cumulative : bool, optional\n Whether or not to return cumulative impulse responses. Default is\n False.\n **kwargs\n If the model is time-varying and `steps` is greater than the number\n of observations, any of the state space representation matrices\n that are time-varying must have updated values provided for the\n out-of-sample steps.\n For example, if `design` is a time-varying component, `nobs` is 10,\n and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be\n provided with the new design matrix values.\n\n Returns\n -------\n impulse_responses : array\n Responses for each endogenous variable due to the impulse\n given by the `impulse` argument. 
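        As an illustrative sketch (the model and parameter values below are
        hypothetical), impulse responses for an AR(1) specification could be
        computed as:

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> mod = sm.tsa.SARIMAX(np.random.normal(size=100), order=(1, 0, 0))
        >>> irfs = mod.impulse_responses([0.5, 1.0], steps=10)

        The returned ``irfs`` is then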
A (steps + 1 x k_endog) array.\n\n Notes\n -----\n Intercepts in the measurement and state equation are ignored when\n calculating impulse responses.\n\n \"\"\"\n self.update(params)\n irfs = self.ssm.impulse_responses(\n steps, impulse, orthogonalized, cumulative, **kwargs)\n\n # IRF is (nobs x k_endog); do not want to squeeze in case of steps = 1\n if irfs.shape[1] == 1:\n irfs = irfs[:, 0]\n\n return irfs\n\n @classmethod\n def from_formula(cls, formula, data, subset=None):\n \"\"\"\n Not implemented for state space models\n \"\"\"\n raise NotImplementedError\n\n\nclass MLEResults(tsbase.TimeSeriesModelResults):\n r\"\"\"\n Class to hold results from fitting a state space model.\n\n Parameters\n ----------\n model : MLEModel instance\n The fitted model instance\n params : array\n Fitted parameters\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n\n Attributes\n ----------\n model : Model instance\n A reference to the model that was fit.\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n nobs : float\n The number of observations used to fit the model.\n params : array\n The parameters of the model.\n scale : float\n This is currently set to 1.0 unless the model uses concentrated\n filtering.\n\n See Also\n --------\n MLEModel\n statsmodels.tsa.statespace.kalman_filter.FilterResults\n statsmodels.tsa.statespace.representation.FrozenRepresentation\n \"\"\"\n def __init__(self, model, params, results, cov_type=None, cov_kwds=None,\n **kwargs):\n self.data = model.data\n scale = results.scale\n\n tsbase.TimeSeriesModelResults.__init__(self, model, params,\n normalized_cov_params=None,\n scale=scale)\n\n # Save the fixed parameters\n self._has_fixed_params = self.model._has_fixed_params\n self._fixed_params_index = self.model._fixed_params_index\n self._free_params_index = self.model._free_params_index\n # TODO: seems like maybe self.fixed_params should be the dictionary\n # itself, not just the keys?\n if self._has_fixed_params:\n self._fixed_params = self.model._fixed_params.copy()\n self.fixed_params = list(self._fixed_params.keys())\n else:\n self._fixed_params = None\n self.fixed_params = []\n self.param_names = [\n '%s (fixed)' % name if name in self.fixed_params else name\n for name in (self.data.param_names or [])]\n\n # Save the state space representation output\n self.filter_results = results\n if isinstance(results, SmootherResults):\n self.smoother_results = results\n else:\n self.smoother_results = None\n\n # Dimensions\n self.nobs = self.filter_results.nobs\n self.nobs_diffuse = self.filter_results.nobs_diffuse\n if self.nobs_diffuse > 0 and self.loglikelihood_burn > 0:\n warnings.warn('Care should be used when applying a loglikelihood'\n ' burn to a model with exact diffuse initialization.'\n ' Some results objects, e.g. degrees of freedom,'\n ' expect only one of the two to be set.')\n # This only excludes explicitly burned (usually approximate diffuse)\n # periods but does not exclude approximate diffuse periods. This is\n # because the loglikelihood remains valid for the initial periods in\n # the exact diffuse case (see DK, 2012, section 7.2) and so also do\n # e.g. information criteria (see DK, 2012, section 7.4) and the score\n # vector (see DK, 2012, section 7.3.3, equation 7.15).\n # However, other objects should be excluded in the diffuse periods\n # (e.g. 
the diffuse forecast errors, so in some cases a different\n # nobs_effective will have to be computed and used)\n self.nobs_effective = self.nobs - self.loglikelihood_burn\n\n P = self.filter_results.initial_diffuse_state_cov\n self.k_diffuse_states = 0 if P is None else np.sum(np.diagonal(P) == 1)\n\n # Degrees of freedom (see DK 2012, section 7.4)\n k_free_params = self.params.size - len(self.fixed_params)\n self.df_model = (k_free_params + self.k_diffuse_states\n + self.filter_results.filter_concentrated)\n self.df_resid = self.nobs_effective - self.df_model\n\n # Setup covariance matrix notes dictionary\n if not hasattr(self, 'cov_kwds'):\n self.cov_kwds = {}\n if cov_type is None:\n cov_type = 'approx' if results.memory_no_likelihood else 'opg'\n self.cov_type = cov_type\n\n # Setup the cache\n self._cache = {}\n\n # Handle covariance matrix calculation\n if cov_kwds is None:\n cov_kwds = {}\n self._cov_approx_complex_step = (\n cov_kwds.pop('approx_complex_step', True))\n self._cov_approx_centered = cov_kwds.pop('approx_centered', False)\n try:\n self._rank = None\n self._get_robustcov_results(cov_type=cov_type, use_self=True,\n **cov_kwds)\n except np.linalg.LinAlgError:\n self._rank = 0\n k_params = len(self.params)\n self.cov_params_default = np.zeros((k_params, k_params)) * np.nan\n self.cov_kwds['cov_type'] = (\n 'Covariance matrix could not be calculated: singular.'\n ' information matrix.')\n self.model.update(self.params, transformed=True, includes_fixed=True)\n\n # References of filter and smoother output\n extra_arrays = [\n 'filtered_state', 'filtered_state_cov', 'predicted_state',\n 'predicted_state_cov', 'forecasts', 'forecasts_error',\n 'forecasts_error_cov', 'standardized_forecasts_error',\n 'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',\n 'scaled_smoothed_estimator',\n 'scaled_smoothed_estimator_cov', 'smoothing_error',\n 'smoothed_state',\n 'smoothed_state_cov', 'smoothed_state_autocov',\n 'smoothed_measurement_disturbance',\n 'smoothed_state_disturbance',\n 'smoothed_measurement_disturbance_cov',\n 'smoothed_state_disturbance_cov']\n for name in extra_arrays:\n setattr(self, name, getattr(self.filter_results, name, None))\n\n # Remove too-short results when memory conservation was used\n if model.ssm.memory_no_forecast:\n self.forecasts = None\n self.forecasts_error = None\n self.forecasts_error_cov = None\n if model.ssm.memory_no_predicted:\n self.predicted_state = None\n self.predicted_state_cov = None\n if model.ssm.memory_no_filtered:\n self.filtered_state = None\n self.filtered_state_cov = None\n if model.ssm.memory_no_gain:\n pass\n if model.ssm.memory_no_smoothing:\n pass\n if model.ssm.memory_no_std_forecast:\n self.standardized_forecasts_error = None\n\n # Handle removing data\n self._data_attr_model = getattr(self, '_data_attr_model', [])\n self._data_attr_model.extend(['ssm'])\n self._data_attr.extend(extra_arrays)\n self._data_attr.extend(['filter_results', 'smoother_results'])\n self.data_in_cache = getattr(self, 'data_in_cache', [])\n self.data_in_cache.extend([])\n\n def _get_robustcov_results(self, cov_type='opg', **kwargs):\n \"\"\"\n Create new results instance with specified covariance estimator as\n default\n\n Note: creating new results instance currently not supported.\n\n Parameters\n ----------\n cov_type : str\n the type of covariance matrix estimator to use. 
See Notes below\n kwargs : depends on cov_type\n Required or optional arguments for covariance calculation.\n See Notes below.\n\n Returns\n -------\n results : results instance\n This method creates a new results instance with the requested\n covariance as the default covariance of the parameters.\n Inferential statistics like p-values and hypothesis tests will be\n based on this covariance matrix.\n\n Notes\n -----\n The following covariance types and required or optional arguments are\n currently available:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'approx' for the observed information matrix estimator,\n calculated using a numerical approximation of the Hessian matrix.\n Uses complex step approximation by default, or uses finite\n differences if `approx_complex_step=False` in the `cov_kwds`\n dictionary.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. Intermediate calculations use the 'oim'\n method.\n - 'robust_approx' is the same as 'robust' except that the\n intermediate calculations use the 'approx' method.\n - 'none' for no covariance matrix calculation.\n \"\"\"\n from statsmodels.base.covtype import descriptions\n\n use_self = kwargs.pop('use_self', False)\n if use_self:\n res = self\n else:\n raise NotImplementedError\n res = self.__class__(\n self.model, self.params,\n normalized_cov_params=self.normalized_cov_params,\n scale=self.scale)\n\n # Set the new covariance type\n res.cov_type = cov_type\n res.cov_kwds = {}\n\n # Calculate the new covariance matrix\n approx_complex_step = self._cov_approx_complex_step\n if approx_complex_step:\n approx_type_str = 'complex-step'\n elif self._cov_approx_centered:\n approx_type_str = 'centered finite differences'\n else:\n approx_type_str = 'finite differences'\n\n k_params = len(self.params)\n if k_params == 0:\n res.cov_params_default = np.zeros((0, 0))\n res._rank = 0\n res.cov_kwds['description'] = 'No parameters estimated.'\n elif cov_type == 'custom':\n res.cov_type = kwargs['custom_cov_type']\n res.cov_params_default = kwargs['custom_cov_params']\n res.cov_kwds['description'] = kwargs['custom_description']\n res._rank = np.linalg.matrix_rank(res.cov_params_default)\n elif cov_type == 'none':\n res.cov_params_default = np.zeros((k_params, k_params)) * np.nan\n res._rank = np.nan\n res.cov_kwds['description'] = descriptions['none']\n elif self.cov_type == 'approx':\n res.cov_params_default = res.cov_params_approx\n res.cov_kwds['description'] = descriptions['approx'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'oim':\n res.cov_params_default = res.cov_params_oim\n res.cov_kwds['description'] = descriptions['OIM'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'opg':\n res.cov_params_default = res.cov_params_opg\n res.cov_kwds['description'] = descriptions['OPG'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'robust' or self.cov_type == 'robust_oim':\n res.cov_params_default = res.cov_params_robust_oim\n res.cov_kwds['description'] = descriptions['robust-OIM'].format(\n approx_type=approx_type_str)\n elif self.cov_type == 'robust_approx':\n res.cov_params_default = res.cov_params_robust_approx\n res.cov_kwds['description'] = descriptions['robust-approx'].format(\n approx_type=approx_type_str)\n else:\n raise NotImplementedError('Invalid covariance matrix type.')\n\n 
return res\n\n @cache_readonly\n def aic(self):\n \"\"\"\n (float) Akaike Information Criterion\n \"\"\"\n # return -2 * self.llf + 2 * self.df_model\n return aic(self.llf, self.nobs_effective, self.df_model)\n\n @cache_readonly\n def bic(self):\n \"\"\"\n (float) Bayes Information Criterion\n \"\"\"\n # return (-2 * self.llf +\n # self.df_model * np.log(self.nobs_effective))\n return bic(self.llf, self.nobs_effective, self.df_model)\n\n def _cov_params_approx(self, approx_complex_step=True,\n approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n params=self.params, transformed=True, includes_fixed=True,\n method='approx', approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n # TODO: Case with \"not approx_complex_step\" is not hit in\n # tests as of 2017-05-19\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_approx(self):\n \"\"\"\n (array) The variance / covariance matrix. Computed using the numerical\n Hessian approximated by complex step or finite differences methods.\n \"\"\"\n return self._cov_params_approx(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_oim(self, approx_complex_step=True, approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, hessian_method='oim', transformed=True,\n includes_fixed=True, approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_oim(self):\n \"\"\"\n (array) The variance / covariance matrix. Computed using the method\n from Harvey (1989).\n \"\"\"\n return self._cov_params_oim(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_opg(self, approx_complex_step=True, approx_centered=False):\n evaluated_hessian = self.nobs_effective * self.model._hessian_opg(\n self.params, transformed=True, includes_fixed=True,\n approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])\n neg_cov = np.zeros_like(evaluated_hessian) * np.nan\n neg_cov[mask] = tmp\n else:\n (neg_cov, singular_values) = pinv_extended(evaluated_hessian)\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return -neg_cov\n\n @cache_readonly\n def cov_params_opg(self):\n \"\"\"\n (array) The variance / covariance matrix. 
Computed using the outer\n product of gradients method.\n \"\"\"\n return self._cov_params_opg(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n @cache_readonly\n def cov_params_robust(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. Alias for\n `cov_params_robust_oim`\n \"\"\"\n return self.cov_params_robust_oim\n\n def _cov_params_robust_oim(self, approx_complex_step=True,\n approx_centered=False):\n cov_opg = self._cov_params_opg(approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, hessian_method='oim', transformed=True,\n includes_fixed=True, approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = np.zeros_like(evaluated_hessian) * np.nan\n\n cov_opg = cov_opg[mask]\n evaluated_hessian = evaluated_hessian[mask]\n\n tmp, singular_values = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n cov_params[mask] = tmp\n else:\n (cov_params, singular_values) = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return cov_params\n\n @cache_readonly\n def cov_params_robust_oim(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. Computed using the\n method from Harvey (1989) as the evaluated hessian.\n \"\"\"\n return self._cov_params_robust_oim(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def _cov_params_robust_approx(self, approx_complex_step=True,\n approx_centered=False):\n cov_opg = self._cov_params_opg(approx_complex_step=approx_complex_step,\n approx_centered=approx_centered)\n\n evaluated_hessian = self.nobs_effective * self.model.hessian(\n self.params, transformed=True, includes_fixed=True,\n method='approx', approx_complex_step=approx_complex_step)\n # TODO: Case with \"not approx_complex_step\" is not\n # hit in tests as of 2017-05-19\n\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = np.zeros_like(evaluated_hessian) * np.nan\n\n cov_opg = cov_opg[mask]\n evaluated_hessian = evaluated_hessian[mask]\n\n tmp, singular_values = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n cov_params[mask] = tmp\n else:\n (cov_params, singular_values) = pinv_extended(\n np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian))\n\n self.model.update(self.params, transformed=True, includes_fixed=True)\n if self._rank is None:\n self._rank = np.linalg.matrix_rank(np.diag(singular_values))\n return cov_params\n\n @cache_readonly\n def cov_params_robust_approx(self):\n \"\"\"\n (array) The QMLE variance / covariance matrix. Computed using the\n numerical Hessian as the evaluated hessian.\n \"\"\"\n return self._cov_params_robust_approx(self._cov_approx_complex_step,\n self._cov_approx_centered)\n\n def info_criteria(self, criteria, method='standard'):\n r\"\"\"\n Information criteria\n\n Parameters\n ----------\n criteria : {'aic', 'bic', 'hqic'}\n The information criteria to compute.\n method : {'standard', 'lutkepohl'}\n The method for information criteria computation. Default is\n 'standard' method; 'lutkepohl' computes the information criteria\n as in Lütkepohl (2007). 
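        As a usage sketch (``res`` denotes a hypothetical fitted results
        object, for instance from ``sm.tsa.SARIMAX(endog).fit()``), both
        variants can be requested directly:

        >>> aic_standard = res.info_criteria('aic')
        >>> aic_lutkepohl = res.info_criteria('aic', method='lutkepohl')
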
See Notes for formulas.\n\n Notes\n -----\n The `'standard'` formulas are:\n\n .. math::\n\n AIC & = -2 \\log L(Y_n | \\hat \\psi) + 2 k \\\\\n BIC & = -2 \\log L(Y_n | \\hat \\psi) + k \\log n \\\\\n HQIC & = -2 \\log L(Y_n | \\hat \\psi) + 2 k \\log \\log n \\\\\n\n where :math:`\\hat \\psi` are the maximum likelihood estimates of the\n parameters, :math:`n` is the number of observations, and `k` is the\n number of estimated parameters.\n\n Note that the `'standard'` formulas are returned from the `aic`, `bic`,\n and `hqic` results attributes.\n\n The `'lutkepohl'` formulas are (Lütkepohl, 2010):\n\n .. math::\n\n AIC_L & = \\log | Q | + \\frac{2 k}{n} \\\\\n BIC_L & = \\log | Q | + \\frac{k \\log n}{n} \\\\\n HQIC_L & = \\log | Q | + \\frac{2 k \\log \\log n}{n} \\\\\n\n where :math:`Q` is the state covariance matrix. Note that the Lütkepohl\n definitions do not apply to all state space models, and should be used\n with care outside of SARIMAX and VARMAX models.\n\n References\n ----------\n .. [*] Lütkepohl, Helmut. 2007. *New Introduction to Multiple Time*\n *Series Analysis.* Berlin: Springer.\n \"\"\"\n criteria = criteria.lower()\n method = method.lower()\n\n if method == 'standard':\n out = getattr(self, criteria)\n elif method == 'lutkepohl':\n if self.filter_results.state_cov.shape[-1] > 1:\n raise ValueError('Cannot compute Lütkepohl statistics for'\n ' models with time-varying state covariance'\n ' matrix.')\n\n cov = self.filter_results.state_cov[:, :, 0]\n if criteria == 'aic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n 2 * self.df_model / self.nobs_effective)\n elif criteria == 'bic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n self.df_model * np.log(self.nobs_effective) /\n self.nobs_effective)\n elif criteria == 'hqic':\n out = np.squeeze(np.linalg.slogdet(cov)[1] +\n 2 * self.df_model *\n np.log(np.log(self.nobs_effective)) /\n self.nobs_effective)\n else:\n raise ValueError('Invalid information criteria')\n\n else:\n raise ValueError('Invalid information criteria computation method')\n\n return out\n\n @cache_readonly\n def fittedvalues(self):\n \"\"\"\n (array) The predicted values of the model. An (nobs x k_endog) array.\n \"\"\"\n # This is a (k_endog x nobs array; do not want to squeeze in case of\n # the corner case where nobs = 1 (mostly a concern in the predict or\n # forecast functions, but here also to maintain consistency)\n fittedvalues = self.forecasts\n if fittedvalues is None:\n pass\n elif fittedvalues.shape[0] == 1:\n fittedvalues = fittedvalues[0, :]\n else:\n fittedvalues = fittedvalues.T\n return fittedvalues\n\n @cache_readonly\n def hqic(self):\n \"\"\"\n (float) Hannan-Quinn Information Criterion\n \"\"\"\n # return (-2 * self.llf +\n # 2 * np.log(np.log(self.nobs_effective)) * self.df_model)\n return hqic(self.llf, self.nobs_effective, self.df_model)\n\n @cache_readonly\n def llf_obs(self):\n \"\"\"\n (float) The value of the log-likelihood function evaluated at `params`.\n \"\"\"\n return self.filter_results.llf_obs\n\n @cache_readonly\n def llf(self):\n \"\"\"\n (float) The value of the log-likelihood function evaluated at `params`.\n \"\"\"\n return self.filter_results.llf\n\n @cache_readonly\n def loglikelihood_burn(self):\n \"\"\"\n (float) The number of observations during which the likelihood is not\n evaluated.\n \"\"\"\n return self.filter_results.loglikelihood_burn\n\n @cache_readonly\n def pvalues(self):\n \"\"\"\n (array) The p-values associated with the z-statistics of the\n coefficients. 
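        As an illustrative sketch (``res`` denotes a hypothetical fitted
        results object), these are the two-sided p-values implied by the
        z-statistics:

        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> manual_pvalues = 2 * norm.sf(np.abs(res.params / res.bse))
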
Note that the coefficients are assumed to have a Normal\n distribution.\n \"\"\"\n pvalues = np.zeros_like(self.zvalues) * np.nan\n mask = np.ones_like(pvalues, dtype=bool)\n mask[self._free_params_index] = True\n mask &= ~np.isnan(self.zvalues)\n pvalues[mask] = norm.sf(np.abs(self.zvalues[mask])) * 2\n return pvalues\n\n @cache_readonly\n def resid(self):\n \"\"\"\n (array) The model residuals. An (nobs x k_endog) array.\n \"\"\"\n # This is a (k_endog x nobs array; do not want to squeeze in case of\n # the corner case where nobs = 1 (mostly a concern in the predict or\n # forecast functions, but here also to maintain consistency)\n resid = self.forecasts_error\n if resid is None:\n pass\n elif resid.shape[0] == 1:\n resid = resid[0, :]\n else:\n resid = resid.T\n return resid\n\n @cache_readonly\n def zvalues(self):\n \"\"\"\n (array) The z-statistics for the coefficients.\n \"\"\"\n return self.params / self.bse\n\n def test_normality(self, method):\n \"\"\"\n Test for normality of standardized residuals.\n\n Null hypothesis is normality.\n\n Parameters\n ----------\n method : {'jarquebera', None}\n The statistical test for normality. Must be 'jarquebera' for\n Jarque-Bera normality test. If None, an attempt is made to select\n an appropriate test.\n\n Notes\n -----\n Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is\n calculated ignoring the first `d` residuals.\n\n In the case of missing data, the maintained hypothesis is that the\n data are missing completely at random. This test is then run on the\n standardized residuals excluding those corresponding to missing\n observations.\n\n See Also\n --------\n statsmodels.stats.stattools.jarque_bera\n\n \"\"\"\n if method is None:\n method = 'jarquebera'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'jarquebera':\n from statsmodels.stats.stattools import jarque_bera\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n output = []\n for i in range(self.model.k_endog):\n resid = self.filter_results.standardized_forecasts_error[i, d:]\n mask = ~np.isnan(resid)\n output.append(jarque_bera(resid[mask]))\n else:\n raise NotImplementedError('Invalid normality test method.')\n\n return np.array(output)\n\n def test_heteroskedasticity(self, method, alternative='two-sided',\n use_f=True):\n r\"\"\"\n Test for heteroskedasticity of standardized residuals\n\n Tests whether the sum-of-squares in the first third of the sample is\n significantly different than the sum-of-squares in the last third\n of the sample. Analogous to a Goldfeld-Quandt test. The null hypothesis\n is of no heteroskedasticity.\n\n Parameters\n ----------\n method : {'breakvar', None}\n The statistical test for heteroskedasticity. Must be 'breakvar'\n for test of a break in the variance. If None, an attempt is\n made to select an appropriate test.\n alternative : str, 'increasing', 'decreasing' or 'two-sided'\n This specifies the alternative for the p-value calculation. Default\n is two-sided.\n use_f : bool, optional\n Whether or not to compare against the asymptotic distribution\n (chi-squared) or the approximate small-sample distribution (F).\n Default is True (i.e. default is to compare against an F\n distribution).\n\n Returns\n -------\n output : array\n An array with `(test_statistic, pvalue)` for each endogenous\n variable. The array is then sized `(k_endog, 2)`. 
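        As a usage sketch (``res`` denotes a hypothetical fitted results
        object), a one-sided version of the test can be requested through
        the ``alternative`` argument:

        >>> het_inc = res.test_heteroskedasticity('breakvar',
        ...                                       alternative='increasing')
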
If the method is\n called as `het = res.test_heteroskedasticity()`, then `het[0]` is\n an array of size 2 corresponding to the first endogenous variable,\n where `het[0][0]` is the test statistic, and `het[0][1]` is the\n p-value.\n\n Notes\n -----\n The null hypothesis is of no heteroskedasticity. That means different\n things depending on which alternative is selected:\n\n - Increasing: Null hypothesis is that the variance is not increasing\n throughout the sample; that the sum-of-squares in the later\n subsample is *not* greater than the sum-of-squares in the earlier\n subsample.\n - Decreasing: Null hypothesis is that the variance is not decreasing\n throughout the sample; that the sum-of-squares in the earlier\n subsample is *not* greater than the sum-of-squares in the later\n subsample.\n - Two-sided: Null hypothesis is that the variance is not changing\n throughout the sample. Both that the sum-of-squares in the earlier\n subsample is not greater than the sum-of-squares in the later\n subsample *and* that the sum-of-squares in the later subsample is\n not greater than the sum-of-squares in the earlier subsample.\n\n For :math:`h = [T/3]`, the test statistic is:\n\n .. math::\n\n H(h) = \\sum_{t=T-h+1}^T \\tilde v_t^2\n \\Bigg / \\sum_{t=d+1}^{d+1+h} \\tilde v_t^2\n\n where :math:`d` = max(loglikelihood_burn, nobs_diffuse)` (usually\n corresponding to diffuse initialization under either the approximate\n or exact approach).\n\n This statistic can be tested against an :math:`F(h,h)` distribution.\n Alternatively, :math:`h H(h)` is asymptotically distributed according\n to :math:`\\chi_h^2`; this second test can be applied by passing\n `asymptotic=True` as an argument.\n\n See section 5.4 of [1]_ for the above formula and discussion, as well\n as additional details.\n\n TODO\n\n - Allow specification of :math:`h`\n\n References\n ----------\n .. [1] Harvey, Andrew C. 1990. *Forecasting, Structural Time Series*\n *Models and the Kalman Filter.* Cambridge University Press.\n \"\"\"\n if method is None:\n method = 'breakvar'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'breakvar':\n # Store some values\n squared_resid = self.filter_results.standardized_forecasts_error**2\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n # This differs from self.nobs_effective because here we want to\n # exclude exact diffuse periods, whereas self.nobs_effective only\n # excludes explicitly burned (usually approximate diffuse) periods.\n nobs_effective = self.nobs - d\n\n test_statistics = []\n p_values = []\n for i in range(self.model.k_endog):\n h = int(np.round(nobs_effective / 3))\n numer_resid = squared_resid[i, -h:]\n numer_resid = numer_resid[~np.isnan(numer_resid)]\n numer_dof = len(numer_resid)\n\n denom_resid = squared_resid[i, d:d+h]\n denom_resid = denom_resid[~np.isnan(denom_resid)]\n denom_dof = len(denom_resid)\n\n if numer_dof < 2:\n warnings.warn('Early subset of data for variable %d'\n ' has too few non-missing observations to'\n ' calculate test statistic.' % i)\n numer_resid = np.nan\n if denom_dof < 2:\n warnings.warn('Later subset of data for variable %d'\n ' has too few non-missing observations to'\n ' calculate test statistic.' 
% i)\n denom_resid = np.nan\n\n test_statistic = np.sum(numer_resid) / np.sum(denom_resid)\n\n # Setup functions to calculate the p-values\n if use_f:\n from scipy.stats import f\n pval_lower = lambda test_statistics: f.cdf( # noqa:E731\n test_statistics, numer_dof, denom_dof)\n pval_upper = lambda test_statistics: f.sf( # noqa:E731\n test_statistics, numer_dof, denom_dof)\n else:\n from scipy.stats import chi2\n pval_lower = lambda test_statistics: chi2.cdf( # noqa:E731\n numer_dof * test_statistics, denom_dof)\n pval_upper = lambda test_statistics: chi2.sf( # noqa:E731\n numer_dof * test_statistics, denom_dof)\n\n # Calculate the one- or two-sided p-values\n alternative = alternative.lower()\n if alternative in ['i', 'inc', 'increasing']:\n p_value = pval_upper(test_statistic)\n elif alternative in ['d', 'dec', 'decreasing']:\n test_statistic = 1. / test_statistic\n p_value = pval_upper(test_statistic)\n elif alternative in ['2', '2-sided', 'two-sided']:\n p_value = 2 * np.minimum(\n pval_lower(test_statistic),\n pval_upper(test_statistic)\n )\n else:\n raise ValueError('Invalid alternative.')\n\n test_statistics.append(test_statistic)\n p_values.append(p_value)\n\n output = np.c_[test_statistics, p_values]\n else:\n raise NotImplementedError('Invalid heteroskedasticity test'\n ' method.')\n\n return output\n\n def test_serial_correlation(self, method, lags=None):\n \"\"\"\n Ljung-box test for no serial correlation of standardized residuals\n\n Null hypothesis is no serial correlation.\n\n Parameters\n ----------\n method : {'ljungbox','boxpierece', None}\n The statistical test for serial correlation. If None, an attempt is\n made to select an appropriate test.\n lags : None, int or array_like\n If lags is an integer then this is taken to be the largest lag\n that is included, the test result is reported for all smaller lag\n length.\n If lags is a list or array, then all lags are included up to the\n largest lag in the list, however only the tests for the lags in the\n list are reported.\n If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}\n\n Returns\n -------\n output : array\n An array with `(test_statistic, pvalue)` for each endogenous\n variable and each lag. The array is then sized\n `(k_endog, 2, lags)`. 
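        As a usage sketch (``res`` denotes a hypothetical fitted results
        object), the number of lags can be set explicitly:

        >>> lb = res.test_serial_correlation(method='ljungbox', lags=10)
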
If the method is called as\n `ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`\n holds the results of the Ljung-Box test (as would be returned by\n `statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th\n endogenous variable.\n\n Notes\n -----\n Let `d` = max(loglikelihood_burn, nobs_diffuse); this test is\n calculated ignoring the first `d` residuals.\n\n Output is nan for any endogenous variable which has missing values.\n\n See Also\n --------\n statsmodels.stats.diagnostic.acorr_ljungbox\n\n \"\"\"\n if method is None:\n method = 'ljungbox'\n\n if self.standardized_forecasts_error is None:\n raise ValueError('Cannot compute test statistic when standardized'\n ' forecast errors have not been computed.')\n\n if method == 'ljungbox' or method == 'boxpierce':\n from statsmodels.stats.diagnostic import acorr_ljungbox\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n # This differs from self.nobs_effective because here we want to\n # exclude exact diffuse periods, whereas self.nobs_effective only\n # excludes explicitly burned (usually approximate diffuse) periods.\n nobs_effective = self.nobs - d\n output = []\n\n # Default lags for acorr_ljungbox is 40, but may not always have\n # that many observations\n if lags is None:\n lags = min(40, nobs_effective - 1)\n\n for i in range(self.model.k_endog):\n results = acorr_ljungbox(\n self.filter_results.standardized_forecasts_error[i][d:],\n lags=lags, boxpierce=(method == 'boxpierce'))\n if method == 'ljungbox':\n output.append(results[0:2])\n else:\n output.append(results[2:])\n\n output = np.c_[output]\n else:\n raise NotImplementedError('Invalid serial correlation test'\n ' method.')\n return output\n\n def get_prediction(self, start=None, end=None, dynamic=False,\n index=None, **kwargs):\n \"\"\"\n In-sample prediction and out-of-sample forecasting\n\n Parameters\n ----------\n start : int, str, or datetime, optional\n Zero-indexed observation number at which to start forecasting,\n i.e., the first forecast is start. Can also be a date string to\n parse or a datetime type. Default is the the zeroth observation.\n end : int, str, or datetime, optional\n Zero-indexed observation number at which to end forecasting, i.e.,\n the last forecast is end. Can also be a date string to\n parse or a datetime type. However, if the dates index does not\n have a fixed frequency, end must be an integer index if you\n want out of sample prediction. Default is the last observation in\n the sample.\n dynamic : bool, int, str, or datetime, optional\n Integer offset relative to `start` at which to begin dynamic\n prediction. Can also be an absolute date string to parse or a\n datetime type (these are not interpreted as offsets).\n Prior to this observation, true endogenous values will be used for\n prediction; starting with this observation and continuing through\n the end of prediction, forecasted endogenous values will be used\n instead.\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array\n Array of out of in-sample predictions and / or out-of-sample\n forecasts. 
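        As an illustrative sketch (the data below are hypothetical),
        combining in-sample prediction with out-of-sample forecasting might
        look like:

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> res = sm.tsa.SARIMAX(np.random.normal(size=100),
        ...                      order=(1, 0, 0)).fit(disp=False)
        >>> pred = res.get_prediction(start=90, end=109, dynamic=True)
        >>> mean = pred.predicted_mean
        >>> ci = pred.conf_int()

        In general, the array of predictions is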
An (npredict x k_endog) array.\n \"\"\"\n if start is None:\n start = self.model._index[0]\n\n # Handle start, end, dynamic\n start, end, out_of_sample, prediction_index = (\n self.model._get_prediction_index(start, end, index))\n\n # Handle `dynamic`\n if isinstance(dynamic, (bytes, str)):\n dynamic, _, _ = self.model._get_index_loc(dynamic)\n\n # Perform the prediction\n # This is a (k_endog x npredictions) array; do not want to squeeze in\n # case of npredictions = 1\n prediction_results = self.filter_results.predict(\n start, end + out_of_sample + 1, dynamic, **kwargs)\n\n # Return a new mlemodel.PredictionResults object\n return PredictionResultsWrapper(PredictionResults(\n self, prediction_results, row_labels=prediction_index))\n\n def get_forecast(self, steps=1, **kwargs):\n \"\"\"\n Out-of-sample forecasts\n\n Parameters\n ----------\n steps : int, str, or datetime, optional\n If an integer, the number of steps to forecast from the end of the\n sample. Can also be a date string to parse or a datetime type.\n However, if the dates index does not have a fixed frequency, steps\n must be an integer. Default\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array\n Array of out of sample forecasts. A (steps x k_endog) array.\n \"\"\"\n if isinstance(steps, int):\n end = self.nobs + steps - 1\n else:\n end = steps\n return self.get_prediction(start=self.nobs, end=end, **kwargs)\n\n def predict(self, start=None, end=None, dynamic=False, **kwargs):\n \"\"\"\n In-sample prediction and out-of-sample forecasting\n\n Parameters\n ----------\n start : int, str, or datetime, optional\n Zero-indexed observation number at which to start forecasting,\n i.e., the first forecast is start. Can also be a date string to\n parse or a datetime type. Default is the the zeroth observation.\n end : int, str, or datetime, optional\n Zero-indexed observation number at which to end forecasting, i.e.,\n the last forecast is end. Can also be a date string to\n parse or a datetime type. However, if the dates index does not\n have a fixed frequency, end must be an integer index if you\n want out of sample prediction. Default is the last observation in\n the sample.\n dynamic : bool, int, str, or datetime, optional\n Integer offset relative to `start` at which to begin dynamic\n prediction. Can also be an absolute date string to parse or a\n datetime type (these are not interpreted as offsets).\n Prior to this observation, true endogenous values will be used for\n prediction; starting with this observation and continuing through\n the end of prediction, forecasted endogenous values will be used\n instead.\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array\n Array of out of in-sample predictions and / or out-of-sample\n forecasts. An (npredict x k_endog) array.\n \"\"\"\n # Perform the prediction\n prediction_results = self.get_prediction(start, end, dynamic, **kwargs)\n return prediction_results.predicted_mean\n\n def forecast(self, steps=1, **kwargs):\n \"\"\"\n Out-of-sample forecasts\n\n Parameters\n ----------\n steps : int, str, or datetime, optional\n If an integer, the number of steps to forecast from the end of the\n sample. 
Can also be a date string to parse or a datetime type.\n However, if the dates index does not have a fixed frequency, steps\n must be an integer. Default\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array\n Array of out of sample forecasts. A (steps x k_endog) array.\n \"\"\"\n if isinstance(steps, int):\n end = self.nobs + steps - 1\n else:\n end = steps\n return self.predict(start=self.nobs, end=end, **kwargs)\n\n def simulate(self, nsimulations, measurement_shocks=None,\n state_shocks=None, initial_state=None):\n r\"\"\"\n Simulate a new time series following the state space model\n\n Parameters\n ----------\n nsimulations : int\n The number of observations to simulate. If the model is\n time-invariant this can be any number. If the model is\n time-varying, then this number must be less than or equal to the\n number\n measurement_shocks : array_like, optional\n If specified, these are the shocks to the measurement equation,\n :math:`\\varepsilon_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_endog`, where `k_endog` is the\n same as in the state space model.\n state_shocks : array_like, optional\n If specified, these are the shocks to the state equation,\n :math:`\\eta_t`. If unspecified, these are automatically\n generated using a pseudo-random number generator. If specified,\n must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the\n same as in the state space model.\n initial_state : array_like, optional\n If specified, this is the state vector at time zero, which should\n be shaped (`k_states` x 1), where `k_states` is the same as in the\n state space model. If unspecified, but the model has been\n initialized, then that initialization is used. If unspecified and\n the model has not been initialized, then a vector of zeros is used.\n Note that this is not included in the returned `simulated_states`\n array.\n\n Returns\n -------\n simulated_obs : array\n An (nsimulations x k_endog) array of simulated observations.\n \"\"\"\n scale = self.scale if self.filter_results.filter_concentrated else None\n with self.model.ssm.fixed_scale(scale):\n sim = self.model.simulate(self.params, nsimulations,\n measurement_shocks, state_shocks,\n initial_state)\n return sim\n\n def impulse_responses(self, steps=1, impulse=0, orthogonalized=False,\n cumulative=False, **kwargs):\n \"\"\"\n Impulse response function\n\n Parameters\n ----------\n steps : int, optional\n The number of steps for which impulse responses are calculated.\n Default is 1. Note that the initial impulse is not counted as a\n step, so if `steps=1`, the output will have 2 entries.\n impulse : int or array_like\n If an integer, the state innovation to pulse; must be between 0\n and `k_posdef-1`. Alternatively, a custom impulse vector may be\n provided; must be shaped `k_posdef x 1`.\n orthogonalized : bool, optional\n Whether or not to perform impulse using orthogonalized innovations.\n Note that this will also affect custum `impulse` vectors. Default\n is False.\n cumulative : bool, optional\n Whether or not to return cumulative impulse responses. 
Default is\n False.\n **kwargs\n If the model is time-varying and `steps` is greater than the number\n of observations, any of the state space representation matrices\n that are time-varying must have updated values provided for the\n out-of-sample steps.\n For example, if `design` is a time-varying component, `nobs` is 10,\n and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be\n provided with the new design matrix values.\n\n Returns\n -------\n impulse_responses : array\n Responses for each endogenous variable due to the impulse\n given by the `impulse` argument. A (steps + 1 x k_endog) array.\n\n Notes\n -----\n Intercepts in the measurement and state equation are ignored when\n calculating impulse responses.\n\n \"\"\"\n scale = self.scale if self.filter_results.filter_concentrated else None\n with self.model.ssm.fixed_scale(scale):\n irfs = self.model.impulse_responses(self.params, steps, impulse,\n orthogonalized, cumulative,\n **kwargs)\n return irfs\n\n def _apply(self, mod, refit=False, fit_kwargs=None, **kwargs):\n if fit_kwargs is None:\n fit_kwargs = {}\n\n if refit:\n fit_kwargs.setdefault('start_params', self.params)\n if self._has_fixed_params:\n fit_kwargs.setdefault('includes_fixed', True)\n res = mod.fit_constrained(self._fixed_params, **fit_kwargs)\n else:\n res = mod.fit(**fit_kwargs)\n else:\n if 'cov_type' in fit_kwargs:\n raise ValueError('Cannot specify covariance type in'\n ' `fit_kwargs` unless refitting'\n ' parameters (not available in extend).')\n if 'cov_kwds' in fit_kwargs:\n raise ValueError('Cannot specify covariance keyword arguments'\n ' in `fit_kwargs` unless refitting'\n ' parameters (not available in extend).')\n\n fit_kwargs['cov_type'] = 'custom'\n fit_kwargs['cov_kwds'] = {\n 'custom_cov_type': self.cov_type,\n 'custom_cov_params': self.cov_params_default,\n 'custom_description': ('Parameters and standard errors'\n ' were estimated using a different'\n ' dataset and were then applied to this'\n ' dataset. %s'\n % self.cov_kwds['description'])}\n\n if self.smoother_results is not None:\n res = mod.smooth(self.params, **fit_kwargs)\n else:\n res = mod.filter(self.params, **fit_kwargs)\n\n return res\n\n def append(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):\n \"\"\"\n Recreate the results object with new data appended to the original data\n\n Creates a new result object applied to a dataset that is created by\n appending new data to the end of the model's original data. The new\n results can then be used for analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n refit : bool, optional\n Whether to re-fit the parameters, based on the combined dataset.\n Default is False (so parameters from the current results object\n are used to create the new results object).\n fit_kwargs : dict, optional\n Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /\n `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results from both the\n original dataset and the new dataset.\n\n Notes\n -----\n The `endog` and `exog` arguments to this method must be formatted in\n the same was (e.g. 
Pandas Series versus Numpy array) as were the\n `endog` and `exog` arrays passed to the original model.\n\n The `endog` argument to this method should consist of new observations\n that occurred directly after the last element of `endog`. For any other\n kind of dataset, see the `apply` method.\n\n This method will apply filtering to all of the original data as well\n as to the new data. To apply filtering only to the new data (which\n can be much faster if the original dataset is large), see the `extend`\n method.\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='2002', periods=1, freq='A')\n >>> new_observations = pd.Series([0.9], index=new_index)\n >>> updated_res = res.append(new_observations)\n >>> print(updated_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(updated_res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n >>> print(updated_res.forecast(1))\n 2003 0.878\n Freq: A-DEC, dtype: float64\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.extend\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n \"\"\"\n new_endog = concat([self.model.data.orig_endog, endog], axis=0)\n if exog is not None:\n _, exog = prepare_exog(exog)\n new_exog = concat([self.model.data.orig_exog, exog], axis=0)\n else:\n new_exog = None\n\n mod = self.model.clone(new_endog, exog=new_exog, **kwargs)\n res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def extend(self, endog, exog=None, fit_kwargs=None, **kwargs):\n \"\"\"\n Recreate the results object for new data that extends the original data\n\n Creates a new result object applied to a new dataset that is assumed to\n follow directly from the end of the model's original data. The new\n results can then be used for analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n fit_kwargs : dict, optional\n Keyword arguments to pass to `filter` or `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results only for the new\n dataset.\n\n Notes\n -----\n The `endog` argument to this method should consist of new observations\n that occurred directly after the last element of the model's original\n `endog` array. For any other kind of dataset, see the `apply` method.\n\n This method will apply filtering only to the new data provided by the\n `endog` argument, which can be much faster than re-filtering the entire\n dataset. However, the returned results object will only have results\n for the new data. 
To retrieve results for both the new data and the\n original data, see the `append` method.\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='2002', periods=1, freq='A')\n >>> new_observations = pd.Series([0.9], index=new_index)\n >>> updated_res = res.extend(new_observations)\n >>> print(updated_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(updated_res.fittedvalues)\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n >>> print(updated_res.forecast(1))\n 2003 0.878\n Freq: A-DEC, dtype: float64\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.append\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n \"\"\"\n # Extend the current fit result to additional data\n mod = self.model.clone(endog, exog=exog, **kwargs)\n mod.ssm.initialization = Initialization(\n mod.k_states, 'known', constant=self.predicted_state[..., -1],\n stationary_cov=self.predicted_state_cov[..., -1])\n res = self._apply(mod, refit=False, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def apply(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):\n \"\"\"\n Apply the fitted parameters to new data unrelated to the original data\n\n Creates a new result object using the current fitted parameters,\n applied to a completely new dataset that is assumed to be unrelated to\n the model's original data. The new results can then be used for\n analysis or forecasting.\n\n Parameters\n ----------\n endog : array_like\n New observations from the modeled time-series process.\n exog : array_like, optional\n New observations of exogenous regressors, if applicable.\n refit : bool, optional\n Whether to re-fit the parameters, using the new dataset.\n Default is False (so parameters from the current results object\n are used to create the new results object).\n fit_kwargs : dict, optional\n Keyword arguments to pass to `fit` (if `refit=True`) or `filter` /\n `smooth`.\n **kwargs\n Keyword arguments may be used to modify model specification\n arguments when created the new model object.\n\n Returns\n -------\n results\n Updated Results object, that includes results only for the new\n dataset.\n\n Notes\n -----\n The `endog` argument to this method should consist of new observations\n that are unrelated to the original model's `endog` dataset. 
For\n observations that continue that original dataset by follow directly\n after its last element, see the `append` and `extend` methods.\n\n Examples\n --------\n >>> index = pd.period_range(start='2000', periods=2, freq='A')\n >>> original_observations = pd.Series([1.2, 1.5], index=index)\n >>> mod = sm.tsa.SARIMAX(original_observations)\n >>> res = mod.fit()\n >>> print(res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(res.fittedvalues)\n 2000 0.0000\n 2001 1.1707\n Freq: A-DEC, dtype: float64\n >>> print(res.forecast(1))\n 2002 1.4634\n Freq: A-DEC, dtype: float64\n\n >>> new_index = pd.period_range(start='1980', periods=3, freq='A')\n >>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index)\n >>> new_res = res.apply(new_observations)\n >>> print(new_res.params)\n ar.L1 0.9756\n sigma2 0.0889\n dtype: float64\n >>> print(new_res.fittedvalues)\n 1980 1.1707\n 1981 1.3659\n 1982 0.2927\n Freq: A-DEC, dtype: float64\n Freq: A-DEC, dtype: float64\n >>> print(new_res.forecast(1))\n 1983 1.1707\n Freq: A-DEC, dtype: float64\n\n See Also\n --------\n statsmodels.tsa.statespace.mlemodel.MLEResults.append\n statsmodels.tsa.statespace.mlemodel.MLEResults.apply\n \"\"\"\n mod = self.model.clone(endog, exog=exog, **kwargs)\n res = self._apply(mod, refit=refit, fit_kwargs=fit_kwargs, **kwargs)\n\n return res\n\n def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):\n \"\"\"\n Diagnostic plots for standardized residuals of one endogenous variable\n\n Parameters\n ----------\n variable : int, optional\n Index of the endogenous variable for which the diagnostic plots\n should be created. Default is 0.\n lags : int, optional\n Number of lags to include in the correlogram. Default is 10.\n fig : Matplotlib Figure instance, optional\n If given, subplots are created in this figure instead of in a new\n figure. Note that the 2x2 grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Notes\n -----\n Produces a 2x2 plot grid with the following plots (ordered clockwise\n from top left):\n\n 1. Standardized residuals over time\n 2. Histogram plus estimated density of standardized residuals, along\n with a Normal(0,1) density plotted for reference.\n 3. Normal Q-Q plot, with Normal reference line.\n 4. 
Correlogram\n\n See Also\n --------\n statsmodels.graphics.gofplots.qqplot\n statsmodels.graphics.tsaplots.plot_acf\n \"\"\"\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n # Eliminate residuals associated with burned or diffuse likelihoods\n d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)\n resid = self.filter_results.standardized_forecasts_error[variable, d:]\n\n # Top-left: residuals vs time\n ax = fig.add_subplot(221)\n if hasattr(self.data, 'dates') and self.data.dates is not None:\n x = self.data.dates[d:]._mpl_repr()\n else:\n x = np.arange(len(resid))\n ax.plot(x, resid)\n ax.hlines(0, x[0], x[-1], alpha=0.5)\n ax.set_xlim(x[0], x[-1])\n ax.set_title('Standardized residual')\n\n # Top-right: histogram, Gaussian kernel density, Normal density\n # Can only do histogram and Gaussian kernel density on the non-null\n # elements\n resid_nonmissing = resid[~(np.isnan(resid))]\n ax = fig.add_subplot(222)\n\n # gh5792: Remove except after support for matplotlib>2.1 required\n try:\n ax.hist(resid_nonmissing, density=True, label='Hist')\n except AttributeError:\n ax.hist(resid_nonmissing, normed=True, label='Hist')\n\n from scipy.stats import gaussian_kde, norm\n kde = gaussian_kde(resid_nonmissing)\n xlim = (-1.96*2, 1.96*2)\n x = np.linspace(xlim[0], xlim[1])\n ax.plot(x, kde(x), label='KDE')\n ax.plot(x, norm.pdf(x), label='N(0,1)')\n ax.set_xlim(xlim)\n ax.legend()\n ax.set_title('Histogram plus estimated density')\n\n # Bottom-left: QQ plot\n ax = fig.add_subplot(223)\n from statsmodels.graphics.gofplots import qqplot\n qqplot(resid_nonmissing, line='s', ax=ax)\n ax.set_title('Normal Q-Q')\n\n # Bottom-right: Correlogram\n ax = fig.add_subplot(224)\n from statsmodels.graphics.tsaplots import plot_acf\n plot_acf(resid, ax=ax, lags=lags)\n ax.set_title('Correlogram')\n\n ax.set_ylim(-1, 1)\n\n return fig\n\n def summary(self, alpha=.05, start=None, title=None, model_name=None,\n display_params=True):\n \"\"\"\n Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals. Default is 0.05.\n start : int, optional\n Integer of the start observation. Default is 0.\n model_name : str\n The name of the model used. 
Default is to use model class name.\n\n Returns\n -------\n summary : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n \"\"\"\n from statsmodels.iolib.summary import Summary\n\n # Model specification results\n model = self.model\n if title is None:\n title = 'Statespace Model Results'\n\n if start is None:\n start = 0\n if self.model._index_dates:\n ix = self.model._index\n d = ix[start]\n sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]\n d = ix[-1]\n sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]\n else:\n sample = [str(start), ' - ' + str(self.nobs)]\n\n # Standardize the model name as a list of str\n if model_name is None:\n model_name = model.__class__.__name__\n\n # Diagnostic tests results\n try:\n het = self.test_heteroskedasticity(method='breakvar')\n except Exception: # FIXME: catch something specific\n het = np.array([[np.nan]*2])\n try:\n lb = self.test_serial_correlation(method='ljungbox')\n except Exception: # FIXME: catch something specific\n lb = np.array([[np.nan]*2]).reshape(1, 2, 1)\n try:\n jb = self.test_normality(method='jarquebera')\n except Exception: # FIXME: catch something specific\n jb = np.array([[np.nan]*4])\n\n # Create the tables\n if not isinstance(model_name, list):\n model_name = [model_name]\n\n top_left = [('Dep. Variable:', None)]\n top_left.append(('Model:', [model_name[0]]))\n for i in range(1, len(model_name)):\n top_left.append(('', ['+ ' + model_name[i]]))\n top_left += [\n ('Date:', None),\n ('Time:', None),\n ('Sample:', [sample[0]]),\n ('', [sample[1]])\n ]\n\n top_right = [\n ('No. Observations:', [self.nobs]),\n ('Log Likelihood', [\"%#5.3f\" % self.llf]),\n ]\n if hasattr(self, 'rsquared'):\n top_right.append(('R-squared:', [\"%#8.3f\" % self.rsquared]))\n top_right += [\n ('AIC', [\"%#5.3f\" % self.aic]),\n ('BIC', [\"%#5.3f\" % self.bic]),\n ('HQIC', [\"%#5.3f\" % self.hqic])]\n if (self.filter_results is not None and\n self.filter_results.filter_concentrated):\n top_right.append(('Scale', [\"%#5.3f\" % self.scale]))\n\n if hasattr(self, 'cov_type'):\n top_left.append(('Covariance Type:', [self.cov_type]))\n\n format_str = lambda array: [ # noqa:E731\n ', '.join(['{0:.2f}'.format(i) for i in array])\n ]\n diagn_left = [('Ljung-Box (Q):', format_str(lb[:, 0, -1])),\n ('Prob(Q):', format_str(lb[:, 1, -1])),\n ('Heteroskedasticity (H):', format_str(het[:, 0])),\n ('Prob(H) (two-sided):', format_str(het[:, 1]))\n ]\n\n diagn_right = [('Jarque-Bera (JB):', format_str(jb[:, 0])),\n ('Prob(JB):', format_str(jb[:, 1])),\n ('Skew:', format_str(jb[:, 2])),\n ('Kurtosis:', format_str(jb[:, 3]))\n ]\n\n summary = Summary()\n summary.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n if len(self.params) > 0 and display_params:\n summary.add_table_params(self, alpha=alpha,\n xname=self.param_names, use_t=False)\n summary.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n title=\"\")\n\n # Add warnings/notes, added to text format only\n etext = []\n if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:\n etext.append(self.cov_kwds['description'])\n if self._rank < (len(self.params) - len(self.fixed_params)):\n cov_params = self.cov_params()\n if len(self.fixed_params) > 0:\n mask = np.ix_(self._free_params_index, self._free_params_index)\n cov_params = cov_params[mask]\n etext.append(\"Covariance matrix is singular or near-singular,\"\n \" with condition 
number %6.3g. Standard errors may be\"\n \" unstable.\" % np.linalg.cond(cov_params))\n\n if etext:\n etext = [\"[{0}] {1}\".format(i + 1, text)\n for i, text in enumerate(etext)]\n etext.insert(0, \"Warnings:\")\n summary.add_extra_txt(etext)\n\n return summary\n\n\nclass MLEResultsWrapper(wrap.ResultsWrapper):\n _attrs = {\n 'zvalues': 'columns',\n 'cov_params_approx': 'cov',\n 'cov_params_default': 'cov',\n 'cov_params_oim': 'cov',\n 'cov_params_opg': 'cov',\n 'cov_params_robust': 'cov',\n 'cov_params_robust_approx': 'cov',\n 'cov_params_robust_oim': 'cov',\n }\n _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {\n 'forecast': 'dates',\n 'simulate': 'ynames',\n 'impulse_responses': 'ynames'\n }\n _wrap_methods = wrap.union_dicts(\n tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)\nwrap.populate_wrapper(MLEResultsWrapper, MLEResults) # noqa:E305\n\n\nclass PredictionResults(pred.PredictionResults):\n \"\"\"\n\n Parameters\n ----------\n prediction_results : kalman_filter.PredictionResults instance\n Results object from prediction after fitting or filtering a state space\n model.\n row_labels : iterable\n Row labels for the predicted data.\n\n Attributes\n ----------\n\n \"\"\"\n def __init__(self, model, prediction_results, row_labels=None):\n if model.model.k_endog == 1:\n endog = pd.Series(prediction_results.endog[:, 0],\n name=model.model.endog_names)\n else:\n endog = pd.DataFrame(prediction_results.endog.T,\n columns=model.model.endog_names)\n self.model = Bunch(data=model.data.__class__(\n endog=endog, predict_dates=row_labels))\n self.prediction_results = prediction_results\n\n # Get required values\n predicted_mean = self.prediction_results.forecasts\n if predicted_mean.shape[0] == 1:\n predicted_mean = predicted_mean[0, :]\n else:\n predicted_mean = predicted_mean.transpose()\n\n var_pred_mean = self.prediction_results.forecasts_error_cov\n if var_pred_mean.shape[0] == 1:\n var_pred_mean = var_pred_mean[0, 0, :]\n else:\n var_pred_mean = var_pred_mean.transpose()\n\n # Initialize\n super(PredictionResults, self).__init__(predicted_mean, var_pred_mean,\n dist='norm',\n row_labels=row_labels,\n link=identity())\n\n @property\n def se_mean(self):\n if self.var_pred_mean.ndim == 1:\n se_mean = np.sqrt(self.var_pred_mean)\n else:\n se_mean = np.sqrt(self.var_pred_mean.T.diagonal())\n return se_mean\n\n def conf_int(self, method='endpoint', alpha=0.05, **kwds):\n # TODO: this performs metadata wrapping, and that should be handled\n # by attach_* methods. 
However, they do not currently support\n # this use case.\n conf_int = super(PredictionResults, self).conf_int(\n method, alpha, **kwds)\n\n # Create a dataframe\n if self.row_labels is not None:\n conf_int = pd.DataFrame(conf_int, index=self.row_labels)\n\n # Attach the endog names\n ynames = self.model.data.ynames\n if not type(ynames) == list:\n ynames = [ynames]\n names = (['lower %s' % name for name in ynames] +\n ['upper %s' % name for name in ynames])\n conf_int.columns = names\n\n return conf_int\n\n def summary_frame(self, endog=0, what='all', alpha=0.05):\n # TODO: finish and cleanup\n # import pandas as pd\n # ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split\n ci_mean = np.asarray(self.conf_int(alpha=alpha))\n to_include = OrderedDict()\n if self.predicted_mean.ndim == 1:\n yname = self.model.data.ynames\n to_include['mean'] = self.predicted_mean\n to_include['mean_se'] = self.se_mean\n k_endog = 1\n else:\n yname = self.model.data.ynames[endog]\n to_include['mean'] = self.predicted_mean[:, endog]\n to_include['mean_se'] = self.se_mean[:, endog]\n k_endog = self.predicted_mean.shape[1]\n to_include['mean_ci_lower'] = ci_mean[:, endog]\n to_include['mean_ci_upper'] = ci_mean[:, k_endog + endog]\n\n # OrderedDict does not work to preserve sequence\n # pandas dict does not handle 2d_array\n # data = np.column_stack(list(to_include.values()))\n # names = ....\n res = pd.DataFrame(to_include, index=self.row_labels,\n columns=to_include.keys())\n res.columns.name = yname\n return res\n\n\nclass PredictionResultsWrapper(wrap.ResultsWrapper):\n _attrs = {\n 'predicted_mean': 'dates',\n 'se_mean': 'dates',\n 't_values': 'dates',\n }\n _wrap_attrs = wrap.union_dicts(_attrs)\n\n _methods = {}\n _wrap_methods = wrap.union_dicts(_methods)\nwrap.populate_wrapper(PredictionResultsWrapper, PredictionResults) # noqa:E305\n" ]
[ [ "scipy.stats.zscore", "scipy.stats.scoreatpercentile", "numpy.random.rand", "numpy.random.seed", "numpy.exp", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.column_stack" ], [ "scipy.stats.norm.pdf", "numpy.ones_like", "numpy.dot", "numpy.outer", "numpy.inner", "numpy.zeros_like", "numpy.linalg.matrix_rank", "numpy.log", "scipy.stats.chi2.sf", "pandas.DataFrame", "scipy.stats.f.sf", "numpy.eye", "numpy.arange", "numpy.sqrt", "numpy.linalg.inv", "numpy.array", "numpy.zeros", "numpy.round", "numpy.ix_", "numpy.identity", "scipy.stats.f.cdf", "numpy.linalg.slogdet", "numpy.isnan", "numpy.sum", "numpy.diagonal", "scipy.stats.gaussian_kde", "numpy.linalg.cond", "numpy.abs", "pandas.Series", "numpy.linspace", "scipy.stats.chi2.cdf", "numpy.diag", "numpy.maximum" ] ]
2021rahul/Weakly-supervised-regression-for-ORDinal-labels
[ "1654b097c2f8fe3177fd929a8c58bbea677015f0" ]
[ "SOURCE/BALANCED/WORD/test_model.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 13 12:32:57 2019\n\n@author: ghosh128\n\"\"\"\n\nimport sys\nsys.path.append(\"../\")\nimport os\nimport numpy as np\nimport config\nimport tensorflow as tf\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\ntf.set_random_seed(1)\n#%%\nprint(\"LOAD DATA\")\ntest_data = np.load(os.path.join(config.NUMPY_DIR, \"test_data.npy\"))\nnum_features = test_data.shape[-1] - 2\n#%%\nprint(\"BUILD MODEL\")\n\ntf.reset_default_graph()\nwith tf.name_scope('data'):\n X = tf.placeholder(tf.float32, [None, num_features], name=\"inputs\")\n Y = tf.placeholder(tf.float32, [None, 1], name=\"labels\")\n\nwith tf.variable_scope(\"Variables\", reuse=tf.AUTO_REUSE):\n W = tf.get_variable(\"W\", [num_features, 1], initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", [1], initializer=tf.zeros_initializer())\n\nZ = tf.matmul(X, W, name=\"multiply_weights\")\nZ = tf.add(Z, b, name=\"add_bias\")\nZ = tf.sigmoid(Z)\n#%%\nprint(\"TEST MODEL\")\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n saver.restore(sess, os.path.join(config.MODEL_DIR, \"WORD\", \"model_balanced.ckpt\"))\n data = test_data[:,:-2]\n feed_dict = {X: data}\n preds = sess.run(Z, feed_dict=feed_dict)\nlabels = np.reshape(test_data[:, -2], [-1, 1])\nRMSE = sqrt(mean_squared_error(labels, preds))\nprint(\"RMSE:\", RMSE)\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.zeros_initializer", "sklearn.metrics.mean_squared_error", "numpy.reshape", "tensorflow.sigmoid", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.reset_default_graph", "tensorflow.matmul", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.add" ] ]
drunckoder/yandex_school
[ "d080fee90b74977e0a671309662893b0f95fad60" ]
[ "yandex_school/resources.py" ]
[ "from typing import List, Dict, Tuple\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom marshmallow import ValidationError\nimport numpy\n\nfrom yandex_school import db\nfrom yandex_school.models import Import, Citizen, Relative\nfrom yandex_school.validation import citizenSchema, citizensSchema, validate_relatives, validate_citizen_ids\n\n\nclass CreateImport(Resource):\n \"\"\"\n Serves /imports endpoint\n \"\"\"\n\n @staticmethod\n def store_relationships(import_id: int, relative_links: List[Tuple]) -> None:\n \"\"\"\n Maps citizen_ids to database ids, pushes relationships into database\n :param import_id: id of current import\n :param relative_links: relationship links list of citizen_ids\n :return: None\n \"\"\"\n # get citizens ids of the current import\n id_list = db.engine.execute(\n db.select([Citizen.c.id, Citizen.c.citizen_id]).where(Citizen.c.import_id == import_id)\n ).fetchall()\n # map database ids to citizen ids\n rev_id_map = {k: v for v, k in id_list}\n # create relationships\n relationships = [{'citizen_id': rev_id_map[citizen], 'relative_id': rev_id_map[relative]}\n for citizen, relative in relative_links]\n # push into database\n db.engine.execute(Relative.insert(), relationships)\n\n def post(self):\n \"\"\"\n Post request handler\n \"\"\"\n try:\n # validates data, returns objects\n citizens = citizensSchema.load(request.json)\n # check if there are no citizens\n if not citizens:\n raise ValidationError('No citizens were present in the request body')\n # check if ids are correct\n validate_citizen_ids(citizens)\n # validates relatives, returns relationship tuples\n relative_links = validate_relatives(citizens)\n except ValidationError as ex:\n return {'message': f'Validation error', 'errors': ex.messages}, 400\n except KeyError as ex:\n return {'message': f'Expected key {ex} not found in the request body'}, 400\n except TypeError as ex:\n return {'message': f'Malformed data', 'errors': ex}, 400\n\n # putting new import record into db and getting resulting primary key back\n import_id = db.engine.execute(Import.insert(), [{}]).inserted_primary_key[0]\n\n # assigning IDs manually as bulk saving ignores\n # relationships\n for citizen in citizens:\n citizen['import_id'] = import_id\n\n # putting citizens into db\n db.engine.execute(Citizen.insert(), citizens)\n\n # store relationships only if they exist\n if relative_links:\n self.store_relationships(import_id, relative_links)\n\n return {'data': {'import_id': import_id}}, 201\n\n\nclass PatchCitizen(Resource):\n \"\"\"\n Serves /imports/<int:import_id>/citizens/<int:citizen_id> endpoint\n \"\"\"\n\n @staticmethod\n def get_relatives_diff(import_id: int, citizen_id: int, requested_relatives: list) -> Tuple[int, list, list]:\n \"\"\"\n Calculates relative changes to be made by request.\n :param import_id: requested import_id\n :param citizen_id: requested citizen_id\n :param requested_relatives: proposed relatives, should be final state of the operation\n :return: citizen's database id, list of new relatives, list of lost relatives. 
All ids are database ids.\n \"\"\"\n\n def get_diff(cur: List[int], req: List[int]) -> Tuple[set, set]:\n \"\"\"\n Sub-function for set difference operations\n :param cur: current relatives\n :param req: proposed relatives\n :return: list of citizen_ids to be added and to be removed from relatives\n \"\"\"\n cur, req, add, rem = set(cur), set(req), set(), set()\n rem = cur - req\n add = cur - rem ^ req\n return add, rem\n\n join = Citizen.outerjoin(Relative, Relative.c.citizen_id == Citizen.c.id)\n raw_relatives = db.engine.execute(\n db.select([Relative.c.relative_id])\n .where(Citizen.c.import_id == import_id)\n .where(Citizen.c.citizen_id == citizen_id)\n .select_from(join)\n ).fetchall()\n\n id_list = db.engine.execute(\n db.select([Citizen.c.id, Citizen.c.citizen_id]).where(Citizen.c.import_id == import_id)\n )\n\n id_map = {k: v for k, v in id_list}\n rev_id_map = {k: v for v, k in id_map.items()}\n\n # raises KeyError if citizen_id does not exist\n db_citizen_id = rev_id_map[citizen_id]\n\n if not all([citizen_id in rev_id_map for citizen_id in requested_relatives]):\n raise ValidationError(f'Citizen relatives contain unexistent citizen_id')\n\n current_relatives = list(map(id_map.get, *zip(*raw_relatives)))\n\n add_list, rem_list = get_diff(current_relatives, requested_relatives)\n\n add_list = list(map(rev_id_map.get, add_list))\n rem_list = list(map(rev_id_map.get, rem_list))\n\n add_links = []\n\n for x in add_list:\n add_links.append({'citizen_id': db_citizen_id, 'relative_id': x})\n add_links.append({'citizen_id': x, 'relative_id': db_citizen_id})\n\n return db_citizen_id, add_links, rem_list\n\n def process_relatives(self, import_id: int, citizen_id: int, requested_relatives: list) -> int:\n \"\"\"\n Pushes relationship changes into database\n :param import_id: requested import_id\n :param citizen_id: requested citizen_id\n :param requested_relatives: proposed relatives, should be final state of the operation\n :return: citizen's database id.\n \"\"\"\n db_citizen_id, add_links, rem_list = self.get_relatives_diff(import_id, citizen_id, requested_relatives)\n\n # remove lost relationships\n if rem_list:\n # one side\n db.engine.execute(Relative.delete().where(db.and_(Relative.c.citizen_id == db_citizen_id,\n Relative.c.relative_id.in_(rem_list))))\n # opposite side\n db.engine.execute(Relative.delete().where(db.and_(Relative.c.citizen_id.in_(rem_list),\n Relative.c.relative_id == db_citizen_id)))\n # add new relationships\n if add_links:\n db.engine.execute(Relative.insert(), add_links)\n\n return db_citizen_id\n\n @staticmethod\n def merge_relatives(citizen_relatives: list, id_list: List[int]) -> dict:\n \"\"\"\n Maps database ids to citizen_ids and merges joined citizen relationships\n rows from database into properly serializable format.\n Converts citizen from raw format to a named dict as a side-effect.\n :param citizen_relatives: list of RowProxy of citizen and relative data\n :param id_list: list of ids to citizen_ids\n :return: citizen dict with relatives\n \"\"\"\n citizen_relatives = [dict(citizen_relative) for citizen_relative in citizen_relatives]\n citizen = dict(citizen_relatives[0])\n\n if citizen['relative_id']:\n id_map = {k: v for k, v in id_list}\n relatives = [id_map[entry['relative_id']] for entry in citizen_relatives]\n else:\n relatives = []\n\n citizen['relatives'] = relatives\n\n del citizen['relative_id']\n\n return citizen\n\n def patch(self, import_id, citizen_id):\n \"\"\"\n Patch request handler\n \"\"\"\n try:\n citizen_part = 
citizenSchema.load(request.json, partial=True)\n except ValidationError as ex:\n return {'message': f'Validation error', 'errors': ex.messages}, 400\n except KeyError as ex:\n return {'message': f'Expected key {ex} not found in the request body'}, 400\n except TypeError as ex:\n return {'message': f'Malformed data', 'errors': ex}, 400\n\n if 'citizen_id' in citizen_part:\n return {'message': 'citizen_id can not be patched'}, 400\n\n if 'relatives' in citizen_part: # resolve relative link changes, update and get citizen by id\n requested_relatives = citizen_part['relatives']\n try:\n db_citizen_id = self.process_relatives(import_id, citizen_id, requested_relatives)\n except ValidationError as ex:\n return {'message': f'Validation error', 'errors': ex.messages}, 400\n except KeyError:\n # can't tell whats exactly wrong as its up to database querying result being empty\n # or did I just masked out a weird bug? Probably not a good solution.\n return {'message': f'import_id {import_id} or citizen_id {citizen_id} not found'}, 404\n\n if len(citizen_part) > 1: # this means we do have something to update besides relatives\n db.engine.execute(Citizen.update().where(Citizen.c.id == db_citizen_id), citizen_part)\n\n citizen = db.engine.execute(db.select([Citizen]).where(Citizen.c.id == db_citizen_id)).fetchone()\n response = citizenSchema.dump(citizen)\n # I know, but this saves precious time\n response['relatives'] = citizen_part['relatives']\n\n else: # update and get citizen by import_id and citizen_id as we don't know the absolute id\n\n update_result = db.engine.execute(Citizen.update()\n .where(Citizen.c.import_id == import_id)\n .where(Citizen.c.citizen_id == citizen_id),\n citizen_part\n )\n\n # a dirty way of citizen_id and import_id validation\n # it relies on database to report 0 rows updated which means\n # either of parameters are missing\n if update_result.rowcount != 1:\n return {'message': f'citizen_id or import_id not found'}, 404\n\n join = Citizen.outerjoin(Relative, Relative.c.citizen_id == Citizen.c.id)\n citizen_relatives = db.engine.execute(db.select([Citizen, Relative.c.relative_id])\n .where(Citizen.c.import_id == import_id)\n .where(Citizen.c.citizen_id == citizen_id)\n .select_from(join)).fetchall()\n\n # get list of ids to citizen_ids to resolve relatives\n id_list = db.engine.execute(\n db.select([Citizen.c.id, Citizen.c.citizen_id]).where(Citizen.c.import_id == import_id)\n ).fetchall()\n\n citizen = self.merge_relatives(citizen_relatives, id_list)\n response = citizenSchema.dump(citizen)\n\n return response, 200\n\n\nclass GetCitizens(Resource):\n \"\"\"\n Serves /imports/<int:import_id>/citizens endpoint\n \"\"\"\n\n @staticmethod\n def merge_by_relatives(raw_citizens: List) -> List[Dict]:\n \"\"\"\n Maps database ids to citizen_ids and merges joined citizen relationships\n rows from database into properly serializable format.\n Converts citizens from raw format to named dicts as a side-effect.\n :param raw_citizens: list of RowProxy\n :return: list of citizens where each citizen is a dict\n \"\"\"\n prev_id = 0\n citizens = [dict(raw_citizen) for raw_citizen in raw_citizens]\n id_map = {citizen['id']: citizen['citizen_id'] for citizen in citizens}\n result = []\n for citizen in citizens:\n relative_id = citizen['relative_id']\n if prev_id == citizen['citizen_id']:\n result[-1]['relatives'].append(id_map[relative_id])\n else:\n del citizen['relative_id']\n if relative_id:\n citizen['relatives'] = [id_map[relative_id]]\n else:\n citizen['relatives'] = []\n 
result.append(citizen)\n prev_id = citizen['citizen_id']\n return result\n\n def get(self, import_id):\n \"\"\"\n Get request handler\n \"\"\"\n\n join = Citizen.outerjoin(Relative, Relative.c.citizen_id == Citizen.c.id)\n raw_citizens = db.engine.execute(\n db.select([Citizen, Relative.c.relative_id])\n .where(Citizen.c.import_id == import_id)\n .order_by(Citizen.c.citizen_id).select_from(join)\n ).fetchall()\n\n # form relatives lists\n citizens = self.merge_by_relatives(raw_citizens)\n\n # looks like import_id not found database\n # this not the best way to check this, probably\n # but the idea was to reduce database queries amount\n if not citizens:\n return {'message': f'no data found for import_id: {import_id}'}, 404\n\n return {'data': citizensSchema.dump(citizens)}\n\n\nclass GetBirthdays(Resource):\n \"\"\"\n Serves /imports/<int:import_id>/citizens/birthdays endpoint\n \"\"\"\n\n @staticmethod\n def get(import_id):\n \"\"\"\n Get request handler\n \"\"\"\n\n # resulting dict template\n months_dict: Dict[int, List] = {x: [] for x in range(1, 13)}\n\n # get ids, citizen_ids, birthdays, relatives\n join = Citizen.outerjoin(Relative, Relative.c.citizen_id == Citizen.c.id)\n raw_citizens = db.engine.execute(\n db.select([Citizen.c.id, Citizen.c.citizen_id, Citizen.c.birth_date, Relative.c.relative_id])\n .where(Citizen.c.import_id == import_id)\n .order_by(Citizen.c.citizen_id)\n .select_from(join)\n ).fetchall()\n\n # pack them into dicts\n citizens_relatives = [dict(entry) for entry in raw_citizens]\n\n # empty database response\n if not citizens_relatives:\n return {'message': f'import_id {import_id} not found'}, 404\n\n id_bd_map = {x['id']: {'citizen_id': x['citizen_id'], 'month': x['birth_date'].month}\n for x in citizens_relatives}\n\n # aggregation storage: citizen_id -> month -> number of presents\n presents = {}\n\n for citizen_relative in citizens_relatives:\n db_citizen_id = citizen_relative['id']\n citizen_id = id_bd_map[db_citizen_id]['citizen_id']\n\n db_relative_id = citizen_relative['relative_id']\n if not db_relative_id: # no relatives :(\n continue\n\n relative_birth_month = id_bd_map[db_relative_id]['month']\n\n try:\n presents[citizen_id][relative_birth_month] += 1\n except KeyError:\n try:\n presents[citizen_id][relative_birth_month] = 1\n except KeyError:\n presents[citizen_id] = {relative_birth_month: 1}\n\n # build response from aggregation storage\n\n for citizen_key in presents:\n for month_key in presents[citizen_key]:\n months_dict[month_key].append({\n 'citizen_id': citizen_key,\n 'presents': presents[citizen_key][month_key]\n })\n\n return {'data': months_dict}, 200\n\n\nclass GetAges(Resource):\n \"\"\"\n Serves /imports/<int:import_id>/towns/stat/percentile/age endpoint\n \"\"\"\n\n @staticmethod\n def get(import_id):\n \"\"\"\n Get request handler\n \"\"\"\n\n raw_town_birthdays = db.engine.execute(\n db.select([Citizen.c.town, Citizen.c.birth_date])\n .where(Citizen.c.import_id == import_id)\n .order_by(Citizen.c.town)\n ).fetchall()\n\n town_birthdays = [dict(entry) for entry in raw_town_birthdays]\n\n if not town_birthdays:\n return {'message': f'import_id {import_id} not found'}, 404\n\n # keeps ages lists by towns\n towns_ages = {}\n\n for town_birthday in town_birthdays:\n town = town_birthday['town']\n birth_date = town_birthday['birth_date']\n age = relativedelta(datetime.utcnow(), birth_date).years\n try:\n towns_ages[town].append(age)\n except KeyError:\n towns_ages[town] = [age]\n\n response = []\n\n for town, ages in towns_ages.items():\n 
p50, p75, p99 = numpy.percentile(ages, [50, 75, 99])\n response.append({\n 'town': town,\n 'p50': p50,\n 'p75': p75,\n 'p99': p99\n })\n\n return {'data': response}, 200\n" ]
[ [ "numpy.percentile" ] ]
superporchetta/tide_prediction
[ "9946f0c97b7e1bbe0a791380d6a35e277f5d3a1c" ]
[ "LSTM.py" ]
[ "from sklearn.preprocessing import StandardScaler, MinMaxScaler\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n# https://cnvrg.io/pytorch-lstm/\n\n\nclass LSTM1(nn.Module):\n def __init__(self, num_classes, input_size, hidden_size, num_layers, seq_length):\n super(LSTM1, self).__init__()\n self.num_classes = num_classes # number of classes\n self.num_layers = num_layers # number of layers\n self.input_size = input_size # input size\n self.hidden_size = hidden_size # hidden state\n self.seq_length = seq_length # sequence length\n\n self.lstm = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True,\n ) # lstm\n self.fc_1 = nn.Linear(hidden_size, 128) # fully connected 1\n self.fc = nn.Linear(128, num_classes) # fully connected last layer\n\n self.relu = nn.ReLU()\n\n def forward(self, x):\n h_0 = Variable(\n torch.zeros(self.num_layers, x.size(0), self.hidden_size)\n ) # hidden state\n c_0 = Variable(\n torch.zeros(self.num_layers, x.size(0), self.hidden_size)\n ) # internal state\n # Propagate input through LSTM\n output, (hn, cn) = self.lstm(\n x, (h_0, c_0)\n ) # lstm with input, hidden, and internal state\n hn = hn.view(-1, self.hidden_size) # reshaping the data for Dense layer next\n out = self.relu(hn)\n out = self.fc_1(out) # first Dense\n out = self.relu(out) # relu\n out = self.fc(out) # Final Output\n return out\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM", "torch.nn.ReLU" ] ]
Yuchong-Geng/lanefinder
[ "6ff65d36b681f1594cd2c2788a2dfa7632cac6d0" ]
[ "image/processing.py" ]
[ "import cv2\nimport numpy as np\n\n\ndef preprocessing(frame, mean, std):\n    # normalize and quantize input\n    # with parameters obtained during\n    # model calibration\n    frame *= (1 / 255)\n    expd = np.expand_dims(frame, axis=0)\n    quantized = (expd / std + mean)\n\n    return quantized.astype(np.uint8)\n\n\ndef postprocessing(pred_obj, frame, mean, std, in_shape, out_shape):\n    # get predicted mask in shape (n_rows*n_cols, )\n    # and reshape back to (n_rows, n_cols)\n    pred = pred_obj[1].reshape(in_shape)\n\n    # dequantize and cast back to float\n    dequantized = (std * (pred - mean))\n    dequantized = dequantized.astype(np.float32)\n\n    # resize frame and mask to output shape\n    frame = cv2.resize(frame, out_shape)\n    mask = cv2.resize(dequantized, (frame.shape[1], frame.shape[0]))\n\n    # perform closing operation on mask to smooth out lane edges\n    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n    mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel, iterations=1)\n    mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel, iterations=4)\n    mask = cv2.GaussianBlur(mask, (5, 5), 0)\n\n    # overlay frame and segmentation mask\n    frame[mask != 0] = (255, 0, 255)\n\n    return frame\n" ]
[ [ "numpy.expand_dims" ] ]
wizardhead/pytti-core
[ "6030f6154ad7d17b93cf76e2d42905d4231a0abd" ]
[ "src/pytti/Image/ema_image.py" ]
[ "import torch\nfrom torch import nn\nfrom pytti.Image.differentiable_image import DifferentiableImage\n\n\nclass EMAImage(DifferentiableImage):\n \"\"\"\n Base class for differentiable images with Exponential Moving Average filtering\n Based on code by Katherine Crowson\n \"\"\"\n\n def __init__(self, width, height, tensor, decay):\n super().__init__(width, height)\n self.tensor = nn.Parameter(tensor)\n self.register_buffer(\"biased\", torch.zeros_like(tensor))\n self.register_buffer(\"average\", torch.zeros_like(tensor))\n self.decay = decay\n self.register_buffer(\"accum\", torch.tensor(1.0))\n self.update()\n\n @torch.no_grad()\n def update(self):\n if not self.training:\n raise RuntimeError(\"update() should only be called during training\")\n self.accum.mul_(self.decay)\n self.biased.mul_(self.decay)\n self.biased.add_((1 - self.decay) * self.tensor)\n self.average.copy_(self.biased)\n self.average.div_(1 - self.accum)\n\n @torch.no_grad()\n def reset(self):\n if not self.training:\n raise RuntimeError(\"reset() should only be called during training\")\n self.biased.set_(torch.zeros_like(self.biased))\n self.average.set_(torch.zeros_like(self.average))\n self.accum.set_(torch.ones_like(self.accum))\n self.update()\n\n def decode_training_tensor(self):\n return self.decode(self.tensor)\n\n def decode_tensor(self):\n return self.decode(self.average)\n\n def decode(self, tensor):\n raise NotImplementedError\n" ]
[ [ "torch.no_grad", "torch.nn.Parameter", "torch.tensor", "torch.ones_like", "torch.zeros_like" ] ]
allenai/ViRB
[ "fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b" ]
[ "models/DeepLabClassificationHead.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.ResNet50Encoder import Bottleneck\n\n\nclass DeepLabClassificationHead(nn.Module):\n\n def __init__(self, num_classes):\n super().__init__()\n self.aspp = ASPP(2048, 256)\n self.low_level_feature_reducer = nn.Sequential(\n nn.Conv2d(256, 48, 1),\n nn.BatchNorm2d(48, momentum=0.0003),\n nn.ReLU(),\n )\n self.decoder = nn.Sequential(\n nn.Conv2d(256 + 48, 256, 3, padding=1),\n nn.BatchNorm2d(256, momentum=0.0003),\n nn.ReLU(),\n nn.Conv2d(256, 256, 3, padding=1),\n nn.BatchNorm2d(256, momentum=0.0003),\n nn.ReLU(),\n nn.Conv2d(256, num_classes, 3, padding=1),\n )\n self.classifier = nn.Sequential(\n nn.Flatten(),\n nn.Linear(7*7*256, num_classes),\n )\n\n\n def forward(self, x):\n # l2_size = tuple(x[\"block1\"].shape[-2:])\n # label_size = tuple(x[\"img\"].shape[-2:])\n\n x_backbone = x[\"block4\"].float()\n\n x_aspp = self.aspp(x_backbone)\n # x_aspp = nn.Upsample(l2_size, mode='bilinear', align_corners=True)(x_aspp)\n x = self.classifier(x_aspp)\n # x = torch.cat((self.low_level_feature_reducer(x[\"block1\"]), x_aspp), dim=1)\n # x = self.decoder(x)\n # x = nn.Upsample(label_size, mode='bilinear', align_corners=True)(x)\n return x\n\n def eval(self):\n # self.block4.eval()\n self.aspp.eval()\n self.decoder.eval()\n return self\n\n def train(self, mode=True):\n # self.block4.eval()\n self.aspp.train(mode)\n self.decoder.train(mode)\n return self\n\n def required_encoding(self):\n return [\"block4\"]\n\nclass ASPP(nn.Module):\n\n def __init__(self, C, depth, conv=nn.Conv2d, norm=nn.BatchNorm2d, momentum=0.0003, mult=1):\n super(ASPP, self).__init__()\n self._C = C\n self._depth = depth\n\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.relu = nn.ReLU(inplace=True)\n self.aspp1 = conv(C, depth, kernel_size=1, stride=1, bias=False)\n self.aspp2 = conv(C, depth, kernel_size=3, stride=1,\n dilation=int(6*mult), padding=int(6*mult),\n bias=False)\n self.aspp3 = conv(C, depth, kernel_size=3, stride=1,\n dilation=int(12*mult), padding=int(12*mult),\n bias=False)\n self.aspp4 = conv(C, depth, kernel_size=3, stride=1,\n dilation=int(18*mult), padding=int(18*mult),\n bias=False)\n self.aspp5 = conv(C, depth, kernel_size=1, stride=1, bias=False)\n self.aspp1_bn = norm(depth, momentum)\n self.aspp2_bn = norm(depth, momentum)\n self.aspp3_bn = norm(depth, momentum)\n self.aspp4_bn = norm(depth, momentum)\n self.aspp5_bn = norm(depth, momentum)\n self.conv2 = conv(depth * 5, depth, kernel_size=1, stride=1,\n bias=False)\n self.bn2 = norm(depth, momentum)\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x1 = self.aspp1_bn(x1)\n x1 = self.relu(x1)\n x2 = self.aspp2(x)\n x2 = self.aspp2_bn(x2)\n x2 = self.relu(x2)\n x3 = self.aspp3(x)\n x3 = self.aspp3_bn(x3)\n x3 = self.relu(x3)\n x4 = self.aspp4(x)\n x4 = self.aspp4_bn(x4)\n x4 = self.relu(x4)\n x5 = self.global_pooling(x)\n x5 = self.aspp5(x5)\n x5 = self.aspp5_bn(x5)\n x5 = self.relu(x5)\n x5 = nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear',\n align_corners=True)(x5)\n x = torch.cat((x1, x2, x3, x4, x5), 1)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n return x\n\n\nclass CascadeBlock(nn.Module):\n\n def __init__(self, block, planes, inplanes, blocks, stride=1, dilation=1):\n super(CascadeBlock, self).__init__()\n self.conv = nn.Conv2d\n # downsample = None\n # if stride != 1 or dilation != 1 or inplanes != planes * block.expansion:\n # downsample = nn.Sequential(\n # self.conv(inplanes, planes * block.expansion,\n # kernel_size=1, 
stride=stride, dilation=max(1, dilation // 2), bias=False),\n # self._make_norm(planes * block.expansion),\n # )\n #\n # layers = []\n # self.upsample_layer = block(inplanes, planes, stride, downsample, dilation=max(1, dilation // 2),\n # conv=self.conv, norm=self._make_norm)\n # inplanes = planes * block.expansion\n # for i in range(1, blocks):\n # layers.append(block(inplanes, planes, dilation=dilation, conv=self.conv, norm=self._make_norm))\n # self.conv = nn.Sequential(*layers)\n\n downsample = nn.Sequential(\n self.conv(inplanes, planes*block.expansion, kernel_size=1, stride=stride,\n dilation=dilation, bias=False),\n self._make_norm(planes * block.expansion),\n )\n self.upsample_layer = block(inplanes, planes, stride, downsample, dilation=dilation,\n conv=self.conv, norm=self._make_norm)\n inplanes = planes * block.expansion\n self.conv = nn.Sequential(\n block(inplanes, planes, dilation=dilation*2, conv=self.conv, norm=self._make_norm),\n block(inplanes, planes, dilation=dilation, conv=self.conv, norm=self._make_norm)\n )\n\n def forward(self, x, backbone=None):\n out = self.upsample_layer(x)\n if backbone is not None:\n out = out + backbone\n out = self.conv(out)\n return out\n\n def _make_norm(self, planes, momentum=0.05):\n return nn.BatchNorm2d(planes, momentum=momentum)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Flatten" ] ]
DEVESHTARASIA/unidata-python-workshop
[ "6ce194a0515effbd0cddb50c2302d5160494747e" ]
[ "notebooks/Command_Line_Tools/skewt.py" ]
[ "# skewt.py - A simple Skew-T plotting tool\n\nimport argparse\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport metpy.calc as mpcalc\nfrom metpy.io.upperair import get_upper_air_data\nfrom metpy.plots import Hodograph, SkewT\nfrom metpy.units import units\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom mpldatacursor import datacursor\nimport numpy as np\n\ndef get_sounding_data(date, station):\n\n ds = get_upper_air_data(date, station)\n\n p = ds.variables['pressure'][:]\n T = ds.variables['temperature'][:]\n Td = ds.variables['dewpoint'][:]\n u = ds.variables['u_wind'][:]\n v = ds.variables['v_wind'][:]\n windspeed = ds.variables['speed'][:]\n\n return p, T, Td, u, v, windspeed\n\ndef plot_sounding(date, station):\n p, T, Td, u, v, windspeed = get_sounding_data(date, station)\n\n lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])\n lfc_pressure, lfc_temperature = mpcalc.lfc(p, T, Td)\n parcel_path = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')\n\n # Create a new figure. The dimensions here give a good aspect ratio\n fig = plt.figure(figsize=(8, 8))\n skew = SkewT(fig)\n\n # Plot the data\n skew.plot(p, T, color='tab:red')\n skew.plot(p, Td, color='tab:green')\n\n # Plot thermodynamic parameters and parcel path\n skew.plot(p, parcel_path, color='black')\n\n if lcl_pressure:\n skew.ax.axhline(lcl_pressure, color='black')\n\n if lfc_pressure:\n skew.ax.axhline(lfc_pressure, color='0.7')\n\n # Add the relevant special lines\n skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)\n skew.plot_dry_adiabats()\n skew.plot_moist_adiabats()\n skew.plot_mixing_lines()\n\n # Shade areas representing CAPE and CIN\n skew.shade_cin(p, T, parcel_path)\n skew.shade_cape(p, T, parcel_path)\n\n # Add wind barbs\n skew.plot_barbs(p, u, v)\n\n # Add an axes to the plot\n ax_hod = inset_axes(skew.ax, '30%', '30%', loc=1, borderpad=3)\n\n # Plot the hodograph\n h = Hodograph(ax_hod, component_range=100.)\n\n # Grid the hodograph\n h.add_grid(increment=20)\n\n # Plot the data on the hodograph\n mask = (p >= 100 * units.mbar)\n h.plot_colormapped(u[mask], v[mask], windspeed[mask]) # Plot a line colored by wind speed\n\n # Set some sensible axis limits\n skew.ax.set_ylim(1000, 100)\n skew.ax.set_xlim(-40, 60)\n\n return fig, skew\n\nif __name__ == '__main__':\n # Parse out the command line arguments\n parser = argparse.ArgumentParser(description='''Make an advanced SkewT\n plot of upper air observations.''')\n parser.add_argument('--date', required=True,\n help='Date of the sounding YYYYMMDD')\n parser.add_argument('--hour', required=True,\n help='Time of the sounding in hours')\n parser.add_argument('--station', default='OUN',\n help='Station three letter identifier')\n parser.add_argument('--savefig', action='store_true',\n help='Save out figure instead of displaying it')\n parser.add_argument('--imgformat', default='png',\n help='Format to save the resulting image as.')\n args = parser.parse_args()\n\n # Parse out the date time stamp\n date = datetime.strptime('{0}{1}'.format(args.date, args.hour), '%Y%m%d%H')\n\n # Make the sounding figure\n fig, skew = plot_sounding(date, args.station)\n\n # Save or show figurexs\n if args.savefig:\n plt.savefig('{0}_{1}.{2}'.format(args.station,\n datetime.strftime(date, '%Y%m%d_%HZ'),\n args.imgformat))\n else:\n datacursor(formatter=u'{y:.02f} hPa \\n{x:.02f}\\u00B0C'.format, bbox=dict(fc='white'))\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
microsoft-fevieira/maro
[ "f9b4fcd06222109238be491ef64ce8affb2938d8" ]
[ "maro/simulator/scenarios/cim/business_engine.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n\nimport os\nfrom math import ceil, floor\n\nimport numpy as np\nfrom yaml import safe_load\n\nfrom maro.backends.frame import FrameBase, SnapshotList\nfrom maro.data_lib.cim import CimDataContainerWrapper, Order, Stop\nfrom maro.event_buffer import AtomEvent, CascadeEvent, EventBuffer, MaroEvents\nfrom maro.simulator.scenarios import AbsBusinessEngine\nfrom maro.simulator.scenarios.helpers import DocableDict\nfrom maro.simulator.scenarios.matrix_accessor import MatrixAttributeAccessor\nfrom maro.streamit import streamit\n\nfrom .common import Action, ActionScope, ActionType, DecisionEvent\nfrom .event_payload import EmptyReturnPayload, LadenReturnPayload, VesselDischargePayload, VesselStatePayload\nfrom .events import Events\nfrom .frame_builder import gen_cim_frame\nfrom .ports_order_export import PortOrderExporter\n\nmetrics_desc = \"\"\"\nCIM metrics used provide statistics information until now (may be in the middle of current tick).\nIt contains following keys:\n\norder_requirements (int): Accumulative orders until now.\ncontainer_shortage (int): Accumulative shortage until now.\noperation_number (int): Total empty transfer (both load and discharge) cost,\n the cost factors can be configured in configuration file at section \"transfer_cost_factors\".\n\"\"\"\n\n\nclass CimBusinessEngine(AbsBusinessEngine):\n \"\"\"Cim business engine, used simulate CIM related problem.\"\"\"\n\n def __init__(\n self, event_buffer: EventBuffer, topology: str, start_tick: int, max_tick: int,\n snapshot_resolution: int, max_snapshots: int, additional_options: dict = None\n ):\n super().__init__(\n \"cim\", event_buffer, topology, start_tick, max_tick,\n snapshot_resolution, max_snapshots, additional_options\n )\n\n # Update self._config_path with current file path.\n self.update_config_root_path(__file__)\n\n config_path = os.path.join(self._config_path, \"config.yml\")\n\n # Load data from wrapper.\n self._data_cntr: CimDataContainerWrapper = CimDataContainerWrapper(\n config_path, max_tick, self._topology)\n\n # Create a copy of config object to expose to others, and not affect generator.\n with open(config_path) as fp:\n self._config = safe_load(fp)\n\n self._vessels = []\n self._ports = []\n self._frame = None\n self._full_on_ports: MatrixAttributeAccessor = None\n self._full_on_vessels: MatrixAttributeAccessor = None\n self._vessel_plans: MatrixAttributeAccessor = None\n self._port_orders_exporter = PortOrderExporter(\"enable-dump-snapshot\" in additional_options)\n\n # Read transfer cost factors.\n transfer_cost_factors = self._config[\"transfer_cost_factors\"]\n\n self._load_cost_factor: float = transfer_cost_factors[\"load\"]\n self._dsch_cost_factor: float = transfer_cost_factors[\"dsch\"]\n\n # Used to collect total cost to avoid to much snapshot querying.\n self._total_operate_num: float = 0\n\n self._init_frame()\n\n # Snapshot list should be initialized after frame.\n self._snapshots = self._frame.snapshots\n\n self._register_events()\n\n # As we already unpack the route to the max tick, we can insert all departure events at the beginning.\n self._load_departure_events()\n\n self._stream_base_info()\n\n @property\n def configs(self):\n \"\"\"dict: Configurations of CIM business engine.\"\"\"\n return self._config\n\n @property\n def frame(self) -> FrameBase:\n \"\"\"FrameBase: Frame of current business engine.\"\"\"\n return self._frame\n\n @property\n def snapshots(self) -> SnapshotList:\n \"\"\"SnapshotList: 
Snapshot list of current frame.\"\"\"\n return self._snapshots\n\n def step(self, tick: int):\n \"\"\"Called at each tick to generate orders and arrival events.\n\n Args:\n tick (int): Tick to generate orders.\n \"\"\"\n\n # At each tick:\n # 1. Generate orders for this tick.\n # 2. Transfer orders into events (ORDER).\n # 3. Check and add vessel arrival event (atom and cascade).\n\n total_empty_number = sum(\n [node.empty for node in self._ports + self._vessels])\n\n for order in self._data_cntr.get_orders(tick, total_empty_number):\n # Use cascade event to support insert sub events.\n order_evt = self._event_buffer.gen_cascade_event(tick, Events.ORDER, order)\n\n self._event_buffer.insert_event(order_evt)\n self._port_orders_exporter.add(order)\n\n # Used to hold decision event of this tick, we will append this at the end\n # to make sure all the other logic finished.\n # TODO: Remove it after event priority is supported.\n decision_evt_list = []\n\n for vessel in self._vessels:\n vessel_idx: int = vessel.idx\n loc_idx: int = vessel.next_loc_idx\n\n stop: Stop = self._data_cntr.vessel_stops[vessel_idx, loc_idx]\n port_idx: int = stop.port_idx\n\n # At the beginning the vessel is parking at port, will not invoke arrive event.\n if loc_idx > 0:\n # Check if there is any arrive event.\n if stop.arrive_tick == tick:\n arrival_payload = VesselStatePayload(port_idx, vessel_idx)\n\n # This vessel will arrive at current tick.\n arrival_event = self._event_buffer.gen_atom_event(\n tick, Events.VESSEL_ARRIVAL, arrival_payload)\n\n # Then it will load full.\n load_event = self._event_buffer.gen_atom_event(\n tick, Events.LOAD_FULL, arrival_payload)\n\n self._event_buffer.insert_event(arrival_event)\n self._event_buffer.insert_event(load_event)\n\n # Generate cascade event and payload.\n decision_payload = DecisionEvent(\n tick, port_idx, vessel_idx, self.snapshots, self.action_scope, self.early_discharge\n )\n\n decision_event: CascadeEvent = self._event_buffer.gen_decision_event(tick, decision_payload)\n\n decision_evt_list.append(decision_event)\n\n # Update vessel location so that later logic will get correct value.\n vessel.last_loc_idx = vessel.next_loc_idx\n\n # We should update the future stop list at each tick.\n past_stops = self._data_cntr.vessel_past_stops[vessel.idx, vessel.last_loc_idx, loc_idx]\n future_stops = self._data_cntr.vessel_future_stops[vessel.idx, vessel.last_loc_idx, loc_idx]\n\n vessel.set_stop_list(past_stops, future_stops)\n\n # Update vessel plans.\n for plan_port_idx, plan_tick in self._data_cntr.vessel_planned_stops[\n vessel_idx, vessel.route_idx, vessel.last_loc_idx\n ]:\n self._vessel_plans[vessel_idx, plan_port_idx] = plan_tick\n\n if loc_idx > 0 and stop.arrive_tick == tick:\n self._vessel_plans[vessel_idx, port_idx] = stop.arrive_tick\n\n # Insert the cascade events at the end.\n for event in decision_evt_list:\n self._event_buffer.insert_event(event)\n\n def post_step(self, tick: int):\n \"\"\"Post-process after each step.\n\n Args:\n tick (int): Tick to process.\n \"\"\"\n self._stream_data()\n\n if (tick + 1) % self._snapshot_resolution == 0:\n # Update acc_fulfillment before take snapshot.\n for port in self._ports:\n port.acc_fulfillment = port.acc_booking - port.acc_shortage\n\n # Before go to next tick, we will take a snapshot first.\n self._frame.take_snapshot(self.frame_index(tick))\n\n # Reset port statistics (by tick) fields.\n for port in self._ports:\n port.shortage = 0\n port.booking = 0\n port.fulfillment = 0\n port.transfer_cost = 0\n\n return 
tick + 1 == self._max_tick\n\n def reset(self):\n \"\"\"Reset the business engine, it will reset frame value.\"\"\"\n\n self._snapshots.reset()\n\n self._frame.reset()\n\n self._reset_nodes()\n\n self._data_cntr.reset()\n\n # Insert departure event again.\n self._load_departure_events()\n\n self._total_operate_num = 0\n\n def action_scope(self, port_idx: int, vessel_idx: int) -> ActionScope:\n \"\"\"Get the action scope of specified agent.\n\n Args:\n port_idx (int): Index of specified agent.\n vessel_idx (int): Index of specified vessel to take the action.\n\n Returns:\n ActionScope: Contains load and discharge scope.\n \"\"\"\n port = self._ports[port_idx]\n vessel = self._vessels[vessel_idx]\n\n return ActionScope(load=min(port.empty, vessel.remaining_space), discharge=vessel.empty)\n\n def early_discharge(self, vessel_idx: int) -> int:\n \"\"\"Get the early discharge number of specified vessel.\n\n Args:\n vessel_idx (int): Index of specified vessel.\n \"\"\"\n return self._vessels[vessel_idx].early_discharge\n\n def get_metrics(self) -> DocableDict:\n \"\"\"Get metrics information for cim scenario.\n\n Args:\n dict: A dict that contains \"perf\", \"total_shortage\" and \"total_cost\",\n and can use help method to show help docs.\n \"\"\"\n total_shortage = sum([p.acc_shortage for p in self._ports])\n total_booking = sum([p.acc_booking for p in self._ports])\n\n return DocableDict(\n metrics_desc,\n order_requirements=total_booking,\n container_shortage=total_shortage,\n operation_number=self._total_operate_num\n )\n\n def get_node_mapping(self) -> dict:\n \"\"\"Get node name mappings related with this environment.\n\n Returns:\n dict: Node name to index mapping dictionary.\n \"\"\"\n return {\n \"ports\": self._data_cntr.port_mapping,\n \"vessels\": self._data_cntr.vessel_mapping\n }\n\n def get_event_payload_detail(self) -> dict:\n \"\"\"dict: Event payload details of current scenario.\"\"\"\n return {\n Events.ORDER.name: Order.summary_key,\n Events.RETURN_FULL.name: LadenReturnPayload.summary_key,\n Events.VESSEL_ARRIVAL.name: VesselStatePayload.summary_key,\n Events.LOAD_FULL.name: VesselStatePayload.summary_key,\n Events.DISCHARGE_FULL.name: VesselDischargePayload.summary_key,\n Events.PENDING_DECISION.name: DecisionEvent.summary_key,\n Events.LOAD_EMPTY.name: Action.summary_key,\n Events.DISCHARGE_EMPTY.name: Action.summary_key,\n Events.VESSEL_DEPARTURE.name: VesselStatePayload.summary_key,\n Events.RETURN_EMPTY.name: EmptyReturnPayload.summary_key\n }\n\n def get_agent_idx_list(self) -> list:\n \"\"\"Get port index list related with this environment.\n\n Returns:\n list: A list of port index.\n \"\"\"\n return [i for i in range(self._data_cntr.port_number)]\n\n def dump(self, folder: str):\n self._port_orders_exporter.dump(folder)\n\n def _init_nodes(self):\n # Init ports.\n for port_settings in self._data_cntr.ports:\n port = self._ports[port_settings.index]\n port.set_init_state(port_settings.name,\n port_settings.capacity, port_settings.empty)\n\n # Init vessels.\n for vessel_setting in self._data_cntr.vessels:\n vessel = self._vessels[vessel_setting.index]\n\n vessel.set_init_state(\n vessel_setting.name,\n self._data_cntr.container_volume,\n vessel_setting.capacity,\n self._data_cntr.route_mapping[vessel_setting.route_name],\n vessel_setting.empty\n )\n\n # Init vessel plans.\n self._vessel_plans[:] = -1\n\n def _reset_nodes(self):\n # Reset both vessels and ports.\n # NOTE: This should be called after frame.reset.\n for port in self._ports:\n port.reset()\n\n for vessel 
in self._vessels:\n vessel.reset()\n\n # Reset vessel plans.\n self._vessel_plans[:] = -1\n\n def _register_events(self):\n \"\"\"Register events.\"\"\"\n register_handler = self._event_buffer.register_event_handler\n\n register_handler(Events.RETURN_FULL, self._on_full_return)\n register_handler(Events.RETURN_EMPTY, self._on_empty_return)\n register_handler(Events.ORDER, self._on_order_generated)\n register_handler(Events.LOAD_FULL, self._on_full_load)\n register_handler(Events.VESSEL_DEPARTURE, self._on_departure)\n register_handler(Events.DISCHARGE_FULL, self._on_discharge)\n register_handler(MaroEvents.TAKE_ACTION, self._on_action_received)\n\n def _load_departure_events(self):\n \"\"\"Insert leaving event at the beginning as we already unpack the root to a loop at the beginning.\"\"\"\n\n for vessel_idx, stops in enumerate(self._data_cntr.vessel_stops[:]):\n for stop in stops:\n payload = VesselStatePayload(stop.port_idx, vessel_idx)\n dep_evt = self._event_buffer.gen_atom_event(stop.leave_tick, Events.VESSEL_DEPARTURE, payload)\n\n self._event_buffer.insert_event(dep_evt)\n\n def _init_frame(self):\n \"\"\"Initialize the frame based on data generator.\"\"\"\n port_num = self._data_cntr.port_number\n vessel_num = self._data_cntr.vessel_number\n stop_num = (self._data_cntr.past_stop_number,\n self._data_cntr.future_stop_number)\n\n self._frame = gen_cim_frame(\n port_num, vessel_num, stop_num, self.calc_max_snapshots())\n\n self._ports = self._frame.ports\n self._vessels = self._frame.vessels\n\n self._full_on_ports = self._frame.matrix[0][\"full_on_ports\"]\n self._full_on_vessels = self._frame.matrix[0][\"full_on_vessels\"]\n self._vessel_plans = self._frame.matrix[0][\"vessel_plans\"]\n\n self._init_nodes()\n\n def _get_reachable_ports(self, vessel_idx: int):\n \"\"\"Get ports that specified vessel can reach (for order), return a list of tuple (port_id, arrive_tick).\n\n Args:\n vessel_idx (int): Index of specified vessel.\n\n Returns:\n Reachable port index list of specified vessel.\n \"\"\"\n vessel = self._vessels[vessel_idx]\n\n return self._data_cntr.reachable_stops[vessel_idx, vessel.route_idx, vessel.next_loc_idx]\n\n def _get_pending_full(self, src_port_idx: int, dest_port_idx: int):\n \"\"\"Get pending full number from src_port_idx to dest_port_idx.\"\"\"\n return self._full_on_ports[src_port_idx, dest_port_idx]\n\n def _set_pending_full(self, src_port_idx: int, dest_port_idx: int, value):\n \"\"\"Set the full number from src_port_idx to dest_port_idx.\"\"\"\n assert value >= 0\n\n self._full_on_ports[src_port_idx, dest_port_idx] = value\n\n def _on_order_generated(self, event: CascadeEvent):\n \"\"\"When there is an order generated, we should do:\n 1. Generate a LADEN_RETURN event by configured buffer time: \\\n The event will be inserted to the immediate_event_list ASAP if the configured buffer time is 0, \\\n else the event will be inserted to the event buffer directly.\n 2. 
Update port state: on_shipper +, empty -.\n\n Args:\n event (CascadeEvent): Order event object.\n \"\"\"\n order: Order = event.payload\n src_port = self._ports[order.src_port_idx]\n\n execute_qty = order.quantity\n src_empty = src_port.empty\n src_port.booking += execute_qty\n src_port.acc_booking += execute_qty\n\n # Check if there is any shortage.\n if src_empty < order.quantity:\n # Booking & shortage.\n shortage_qty = order.quantity - src_empty\n src_port.shortage += shortage_qty\n src_port.acc_shortage += shortage_qty\n execute_qty = src_empty\n\n # Update port state.\n src_port.empty -= execute_qty\n # Full contianers that pending to return.\n src_port.on_shipper += execute_qty\n\n buffer_ticks = self._data_cntr.full_return_buffers[src_port.idx]\n\n payload = LadenReturnPayload(\n src_port_idx=order.src_port_idx, dest_port_idx=order.dest_port_idx, quantity=execute_qty\n )\n\n laden_return_evt = self._event_buffer.gen_atom_event(\n tick=event.tick + buffer_ticks, event_type=Events.RETURN_FULL, payload=payload\n )\n\n # If buffer_tick is 0, we should execute it as this tick.\n if buffer_ticks == 0:\n event.add_immediate_event(laden_return_evt)\n else:\n self._event_buffer.insert_event(laden_return_evt)\n\n def _on_full_return(self, event: AtomEvent):\n \"\"\"Handler for processing the event that full containers are returned from shipper.\n\n Once the full containers are returned, the containers are ready to be loaded. The workflow is:\n 1. First move the container from on_shipper to full (update state: on_shipper -> full).\n 2. Then append the container to the port pending list.\n \"\"\"\n payload: LadenReturnPayload = event.payload\n\n src_port = self._ports[payload.src_port_idx]\n src_port.on_shipper -= payload.quantity\n src_port.full += payload.quantity\n\n pending_full_number = self._get_pending_full(\n payload.src_port_idx, payload.dest_port_idx)\n\n self._set_pending_full(\n payload.src_port_idx, payload.dest_port_idx, pending_full_number + payload.quantity)\n\n def _on_full_load(self, event: AtomEvent):\n \"\"\"Handler for processing event that a vessel need to load full containers from current port.\n\n When there is a vessel arrive at a port:\n 1. Discharge full (we ignore this action here, as we will generate a discharge event \\\n after a vessel have loaded any full).\n 2. Load full by destination id, and generate discharge event.\n 3. Update vessel.state to PARKING.\n 4. Fill future stop list.\n 5. 
Early discharge.\n\n Args:\n event (AtomEvent): Arrival event object.\n \"\"\"\n\n arrival_obj: VesselStatePayload = event.payload\n vessel_idx: int = arrival_obj.vessel_idx\n port_idx: int = arrival_obj.port_idx\n vessel = self._vessels[vessel_idx]\n port = self._ports[port_idx]\n container_volume = self._data_cntr.container_volume\n vessel_capacity = vessel.capacity\n\n # Update vessel state.\n vessel.last_loc_idx = vessel.next_loc_idx\n\n # NOTE: This remaining space do not contains empty, as we can early discharge them if no enough space.\n remaining_space = vessel_capacity - vessel.full * container_volume\n\n # How many containers we can load.\n acceptable_number = floor(remaining_space / container_volume)\n total_load_qty = 0\n\n for next_port_idx, arrive_tick in self._get_reachable_ports(vessel_idx):\n full_number_to_next_port = self._get_pending_full(\n port_idx, next_port_idx)\n\n if acceptable_number > 0 and full_number_to_next_port > 0:\n # We can load some full.\n loaded_qty = min(full_number_to_next_port, acceptable_number)\n total_load_qty += loaded_qty\n\n # Update port state.\n self._set_pending_full(\n port_idx, next_port_idx, full_number_to_next_port - loaded_qty)\n\n port.full -= loaded_qty\n vessel.full += loaded_qty\n\n # Update state.\n self._full_on_vessels[vessel_idx, next_port_idx] += loaded_qty\n\n acceptable_number -= loaded_qty\n\n # Generate a discharge event, as we know when the vessel will arrive at destination.\n payload = VesselDischargePayload(vessel_idx, port_idx, next_port_idx, loaded_qty)\n dsch_event = self._event_buffer.gen_cascade_event(arrive_tick, Events.DISCHARGE_FULL, payload)\n\n self._event_buffer.insert_event(dsch_event)\n\n # Early discharge.\n total_container = vessel.full + vessel.empty\n\n vessel.early_discharge = 0\n\n if total_container * container_volume > vessel.capacity:\n early_discharge_number = \\\n total_container - ceil(vessel.capacity / container_volume)\n vessel.empty -= early_discharge_number\n port.empty += early_discharge_number\n vessel.early_discharge = early_discharge_number\n\n def _on_departure(self, event: AtomEvent):\n \"\"\"Handler for processing event when there is a vessel leaving from port.\n\n When the vessel departing from port:\n 1. Update location to next stop.\n\n Args:\n event (AtomEvent): Departure event object.\n \"\"\"\n\n departure_payload: VesselStatePayload = event.payload\n vessel_idx = departure_payload.vessel_idx\n vessel = self._vessels[vessel_idx]\n\n # As we have unfold all the route stop, we can just location ++.\n vessel.next_loc_idx += 1\n\n def _on_discharge(self, event: CascadeEvent):\n \"\"\"Handler for processing event the there are some full need to be discharged.\n\n\n 1. Discharge specified qty of full from vessel into port.on_consignee.\n 2. Generate a empty_return event by configured buffer time:\n a. If buffer time is 0, then insert into immediate_event_list to process it ASAP.\n b. 
Or insert into event buffer.\n\n Args:\n event (AtomEvent): Discharge event object.\n \"\"\"\n discharge_payload: VesselDischargePayload = event.payload\n vessel_idx = discharge_payload.vessel_idx\n port_idx = discharge_payload.port_idx\n vessel = self._vessels[vessel_idx]\n port = self._ports[port_idx]\n discharge_qty: int = discharge_payload.quantity\n\n vessel.full -= discharge_qty\n port.on_consignee += discharge_qty\n\n self._full_on_vessels[vessel_idx, port_idx] -= discharge_qty\n\n buffer_ticks = self._data_cntr.empty_return_buffers[port.idx]\n payload = EmptyReturnPayload(port_idx=port.idx, quantity=discharge_qty)\n mt_return_evt = self._event_buffer.gen_atom_event(\n tick=event.tick + buffer_ticks, event_type=Events.RETURN_EMPTY, payload=payload\n )\n\n if buffer_ticks == 0:\n event.add_immediate_event(mt_return_evt)\n else:\n self._event_buffer.insert_event(mt_return_evt)\n\n def _on_empty_return(self, event: AtomEvent):\n \"\"\"Handler for processing event when there are some empty container return to port.\n\n Args:\n event (AtomEvent): Empty-return event object.\n \"\"\"\n payload: EmptyReturnPayload = event.payload\n port = self._ports[payload.port_idx]\n\n port.on_consignee -= payload.quantity\n port.empty += payload.quantity\n\n def _on_action_received(self, event: CascadeEvent):\n \"\"\"Handler for processing actions from agent.\n\n Args:\n event (CascadeEvent): Action event object with expected payload: {vessel_id: empty_number_to_move}}.\n \"\"\"\n actions = event.payload\n\n if actions:\n if type(actions) is not list:\n actions = [actions]\n\n for action in actions:\n vessel_idx = action.vessel_idx\n port_idx = action.port_idx\n move_num = action.quantity\n vessel = self._vessels[vessel_idx]\n port = self._ports[port_idx]\n port_empty = port.empty\n vessel_empty = vessel.empty\n\n action_type: ActionType = getattr(action, \"action_type\", None)\n\n # Make it compatiable with previous action.\n if action_type is None:\n action_type = ActionType.DISCHARGE if move_num > 0 else ActionType.LOAD\n\n # Make sure the move number is positive, as we have the action type.\n move_num = abs(move_num)\n\n if action_type == ActionType.DISCHARGE:\n assert(move_num <= vessel_empty)\n\n port.empty = port_empty + move_num\n vessel.empty = vessel_empty - move_num\n else:\n assert(move_num <= min(port_empty, vessel.remaining_space))\n\n port.empty = port_empty - move_num\n vessel.empty = vessel_empty + move_num\n\n # Align the event type to make the output readable.\n event.event_type = Events.DISCHARGE_EMPTY if action_type == ActionType.DISCHARGE else Events.LOAD_EMPTY\n\n # Update transfer cost for port and metrics.\n self._total_operate_num += move_num\n port.transfer_cost += move_num\n\n self._vessel_plans[vessel_idx, port_idx] += self._data_cntr.vessel_period[vessel_idx]\n\n def _stream_base_info(self):\n if streamit:\n streamit.info(self._scenario_name, self._topology, self._max_tick)\n streamit.complex(\"config\", self._config)\n\n def _stream_data(self):\n if streamit:\n port_number = len(self._ports)\n vessel_number = len(self._vessels)\n\n for port in self._ports:\n streamit.data(\n \"port_details\", index=port.index, capacity=port.capacity, empty=port.empty, full=port.full,\n on_shipper=port.on_shipper, on_consignee=port.on_consignee, shortage=port.shortage,\n acc_shortage=port.acc_shortage, booking=port.booking, acc_booking=port.acc_booking,\n fulfillment=port.fulfillment, acc_fulfillment=port.acc_fulfillment, transfer_cost=port.transfer_cost\n )\n\n for vessel in 
self._vessels:\n streamit.data(\n \"vessel_details\", index=vessel.index, capacity=vessel.capacity, empty=vessel.empty,\n full=vessel.full, remaining_space=vessel.remaining_space, early_discharge=vessel.early_discharge,\n route_idx=vessel.route_idx, last_loc_idx=vessel.last_loc_idx, next_loc_idx=vessel.next_loc_idx,\n past_stop_list=vessel.past_stop_list[:], past_stop_tick_list=vessel.past_stop_tick_list[:],\n future_stop_list=vessel.future_stop_list[:], future_stop_tick_list=vessel.future_stop_tick_list[:]\n )\n\n vessel_plans = np.array(self._vessel_plans[:]).reshape(vessel_number, port_number)\n\n a, b = np.where(vessel_plans > -1)\n\n for vessel_index, port_index in list(zip(a, b)):\n streamit.data(\n \"vessel_plans\", vessel_index=vessel_index,\n port_index=port_index, planed_arrival_tick=vessel_plans[vessel_index, port_index]\n )\n\n full_on_ports = np.array(self._full_on_ports[:]).reshape(port_number, port_number)\n\n a, b = np.where(full_on_ports > 0)\n\n for from_port_index, to_port_index in list(zip(a, b)):\n streamit.data(\n \"full_on_ports\", from_port_index=from_port_index,\n dest_port_index=to_port_index, quantity=full_on_ports[from_port_index, to_port_index]\n )\n\n full_on_vessels = np.array(self._full_on_vessels[:]).reshape(vessel_number, port_number)\n\n a, b = np.where(full_on_vessels > 0)\n\n for vessel_index, port_index in list(zip(a, b)):\n streamit.data(\n \"full_on_vessels\", vessel_index=vessel_index, port_index=port_index,\n quantity=full_on_vessels[vessel_index, port_index]\n )\n" ]
[ [ "numpy.where", "numpy.array" ] ]
ReneRa/scikit-learn-extensions
[ "499c763b22e980d5add16b085d11915b3cd1c6d2", "499c763b22e980d5add16b085d11915b3cd1c6d2" ]
[ "sklearnext/preprocessing/over_sampling/kmeans_smote.py", "sklearnext/model_selection/search.py" ]
[ "\"\"\"\nThe :mod:`sklearnext.preprocessing.oversampling.kmeans_smote`\ncontains the implementation of the K-Means SMOTE oversampler.\n\"\"\"\n\n# Authors: Felix Last\n# Georgios Douzas <gdouzas@icloud.com>\n# License: BSD 3 clause\n\nimport warnings\nimport copy\nimport numpy as np\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom imblearn.over_sampling.base import BaseOverSampler\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.over_sampling import RandomOverSampler\n\n\nclass KMeansSMOTE(BaseOverSampler):\n \"\"\"Class to perform oversampling using K-Means SMOTE.\n K-Means SMOTE works in three steps:\n 1. Cluster the entire input space using k-means.\n 2. Distribute the number of samples to generate across clusters:\n 1. Select clusters which have a high number of minority class samples.\n 2. Assign more synthetic samples to clusters where minority class samples are sparsely distributed.\n 3. Oversample each filtered cluster using SMOTE.\n The method implements SMOTE and random oversampling as limit cases. Therefore, the following configurations\n may be used to achieve the behavior of ...\n ... SMOTE: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}``\n ... random oversampling: ``imbalance_ratio_threshold=float('Inf'), kmeans_args={'n_clusters':1}, smote_args={'k_neighbors':0})``\n Parameters\n ----------\n ratio : str, dict, or callable, optional (default='auto')\n Ratio to use for resampling the data set.\n - If ``str``, has to be one of: (i) ``'minority'``: resample the\n minority class; (ii) ``'majority'``: resample the majority class,\n (iii) ``'not minority'``: resample all classes apart of the minority\n class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:\n correspond to ``'all'`` with for oversampling methods and ``'not\n minority'`` for undersampling methods. The classes targeted will be\n oversampled or undersampled to achieve an equal number of sample\n with the majority or minority class.\n - If ``dict``, the keys correspond to the targeted classes. The values\n correspond to the desired number of samples.\n - If callable, function taking ``y`` and returns a ``dict``. The keys\n correspond to the targeted classes. The values correspond to the\n desired number of samples.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, ``random_state`` is the seed used by the random number\n generator; If ``RandomState`` instance, random_state is the random\n number generator; If ``None``, the random number generator is the\n ``RandomState`` instance used by ``np.random``.\n Will be copied to kmeans_args and smote_args if not explicitly passed there.\n kmeans_args : dict, optional (default={})\n Parameters to be passed to ``sklearn.cluster.KMeans`` or ``sklearn.cluster.MiniBatchKMeans``\n (see ``use_minibatch_kmeans``). If n_clusters is not explicitly set, scikit-learn's\n default will apply.\n smote_args : dict, optional (default={})\n Parameters to be passed to ``imblearn.over_sampling.SMOTE``. Note that ``k_neighbors`` is automatically\n adapted without warning when a cluster is smaller than the number of neighbors specified.\n `ratio` will be overwritten according to ratio passed to this class. 
`random_state`\n will be passed from this class if none is specified.\n imbalance_ratio_threshold : float or dict, optional (default=1.0)\n Specify a threshold for a cluster's imbalance ratio ``((majority_count + 1) / (minority_count + 1))``.\n Only clusters with an imbalance ratio less than the threshold are oversampled. Use a dictionary to specify\n different thresholds for different minority classes.\n density_power : float, optional (default=None)\n Used to compute the density of minority samples within each cluster. By default, the number of features will be used.\n use_minibatch_kmeans : boolean, optional (default=True)\n If False, use ``sklearn.cluster.KMeans``. If True, use ``sklearn.cluster.MiniBatchKMeans``.\n n_jobs : int, optional (default=1)\n The number of threads to open if possible. This parameter will be copied to ``kmeans_args`` and\n ``smote_args`` if not explicitly passed there. Note: ``MiniBatchKMeans`` does not accept ``n_jobs``.\n Examples\n --------\n >>> import numpy as np\n >>> from imblearn.datasets import fetch_datasets\n >>> from sklearnext.preprocessing import KMeansSMOTE\n >>>\n >>> datasets = fetch_datasets(filter_data=['oil'])\n >>> X, y = datasets['oil']['data'], datasets['oil']['target']\n >>>\n >>> [print('Class {} has {} instances'.format(label, count))\n ... for label, count in zip(*np.unique(y, return_counts=True))]\n >>>\n >>> kmeans_smote = KMeansSMOTE(\n ... kmeans_args={\n ... 'n_clusters': 100\n ... },\n ... smote_args={\n ... 'k_neighbors': 10\n ... }\n ... )\n >>> X_resampled, y_resampled = kmeans_smote.fit_sample(X, y)\n >>>\n >>> [print('Class {} has {} instances after oversampling'.format(label, count))\n ... for label, count in zip(*np.unique(y_resampled, return_counts=True))]\n \"\"\"\n\n def __init__(self,\n ratio='auto',\n random_state=None,\n kmeans_args={},\n smote_args={},\n imbalance_ratio_threshold=1.0,\n density_power=None,\n use_minibatch_kmeans=True,\n n_jobs=1):\n super(KMeansSMOTE, self).__init__(ratio=ratio, random_state=random_state)\n self.imbalance_ratio_threshold = imbalance_ratio_threshold\n self.kmeans_args = copy.deepcopy(kmeans_args)\n self.smote_args = copy.deepcopy(smote_args)\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.use_minibatch_kmeans = use_minibatch_kmeans\n\n self.density_power = density_power\n\n def _cluster(self, X):\n \"\"\"Run k-means to cluster the dataset\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Matrix containing the data which have to be sampled.\n Returns\n -------\n cluster_assignment : ndarray, shape (n_samples)\n The corresponding cluster labels of ``X``.\n \"\"\"\n\n if self.use_minibatch_kmeans:\n from sklearn.cluster import MiniBatchKMeans as KMeans\n else:\n from sklearn.cluster import KMeans as KMeans\n\n kmeans = KMeans(**self.kmeans_args)\n if self.use_minibatch_kmeans and 'init_size' not in self.kmeans_args:\n self.kmeans_args['init_size'] = min(2 * kmeans.n_clusters, X.shape[0])\n kmeans = KMeans(**self.kmeans_args)\n\n kmeans.fit_transform(X)\n cluster_assignment = kmeans.labels_\n # kmeans.labels_ does not use continuous labels,\n # i.e. some labels in 0..n_clusters may not exist. 
Tidy up this mess.\n return cluster_assignment\n\n def _filter_clusters(self, X, y, cluster_assignment, minority_class_label):\n \"\"\"Determine sampling weight for each cluster.\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Matrix containing the data which have to be sampled.\n y : ndarray, shape (n_samples, )\n Corresponding label for each sample in X.\n cluster_assignment : ndarray, shape (n_samples)\n The corresponding cluster labels of ``X``.\n minority_class_label : int\n Label of the minority class to filter by.\n Returns\n -------\n sampling_weights : ndarray, shape (np.max(np.unique(cluster_assignment)),)\n Vector of sampling weights for each cluster\n \"\"\"\n # compute the shape of the density factors\n # since the cluster labels are not continuous, make it large enough\n # to fit all values up to the largest cluster label\n largest_cluster_label = np.max(np.unique(cluster_assignment))\n sparsity_factors = np.zeros((largest_cluster_label + 1,), dtype=np.float64)\n minority_mask = (y == minority_class_label)\n sparsity_sum = 0\n imbalance_ratio_threshold = self.imbalance_ratio_threshold\n if isinstance(imbalance_ratio_threshold, dict):\n imbalance_ratio_threshold = imbalance_ratio_threshold[minority_class_label]\n\n for i in np.unique(cluster_assignment):\n cluster = X[cluster_assignment == i]\n mask = minority_mask[cluster_assignment == i]\n minority_count = cluster[mask].shape[0]\n majority_count = cluster[~mask].shape[0]\n imbalance_ratio = (majority_count + 1) / (minority_count + 1)\n if (imbalance_ratio < imbalance_ratio_threshold) and (minority_count > 1):\n distances = euclidean_distances(cluster[mask])\n non_diagonal_distances = distances[\n ~np.eye(distances.shape[0], dtype=np.bool)\n ]\n average_minority_distance = np.mean( non_diagonal_distances )\n if average_minority_distance is 0: average_minority_distance = 1e-1 # to avoid division by 0\n density_factor = minority_count / (average_minority_distance ** self.density_power)\n sparsity_factors[i] = 1 / density_factor\n\n # prevent division by zero; set zero weights in majority clusters\n sparsity_sum = sparsity_factors.sum()\n if sparsity_sum == 0:\n sparsity_sum = 1 # to avoid division by zero\n sparsity_sum = np.full(sparsity_factors.shape, sparsity_sum, np.asarray(sparsity_sum).dtype)\n sampling_weights = (sparsity_factors / sparsity_sum)\n\n return sampling_weights\n\n\n def _sample(self, X, y):\n \"\"\"Resample the dataset.\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Matrix containing the data which have to be sampled.\n y : ndarray, shape (n_samples, )\n Corresponding label for each sample in X.\n Returns\n -------\n X_resampled : ndarray, shape (n_samples_new, n_features)\n The array containing the resampled data.\n y_resampled : ndarray, shape (n_samples_new)\n The corresponding labels of ``X_resampled``\n \"\"\"\n self._set_subalgorithm_params()\n\n if self.density_power is None:\n self.density_power = X.shape[1]\n\n resampled = [ (X.copy(), y.copy()) ]\n for minority_class_label, n_samples in self.ratio_.items():\n if n_samples == 0:\n continue\n\n cluster_assignment = self._cluster(X)\n sampling_weights = self._filter_clusters(X, y, cluster_assignment, minority_class_label)\n smote_args = self.smote_args.copy()\n if np.count_nonzero(sampling_weights) > 0:\n # perform k-means smote\n for i in np.unique(cluster_assignment):\n cluster_X = X[cluster_assignment == i]\n cluster_y = y[cluster_assignment == i]\n if sampling_weights[i] > 0:\n # determine ratio 
for oversampling the current cluster\n target_ratio = {label: np.count_nonzero(cluster_y == label) for label in self.ratio_}\n cluster_minority_count = np.count_nonzero(cluster_y == minority_class_label)\n generate_count = int(round(n_samples * sampling_weights[i]))\n target_ratio[minority_class_label] = generate_count + cluster_minority_count\n\n # make sure that cluster_y has more than 1 class, adding a random point otherwise\n remove_index = -1\n if np.unique(cluster_y).size < 2:\n remove_index = cluster_y.size\n cluster_X = np.append(cluster_X, np.zeros((1,cluster_X.shape[1])), axis=0)\n majority_class_label = next( key for key in self.ratio_.keys() if key != minority_class_label )\n target_ratio[majority_class_label] = 1 + target_ratio[majority_class_label]\n cluster_y = np.append(cluster_y, np.asarray(majority_class_label).reshape((1,)), axis=0)\n\n # clear target ratio of labels not present in cluster\n for label in list(target_ratio.keys()):\n if label not in cluster_y:\n del target_ratio[label]\n\n # modify copy of the user defined smote_args to reflect computed parameters\n smote_args['ratio'] = target_ratio\n\n smote_args = self._validate_smote_args(smote_args, cluster_minority_count)\n oversampler = SMOTE(**smote_args)\n\n # if k_neighbors is 0, perform random oversampling instead of smote\n if 'k_neighbors' in smote_args and smote_args['k_neighbors'] == 0:\n oversampler_args = {}\n if 'random_state' in smote_args:\n oversampler_args['random_state'] = smote_args['random_state']\n oversampler = RandomOverSampler(**oversampler_args)\n\n # finally, apply smote to cluster\n with warnings.catch_warnings():\n # ignore warnings about minority class getting bigger than majority class\n # since this would only be true within this cluster\n warnings.filterwarnings(action='ignore', category=UserWarning, message='After over-sampling\\, the number of samples \\(.*\\) in class .* will be larger than the number of samples in the majority class \\(class #.* \\-\\> .*\\)')\n cluster_resampled_X, cluster_resampled_y = oversampler.fit_sample(cluster_X, cluster_y)\n\n if remove_index > -1:\n # since SMOTE's results are ordered the same way as the data passed into it,\n # the temporarily added point is at the same index position as it was added.\n for l in [cluster_resampled_X, cluster_resampled_y, cluster_X, cluster_y]:\n np.delete(l, remove_index, 0)\n\n # add new generated samples to resampled\n resampled.append( (\n cluster_resampled_X[cluster_y.size:,:],\n cluster_resampled_y[cluster_y.size:]))\n else:\n # all weights are zero -> perform regular smote\n warnings.warn('No minority clusters found for class {}. Performing regular SMOTE. 
Try changing the number of clusters.'.format(minority_class_label))\n target_ratio = {label: np.count_nonzero(y == label) for label in self.ratio_}\n target_ratio[minority_class_label] = self.ratio_[minority_class_label]\n minority_count = np.count_nonzero(y == minority_class_label)\n smote_args = self._validate_smote_args(smote_args, minority_count)\n oversampler = SMOTE(**smote_args)\n X_smote, y_smote = oversampler.fit_sample(X, y)\n resampled.append((\n X_smote[y.size:,:],\n y_smote[y.size:]))\n\n\n resampled = list(zip(*resampled))\n if(len(resampled) > 0):\n X_resampled = np.concatenate(resampled[0], axis=0)\n y_resampled = np.concatenate(resampled[1], axis=0)\n return X_resampled, y_resampled\n\n\n def _validate_smote_args(self, smote_args, minority_count):\n # determine max number of nearest neighbors considering sample size\n max_k_neighbors = minority_count - 1\n # check if max_k_neighbors is violated also considering smote's default\n smote = SMOTE(**smote_args)\n if smote.k_neighbors > max_k_neighbors:\n smote_args['k_neighbors'] = max_k_neighbors\n smote = SMOTE(**smote_args)\n return smote_args\n\n def _set_subalgorithm_params(self):\n # copy random_state to sub-algorithms\n if self.random_state is not None:\n if 'random_state' not in self.smote_args:\n self.smote_args['random_state'] = self.random_state\n if 'random_state' not in self.kmeans_args:\n self.kmeans_args['random_state'] = self.random_state\n\n # copy n_jobs to sub-algorithms\n if self.n_jobs is not None:\n if 'n_jobs' not in self.smote_args:\n self.smote_args['n_jobs'] = self.n_jobs\n if 'n_jobs' not in self.kmeans_args:\n if not self.use_minibatch_kmeans:\n self.kmeans_args['n_jobs'] = self.n_jobs", "\"\"\"\nThe :mod:`sklearnext.model_selection.search` includes utilities to search\nthe parameter and model space.\n\"\"\"\n\n# Author: Georgios Douzas <gdouzas@icloud.com>\n# License: BSD 3 clause\n\nfrom warnings import warn, filterwarnings\nimport re\nfrom dask_searchcv.utils import copy_estimator\nfrom sklearn.metrics import r2_score, accuracy_score\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.metaestimators import _BaseComposition\nfrom dask_searchcv.model_selection import GridSearchCV\nfrom dask_searchcv.model_selection import _RETURN_TRAIN_SCORE_DEFAULT\nfrom ..utils.validation import check_param_grids, check_estimators\n\n\n_DOC_TEMPLATE = \"\"\"{oneliner}\n\n{name} implements a \"fit\" and a \"score\" method.\nIt also implements \"predict\", \"predict_proba\", \"decision_function\",\n\"transform\" and \"inverse_transform\" if they are implemented in the\nestimator used.\n\n{description}\n\nParameters\n----------\nestimator : estimator object.\n This is assumed to implement the scikit-learn estimator interface.\n Either estimator needs to provide a ``score`` function,\n or ``scoring`` must be passed.\n\n{parameters}\n\nscoring : string, callable, list/tuple, dict or None, default: None\n A single string or a callable to evaluate the predictions on the test\n set.\n\n For evaluating multiple metrics, either give a list of (unique) strings\n or a dict with names as keys and callables as values.\n\n NOTE that when using custom scorers, each scorer should return a single\n value. 
Metric functions returning a list/array of values can be wrapped\n into multiple scorers that return one value each.\n\n If None, the estimator's default scorer (if available) is used.\n\niid : boolean, default=True\n If True, the data is assumed to be identically distributed across\n the folds, and the loss minimized is the total loss per sample,\n and not the mean loss across the folds.\n\ncv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross validation,\n - integer, to specify the number of folds in a ``(Stratified)KFold``,\n - An object to be used as a cross-validation generator.\n - An iterable yielding train, test splits.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, ``StratifiedKFold`` is used. In all\n other cases, ``KFold`` is used.\n\nrefit : boolean, or string, default=True\n Refit an estimator using the best found parameters on the whole\n dataset.\n\n For multiple metric evaluation, this needs to be a string denoting the\n scorer is used to find the best parameters for refitting the estimator\n at the end.\n\n The refitted estimator is made available at the ``best_estimator_``\n attribute and permits using ``predict`` directly on this\n ``GridSearchCV`` instance.\n\n Also for multiple metric evaluation, the attributes ``best_index_``,\n ``best_score_`` and ``best_parameters_`` will only be available if\n ``refit`` is set and all of them will be determined w.r.t this specific\n scorer.\n\n See ``scoring`` parameter to know more about multiple metric\n evaluation.\n\nerror_score : 'raise' (default) or numeric\n Value to assign to the score if an error occurs in estimator fitting.\n If set to 'raise', the error is raised. If a numeric value is given,\n FitFailedWarning is raised. This parameter does not affect the refit\n step, which will always raise the error.\n\nreturn_train_score : boolean, default=True\n If ``'False'``, the ``cv_results_`` attribute will not include training\n scores.\n\n Note that for scikit-learn >= 0.19.1, the default of ``True`` is\n deprecated, and a warning will be raised when accessing train score results\n without explicitly asking for train scores.\n\nscheduler : string, callable, Client, or None, default=None\n The dask scheduler to use. Default is to use the global scheduler if set,\n and fallback to the threaded scheduler otherwise. To use a different\n scheduler either specify it by name (either \"threading\", \"multiprocessing\",\n or \"synchronous\"), pass in a ``dask.distributed.Client``, or provide a\n scheduler ``get`` function.\n\nn_jobs : int, default=-1\n Number of jobs to run in parallel. Ignored for the synchronous and\n distributed schedulers. If ``n_jobs == -1`` [default] all cpus are used.\n For ``n_jobs < -1``, ``(n_cpus + 1 + n_jobs)`` are used.\n\ncache_cv : bool, default=True\n Whether to extract each train/test subset at most once in each worker\n process, or every time that subset is needed. Caching the splits can\n speedup computation at the cost of increased memory usage per worker\n process.\n\n If True, worst case memory usage is ``(n_splits + 1) * (X.nbytes +\n y.nbytes)`` per worker. 
If False, worst case memory usage is\n ``(n_threads_per_worker + 1) * (X.nbytes + y.nbytes)`` per worker.\n\nExamples\n--------\n{example}\n\nAttributes\n----------\ncv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n For instance the below given table\n\n +------------+-----------+------------+-----------------+---+---------+\n |param_kernel|param_gamma|param_degree|split0_test_score|...|rank.....|\n +============+===========+============+=================+===+=========+\n | 'poly' | -- | 2 | 0.8 |...| 2 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'poly' | -- | 3 | 0.7 |...| 4 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'rbf' | 0.1 | -- | 0.8 |...| 3 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'rbf' | 0.2 | -- | 0.9 |...| 1 |\n +------------+-----------+------------+-----------------+---+---------+\n\n will be represented by a ``cv_results_`` dict of::\n\n {{\n 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],\n mask = [False False False False]...)\n 'param_gamma': masked_array(data = [-- -- 0.1 0.2],\n mask = [ True True False False]...),\n 'param_degree': masked_array(data = [2.0 3.0 -- --],\n mask = [False False True True]...),\n 'split0_test_score' : [0.8, 0.7, 0.8, 0.9],\n 'split1_test_score' : [0.82, 0.5, 0.7, 0.78],\n 'mean_test_score' : [0.81, 0.60, 0.75, 0.82],\n 'std_test_score' : [0.02, 0.01, 0.03, 0.03],\n 'rank_test_score' : [2, 4, 3, 1],\n 'split0_train_score' : [0.8, 0.7, 0.8, 0.9],\n 'split1_train_score' : [0.82, 0.7, 0.82, 0.5],\n 'mean_train_score' : [0.81, 0.7, 0.81, 0.7],\n 'std_train_score' : [0.03, 0.04, 0.03, 0.03],\n 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],\n 'std_fit_time' : [0.01, 0.02, 0.01, 0.01],\n 'mean_score_time' : [0.007, 0.06, 0.04, 0.04],\n 'std_score_time' : [0.001, 0.002, 0.003, 0.005],\n 'params' : [{{'kernel': 'poly', 'degree': 2}}, ...],\n }}\n\n NOTE that the key ``'params'`` is used to store a list of parameter\n settings dict for all the parameter candidates.\n\n The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and\n ``std_score_time`` are all in seconds.\n\nbest_estimator_ : estimator\n Estimator that was chosen by the search, i.e. estimator\n which gave highest score (or smallest loss if specified)\n on the left out data. Not available if refit=False.\n\nbest_score_ : float or dict of floats\n Score of best_estimator on the left out data.\n When using multiple metrics, ``best_score_`` will be a dictionary\n where the keys are the names of the scorers, and the values are\n the mean test score for that scorer.\n\nbest_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n\nbest_index_ : int or dict of ints\n The index (of the ``cv_results_`` arrays) which corresponds to the best\n candidate parameter setting.\n\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n\n When using multiple metrics, ``best_index_`` will be a dictionary\n where the keys are the names of the scorers, and the values are\n the index with the best mean score for that scorer, as described above.\n\nscorer_ : function or dict of functions\n Scorer function used on the held out data to choose the best\n parameters for the model. 
A dictionary of ``{{scorer_name: scorer}}``\n when multiple metrics are used.\n\nn_splits_ : int\n The number of cross-validation splits (folds/iterations).\n\nNotes\n------\nThe parameters selected are those that maximize the score of the left out\ndata, unless an explicit score is passed in which case it is used instead.\n\"\"\"\n\n_grid_oneliner = \"\"\"\\\nExhaustive search over specified parameter values for an estimator.\\\n\"\"\"\n_grid_description = \"\"\"\\\nThe parameters of the estimator used to apply these methods are optimized\nby cross-validated grid-search over a parameter grid.\\\n\"\"\"\n_grid_parameters = \"\"\"\\\nparam_grid : dict or list of dictionaries\n Dictionary with parameters names (string) as keys and lists of\n parameter settings to try as values, or a list of such\n dictionaries, in which case the grids spanned by each dictionary\n in the list are explored. This enables searching over any sequence\n of parameter settings.\\\n\"\"\"\n_grid_example = \"\"\"\\\n>>> import dask_searchcv as dcv\n>>> from sklearn import svm, datasets\n>>> iris = datasets.load_iris()\n>>> parameters = {'kernel': ['linear', 'rbf'], 'C': [1, 10]}\n>>> svc = svm.SVC()\n>>> clf = dcv.GridSearchCV(svc, parameters)\n>>> clf.fit(iris.data, iris.target) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\nGridSearchCV(cache_cv=..., cv=..., error_score=...,\n estimator=SVC(C=..., cache_size=..., class_weight=..., coef0=...,\n decision_function_shape=..., degree=..., gamma=...,\n kernel=..., max_iter=-1, probability=False,\n random_state=..., shrinking=..., tol=...,\n verbose=...),\n iid=..., n_jobs=..., param_grid=..., refit=..., return_train_score=...,\n scheduler=..., scoring=...)\n>>> sorted(clf.cv_results_.keys()) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n['mean_fit_time', 'mean_score_time', 'mean_test_score',...\n 'mean_train_score', 'param_C', 'param_kernel', 'params',...\n 'rank_test_score', 'split0_test_score',...\n 'split0_train_score', 'split1_test_score', 'split1_train_score',...\n 'split2_test_score', 'split2_train_score',...\n 'std_fit_time', 'std_score_time', 'std_test_score', 'std_train_score'...]\\\n\"\"\"\n\n\nclass _ParametrizedEstimators(_BaseComposition):\n \"\"\"The functionality of a collection of estimators is provided as\n a single metaestimator. The fitted estimator is selected using a\n parameter.\"\"\"\n\n def __init__(self, estimators, est_name=None, random_state=None):\n self.estimators = estimators\n self.est_name = est_name\n self.random_state = random_state\n check_estimators(estimators)\n self._validate_names([est_name for est_name, _ in estimators])\n _ParametrizedEstimators._estimator_type = self._return_estimator_type()\n\n def _return_estimator_type(self):\n _, steps = zip(*self.estimators)\n if len(set([step._estimator_type for step in steps if hasattr(step, '_estimator_type')])) > 1:\n warn('Estimators include both regressors and classifiers. 
Estimator type set to classifier.')\n return 'classifier'\n return steps[0]._estimator_type\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Returns the coefficient of determination R^2 of the prediction\n if estimator type is a regressor or the mean accuracy on the given\n test data and labels if estimator type is a classifier.\n\n The coefficient R^2 is defined as (1 - u/v), where u is the residual\n sum of squares ((y_true - y_pred) ** 2).sum() and v is the total\n sum of squares ((y_true - y_true.mean()) ** 2).sum().\n The best possible score is 1.0 and it can be negative (because the\n model can be arbitrarily worse). A constant model that always\n predicts the expected value of y, disregarding the input features,\n would get a R^2 score of 0.0.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test samples.\n\n y : array-like, shape = (n_samples) or (n_samples, n_outputs)\n True labels for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) wrt. y.\n\n \"\"\"\n if _ParametrizedEstimators._estimator_type == 'regressor':\n score = r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='variance_weighted')\n elif _ParametrizedEstimators._estimator_type == 'classifier':\n score = accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n return score\n\n def set_params(self, **params):\n \"\"\"Set the parameters.\n Valid parameter keys can be listed with get_params().\n Parameters\n ----------\n params : keyword arguments\n Specific parameters using e.g. set_params(parameter_name=new_value)\n In addition, to setting the parameters of the ``_ParametrizedEstimators``,\n the individual estimators of the ``_ParametrizedEstimators`` can also be\n set or replaced by setting them to None.\n \"\"\"\n super()._set_params('estimators', **params)\n check_estimators(self.estimators)\n return self\n\n def get_params(self, deep=True):\n \"\"\"Get the parameters.\n Parameters\n ----------\n deep: bool\n Setting it to True gets the various estimators and the parameters\n of the estimators as well\n \"\"\"\n return super()._get_params('estimators', deep=deep)\n\n def fit(self, X, y, *args, **kwargs):\n \"\"\"\"Fit the selected estimator and dataset.\"\"\"\n\n # Copy one of the estimators\n if self.est_name is None:\n raise ValueError('Attribute `est_name` is set to None. 
An estimator should be selected.')\n estimator = copy_estimator(dict(self.estimators)[self.est_name])\n\n # Fix data race\n filterwarnings('ignore', category=DeprecationWarning, module=r'^{0}\\.'.format(re.escape(__name__)))\n\n # Set random state when exists\n params = estimator.get_params().keys()\n random_state_params = [par for par, included in zip(params, ['random_state' in par for par in params]) if included]\n for par in random_state_params:\n estimator.set_params(**{par: self.random_state})\n\n # Fit estimator\n self.estimator_ = estimator.fit(X, y, *args, **kwargs)\n\n return self\n\n def predict(self, X, *args, **kwargs):\n \"\"\"\"Predict with the selected estimator.\"\"\"\n check_is_fitted(self, 'estimator_')\n return self.estimator_.predict(X, *args, **kwargs)\n\n def predict_proba(self, X, *args, **kwargs):\n \"\"\"\"Predict the probability with the selected estimator.\"\"\"\n check_is_fitted(self, 'estimator_')\n return self.estimator_.predict_proba(X, *args, **kwargs)\n\n\nclass ModelSearchCV(GridSearchCV):\n __doc__ = _DOC_TEMPLATE.format(name=\"ModelSearchCV\",\n oneliner=_grid_oneliner,\n description=_grid_description,\n parameters=_grid_parameters,\n example=_grid_example)\n\n def __init__(self,\n estimators,\n param_grids,\n scoring=None,\n iid=True,\n refit=True,\n cv=None,\n error_score='raise',\n return_train_score=_RETURN_TRAIN_SCORE_DEFAULT,\n scheduler=None,\n n_jobs=-1,\n cache_cv=True):\n self.estimators = estimators\n self.param_grids = param_grids\n super(ModelSearchCV, self).__init__(estimator=_ParametrizedEstimators(estimators),\n param_grid=check_param_grids(param_grids, estimators),\n scoring=scoring,\n iid=iid,\n refit=refit,\n cv=cv,\n error_score=error_score,\n return_train_score=return_train_score,\n scheduler=scheduler,\n n_jobs=n_jobs,\n cache_cv=cache_cv)\n\n @staticmethod\n def _split_est_name(param_grid):\n param_grid = {param:value for param, value in param_grid.items() if param != 'random_state'}\n est_name = param_grid.pop('est_name')\n return est_name, {'__'.join(param.split('__')[1:]):value for param, value in param_grid.items()}\n\n def _modify_grid_search_attrs(self):\n if hasattr(self, 'best_estimator_'):\n self.best_estimator_ = self.best_estimator_.estimator_\n models = []\n for ind, param_grid in enumerate(self.cv_results_['params']):\n est_name, self.cv_results_['params'][ind] = self._split_est_name(param_grid)\n models.append(est_name)\n self.cv_results_.update({'models': models})\n\n def fit(self, X, y=None, groups=None, **fit_params):\n super(ModelSearchCV, self).fit(X, y, groups, **fit_params)\n self._modify_grid_search_attrs()\n return self\n\n" ]
[ [ "numpy.concatenate", "numpy.count_nonzero", "numpy.delete", "numpy.asarray", "numpy.zeros", "sklearn.cluster.KMeans", "numpy.mean", "numpy.eye", "sklearn.metrics.pairwise.euclidean_distances", "numpy.unique" ], [ "sklearn.utils.validation.check_is_fitted" ] ]
pfistfl/HPOBench
[ "a7ad8807bd2e058ff99f703ad057b64ecadd4b66" ]
[ "hpobench/benchmarks/nas/nasbench_101.py" ]
[ "\"\"\"\nInterface to NasBench101 for Hyperparameter Optimization and Neural Architecture Search\n\nhttps://github.com/automl/nas_benchmarks\n\nHow to use this benchmark:\n--------------------------\n\nWe recommend using the containerized version of this benchmark.\nIf you want to use this benchmark locally (without running it via the corresponding container),\nyou need to perform the following steps.\n\n1. Download data\n================\n\nThe data will be downloaded automatically.\nNote: However, if you use the benchmark locally, you can specify also the data directory (path to the folder, where the\nnasbench_full.tfrecord is) by hand.\n\nIn this case you can download the data with the following command.\n\n```\nwget https://storage.googleapis.com/nasbench/nasbench_full.tfrecord\n```\nRemark: it is important to select the full tf record and not the 'only_108' record to perform multi-fidelity\noptimization.\n\n2. Clone and install\n====================\n```\ncd /path/to/HPOBench\npip install .[nasbench_101]\n\npip install git+https://github.com/google-research/nasbench.git@master\npip install git+https://github.com/automl/nas_benchmarks.git@master\n```\n\nNotes:\n------\nBenchmarks in NASBench101 only contain epochs 4, 12, 36 and 108.\nQuerying another epoch, e.g. 5, raises an assertion.\n\nChangelog:\n==========\n0.0.4\n* New container release due to a general change in the communication between container and HPOBench.\n Works with HPOBench >= v0.0.8\n\n0.0.3:\n* Standardize the structure of the meta information\n\n0.0.2:\n* The objective function takes as input now the parameter run_index. Allowed values are Tuple(0-2), 0, 1, 2, None.\n This value specifies which seeds are used. The user can specify a single index or a tuple with indices.\n If the user wants to use a randomly drawn run_index, they can simply set the value explicitly to None.\n* Fix a bug in NASCifar10CBenchmark\n\n0.0.1:\n* First implementation\n\n\n\"\"\"\nimport logging\n\nfrom pathlib import Path\nfrom typing import Union, Dict, Any, Tuple, List\n\nimport ConfigSpace as CS\nimport numpy as np\nfrom tabular_benchmarks.nas_cifar10 import NASCifar10\nfrom nasbench import api\nfrom nasbench.api import OutOfDomainError\nfrom nasbench.lib import graph_util\n\nfrom hpobench import config_file\nimport hpobench.util.rng_helper as rng_helper\nfrom hpobench.abstract_benchmark import AbstractBenchmark\nfrom hpobench.util.data_manager import NASBench_101DataManager\n\n__version__ = '0.0.4'\nlogger = logging.getLogger('NasBench101')\n\nMAX_EDGES = 9\nVERTICES = 7\nDEFAULT_API_FILE = config_file.data_dir / \"nasbench_101\"\n\n\nclass NASCifar10BaseBenchmark(AbstractBenchmark):\n def __init__(self, benchmark: NASCifar10,\n data_path: Union[Path, str, None] = None,\n rng: Union[np.random.RandomState, int, None] = None, **kwargs):\n \"\"\"\n Baseclass for the tabular benchmarks https://github.com/automl/nas_benchmarks/tree/master/tabular_benchmarks.\n Please install the benchmark first. Place the data under ``data_path``.\n\n Parameters\n ----------\n benchmark : NASCifar10\n Type of the benchmark to use. Don't call this class directly. 
Instantiate via subclasses (see below).\n data_path : str, Path, None\n Path to the folder, which contains the downloaded file nasbench_full.tfrecord.\n rng : np.random.RandomState, int, None\n Random seed for the benchmarks\n \"\"\"\n\n super(NASCifar10BaseBenchmark, self).__init__(rng=rng)\n\n self.benchmark = benchmark\n self.data_path = data_path\n\n def _query_benchmark(self, config: Dict, run_index: int, budget: int = 108) -> Dict:\n raise NotImplementedError\n\n # pylint: disable=arguments-differ\n @AbstractBenchmark.check_parameters\n def objective_function(self, configuration: Union[CS.Configuration, Dict],\n fidelity: Union[CS.Configuration, Dict, None] = None,\n run_index: Union[int, Tuple, None] = (0, 1, 2),\n rng: Union[np.random.RandomState, int, None] = None,\n **kwargs) -> Dict:\n \"\"\"\n Query the NAS-benchmark using a given configuration and a epoch (=budget).\n\n Parameters\n ----------\n configuration : Dict, CS.Configuration\n fidelity: Dict, None\n Fidelity parameters, check get_fidelity_space(). Uses default (max) value if None.\n run_index : int, Tuple, None\n The nas benchmark has for each configuration-budget-pair results from 3 different runs.\n - If multiple `run_id`s are given as Tuple, the benchmark returns the mean over the given runs.\n - By default (no parameter is specified) all runs are used. A specific run can be chosen by setting the\n `run_id` to a value from [0, 3]. While the performance is averaged across the `run_index`, the costs are\n the sum of the runtime per `run_index`.\n - When this value is explicitly set to `None`, the function will use a random seed.\n rng : np.random.RandomState, int, None\n Random seed to use in the benchmark.\n\n To prevent overfitting on a single seed, it is possible to pass a\n parameter ``rng`` as 'int' or 'np.random.RandomState' to this function.\n If this parameter is not given, the default random state is used.\n kwargs\n\n Returns\n -------\n Dict -\n function_value : validation error\n cost : runtime\n info : Dict\n fidelity : used fidelities in this evaluation\n \"\"\"\n self.rng = rng_helper.get_rng(rng, self_rng=self.rng)\n\n if isinstance(run_index, int):\n assert 0 <= run_index <= 2, f'run_index must be in [0, 2], not {run_index}'\n run_index = (run_index, )\n elif isinstance(run_index, (Tuple, List)):\n assert 0 < len(run_index) <= 3, 'run_index must not be empty'\n assert min(run_index) >= 0 and max(run_index) <= 2, \\\n f'all run_index values must be in [0, 2], but were {run_index}'\n if len(set(run_index)) != len(run_index):\n logger.debug('There are some values more than once in the run_index. We remove the redundant entries.')\n run_index = tuple(set(run_index))\n elif run_index is None:\n logger.debug('The run index is explicitly set to None! A random seed will be selected.')\n run_index = tuple(self.rng.choice((0, 1, 2), size=1))\n else:\n raise ValueError(f'run index must be one of Tuple or Int, but was {type(run_index)}')\n\n self.benchmark.reset_tracker()\n\n # Returns (valid_accuracy: 0, runtime: 0) if it is invalid, e.g. 
config not valid or\n # budget not in 4 12 36 108\n train_accuracies = []\n valid_accuracies = []\n test_accuracies = []\n training_times = []\n additional = {}\n\n for run_id in run_index:\n data = self._query_benchmark(config=configuration, budget=fidelity['budget'], run_index=run_id)\n\n train_accuracies.append(data['train_accuracy'])\n valid_accuracies.append(data['validation_accuracy'])\n test_accuracies.append(data['test_accuracy'])\n training_times.append(data['training_time'])\n\n # Since those information are the same for all run ids, just store one of them.\n additional = {'trainable_parameters': data['trainable_parameters'],\n 'module_operations': data['module_operations']}\n\n return {'function_value': float(1 - np.mean(valid_accuracies)),\n 'cost': float(np.sum(training_times)),\n 'info': {'fidelity': fidelity,\n 'train_accuracies': train_accuracies,\n 'valid_accuracies': valid_accuracies,\n 'test_accuracies': test_accuracies,\n 'training_times': training_times,\n 'data': additional\n }\n }\n\n @AbstractBenchmark.check_parameters\n def objective_function_test(self, configuration: Union[Dict, CS.Configuration],\n fidelity: Union[CS.Configuration, Dict, None] = None,\n rng: Union[np.random.RandomState, int, None] = None,\n **kwargs) -> Dict:\n \"\"\"\n Validate a configuration on the maximum available budget.\n\n Parameters\n ----------\n configuration : Dict, CS.Configuration\n fidelity: Dict, None\n Fidelity parameters, check get_fidelity_space(). Uses default (max) value if None.\n rng : np.random.RandomState, int, None\n Random seed to use in the benchmark. To prevent overfitting on a single seed, it is\n possible to pass a parameter ``rng`` as 'int' or 'np.random.RandomState' to this\n function. If this parameter is not given, the default random state is used.\n kwargs\n\n Returns\n -------\n Dict -\n function_value : test error\n cost : runtime\n info : Dict\n fidelity : used fidelities in this evaluation\n \"\"\"\n\n result = self.objective_function(configuration=configuration, fidelity=fidelity, run_index=(0, 1, 2), rng=rng)\n result['function_value'] = float(1 - np.mean(result['info']['test_accuracies']))\n\n return result\n\n @staticmethod\n def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n raise NotImplementedError\n\n @staticmethod\n def get_meta_information() -> Dict:\n \"\"\" Returns the meta information for the benchmark \"\"\"\n return {'name': 'Tabular Benchmarks for Hyperparameter Optimization and Neural Architecture Search',\n 'references': ['@article{klein2019tabular,'\n 'title = {Tabular benchmarks for joint architecture and hyperparameter optimization},'\n 'author = {Klein, Aaron and Hutter, Frank},'\n 'journal = {arXiv preprint arXiv:1905.04970},'\n 'year = {2019}}',\n 'https://arxiv.org/abs/1905.04970',\n ],\n 'code': 'https://github.com/automl/nas_benchmarks',\n }\n\n @staticmethod\n def _get_configuration_space(benchmark: Any, seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n \"\"\" Helper function to pass a seed to the configuration space \"\"\"\n seed = seed if seed is not None else np.random.randint(1, 100000)\n cs = benchmark.get_configuration_space()\n cs.seed(seed)\n return cs\n\n @staticmethod\n def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n \"\"\"\n Creates a ConfigSpace.ConfigurationSpace containing all fidelity parameters for\n the NAS Benchmark 101.\n\n Parameters\n ----------\n seed : int, None\n Fixing the seed for the ConfigSpace.ConfigurationSpace\n\n 
Returns\n -------\n ConfigSpace.ConfigurationSpace\n \"\"\"\n seed = seed if seed is not None else np.random.randint(1, 100000)\n fidel_space = CS.ConfigurationSpace(seed=seed)\n\n fidel_space.add_hyperparameters([\n CS.OrdinalHyperparameter('budget', sequence=[4, 12, 36, 108], default_value=108)\n ])\n\n return fidel_space\n\n @staticmethod\n def _try_download_api_file(save_to: Union[Path, str, None]):\n data_manager = NASBench_101DataManager(save_to)\n data_manager.download()\n return data_manager.save_dir\n\n\nclass NASCifar10ABenchmark(NASCifar10BaseBenchmark):\n def __init__(self, data_path: Union[Path, str, None] = None,\n rng: Union[np.random.RandomState, int, None] = None, **kwargs):\n\n data_path = self._try_download_api_file(data_path)\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10A\n benchmark = NASCifar10A(data_dir=str(data_path), multi_fidelity=True)\n super(NASCifar10ABenchmark, self).__init__(benchmark=benchmark, data_path=data_path, rng=rng, **kwargs)\n\n @staticmethod\n def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n \"\"\"\n Return the configuration space for the NASCifar10A benchmark.\n Parameters\n ----------\n seed : int, None\n Random seed for the configuration space.\n\n Returns\n -------\n CS.ConfigurationSpace - Containing the benchmark's hyperparameter\n \"\"\"\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10A\n return NASCifar10BBenchmark._get_configuration_space(NASCifar10A, seed)\n\n def _query_benchmark(self, config: Dict, run_index: int, budget: int = 108) -> Dict:\n \"\"\"\n Copied from the 'objective_function' from nas_cifar10.py\n We adapted the file in such a way, that the complete result is returned. The original implementation returns\n only the validation error. Now, it can also return the test loss for a given configuration.\n\n Parameters\n ----------\n config : Dict\n run_index : int\n Specifies the seed to use. Can be one of 0, 1, 2.\n budget : int\n The number of epochs. Must be one of: 4 12 36 108. 
Otherwise a accuracy of 0 is returned.\n\n Returns\n -------\n Dict\n \"\"\"\n\n failure = {\"test_accuracy\": 0, \"train_accuracy\": 0, \"validation_accuracy\": 0, \"training_time\": 0,\n \"info\": \"failure\", \"trainable_parameters\": 0, \"module_operations\": 0}\n\n if self.benchmark.multi_fidelity is False:\n assert budget == 108\n\n matrix = np.zeros([VERTICES, VERTICES], dtype=np.int8)\n idx = np.triu_indices(matrix.shape[0], k=1)\n for i in range(VERTICES * (VERTICES - 1) // 2):\n row = idx[0][i]\n col = idx[1][i]\n matrix[row, col] = config[\"edge_%d\" % i]\n\n # if not graph_util.is_full_dag(matrix) or graph_util.num_edges(matrix) > MAX_EDGES:\n if graph_util.num_edges(matrix) > MAX_EDGES:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n labeling = [config[\"op_node_%d\" % i] for i in range(5)]\n labeling = ['input'] + list(labeling) + ['output']\n model_spec = api.ModelSpec(matrix, labeling)\n\n try:\n data = modified_query(self.benchmark, run_index=run_index, model_spec=model_spec, epochs=budget)\n except api.OutOfDomainError:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n self.benchmark.record_valid(config, data, model_spec)\n\n # We dont need this field.\n data.pop('module_adjacency')\n\n return data\n\n\nclass NASCifar10BBenchmark(NASCifar10BaseBenchmark):\n def __init__(self, data_path: Union[Path, str, None] = None,\n rng: Union[np.random.RandomState, int, None] = None, **kwargs):\n\n data_path = self._try_download_api_file(data_path)\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10B\n benchmark = NASCifar10B(data_dir=str(data_path), multi_fidelity=True)\n super(NASCifar10BBenchmark, self).__init__(benchmark=benchmark, data_path=data_path, rng=rng, **kwargs)\n\n @staticmethod\n def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n \"\"\"\n Return the configuration space for the NASCifar10B benchmark.\n Parameters\n ----------\n seed : int, None\n Random seed for the configuration space.\n\n Returns\n -------\n CS.ConfigurationSpace - Containing the benchmark's hyperparameter\n \"\"\"\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10B\n return NASCifar10BBenchmark._get_configuration_space(NASCifar10B, seed)\n\n def _query_benchmark(self, config: Dict, run_index: int, budget: int = 108) -> Dict:\n \"\"\"\n Copied from the 'objective_function' from nas_cifar10.py\n We adapted the file in such a way, that the complete result is returned. The original implementation returns\n only the validation error. Now, it can also return the test loss for a given configuration.\n\n Parameters\n ----------\n config : Dict\n budget : int\n The number of epochs. Must be one of: 4 12 36 108. 
Otherwise a accuracy of 0 is returned.\n\n Returns\n -------\n Dict\n \"\"\"\n failure = {\"test_accuracy\": 0, \"train_accuracy\": 0, \"validation_accuracy\": 0, \"training_time\": 0,\n \"info\": \"failure\", \"trainable_parameters\": 0, \"module_operations\": 0}\n\n if self.benchmark.multi_fidelity is False:\n assert budget == 108\n\n bitlist = [0] * (VERTICES * (VERTICES - 1) // 2)\n for i in range(MAX_EDGES):\n bitlist[config[\"edge_%d\" % i]] = 1\n out = 0\n for bit in bitlist:\n out = (out << 1) | bit\n\n matrix = np.fromfunction(graph_util.gen_is_edge_fn(out),\n (VERTICES, VERTICES),\n dtype=np.int8)\n # if not graph_util.is_full_dag(matrix) or graph_util.num_edges(matrix) > MAX_EDGES:\n if graph_util.num_edges(matrix) > MAX_EDGES:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n labeling = [config[\"op_node_%d\" % i] for i in range(5)]\n labeling = ['input'] + list(labeling) + ['output']\n model_spec = api.ModelSpec(matrix, labeling)\n try:\n data = modified_query(self.benchmark, run_index=run_index, model_spec=model_spec, epochs=budget)\n except api.OutOfDomainError:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n self.benchmark.record_valid(config, data, model_spec)\n\n # We dont need this field.\n data.pop('module_adjacency')\n\n return data\n\n\nclass NASCifar10CBenchmark(NASCifar10BaseBenchmark):\n def __init__(self, data_path: Union[Path, str, None] = None,\n rng: Union[np.random.RandomState, int, None] = None, **kwargs):\n\n data_path = self._try_download_api_file(data_path)\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10C\n benchmark = NASCifar10C(data_dir=str(data_path), multi_fidelity=True)\n super(NASCifar10CBenchmark, self).__init__(benchmark=benchmark, data_path=data_path, rng=rng, **kwargs)\n\n @staticmethod\n def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:\n \"\"\"\n Return the configuration space for the NASCifar10C benchmark.\n Parameters\n ----------\n seed : int, None\n Random seed for the configuration space.\n\n Returns\n -------\n CS.ConfigurationSpace - Containing the benchmark's hyperparameter\n \"\"\"\n\n from tabular_benchmarks.nas_cifar10 import NASCifar10C\n return NASCifar10BBenchmark._get_configuration_space(NASCifar10C, seed)\n\n def _query_benchmark(self, config: Dict, run_index: int, budget: int = 108) -> Dict:\n \"\"\"\n Copied from the 'objective_function' from nas_cifar10.py\n We adapted the file in such a way, that the complete result is returned. The original implementation returns\n only the validation error. Now, it can also return the test loss for a given configuration.\n\n Parameters\n ----------\n config : Dict\n budget : int\n The number of epochs. Must be one of: 4 12 36 108. 
Otherwise a accuracy of 0 is returned.\n\n Returns\n -------\n Dict\n \"\"\"\n # Unify the return value to a dictionary.\n failure = {\"test_accuracy\": 0, \"train_accuracy\": 0, \"validation_accuracy\": 0, \"training_time\": 0,\n \"info\": \"failure\", \"trainable_parameters\": 0, \"module_operations\": 0}\n\n if self.benchmark.multi_fidelity is False:\n assert budget == 108\n\n edge_prob = []\n for i in range(VERTICES * (VERTICES - 1) // 2):\n edge_prob.append(config[\"edge_%d\" % i])\n\n idx = np.argsort(edge_prob)[::-1][:config[\"num_edges\"]]\n binay_encoding = np.zeros(len(edge_prob))\n binay_encoding[idx] = 1\n matrix = np.zeros([VERTICES, VERTICES], dtype=np.int8)\n idx = np.triu_indices(matrix.shape[0], k=1)\n for i in range(VERTICES * (VERTICES - 1) // 2):\n row = idx[0][i]\n col = idx[1][i]\n matrix[row, col] = binay_encoding[i]\n\n if graph_util.num_edges(matrix) > MAX_EDGES:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n labeling = [config[\"op_node_%d\" % i] for i in range(5)]\n labeling = ['input'] + list(labeling) + ['output']\n model_spec = api.ModelSpec(matrix, labeling)\n try:\n data = modified_query(self.benchmark, run_index=run_index, model_spec=model_spec, epochs=budget)\n except api.OutOfDomainError:\n self.benchmark.record_invalid(config, 1, 1, 0)\n return failure\n\n self.benchmark.record_valid(config, data, model_spec)\n\n # We dont need this field.\n data.pop('module_adjacency')\n\n return data\n\n\ndef modified_query(benchmark, model_spec, run_index: int, epochs=108, stop_halfway=False):\n \"\"\"\n NOTE:\n Copied from https://github.com/google-research/nasbench/blob/b94247037ee470418a3e56dcb83814e9be83f3a8/nasbench/api.py#L204-L263 # noqa\n We changed the function in such a way that we now can specified the run index (index of the evaluation) which was\n in the original code sampled randomly.\n\n OLD DOCSTRING:\n Fetch one of the evaluations for this model spec.\n\n Each call will sample one of the config['num_repeats'] evaluations of the\n model. This means that repeated queries of the same model (or isomorphic\n models) may return identical metrics.\n\n This function will increment the budget counters for benchmarking purposes.\n See self.training_time_spent, and self.total_epochs_spent.\n\n This function also allows querying the evaluation metrics at the halfway\n point of training using stop_halfway. Using this option will increment the\n budget counters only up to the halfway point.\n\n Args:\n model_spec: ModelSpec object.\n epochs: number of epochs trained. 
Must be one of the evaluated number of\n epochs, [4, 12, 36, 108] for the full dataset.\n stop_halfway: if True, returned dict will only contain the training time\n and accuracies at the halfway point of training (num_epochs/2).\n Otherwise, returns the time and accuracies at the end of training\n (num_epochs).\n\n Returns:\n dict containing the evaluated data for this object.\n\n Raises:\n OutOfDomainError: if model_spec or num_epochs is outside the search space.\n \"\"\"\n if epochs not in benchmark.dataset.valid_epochs:\n raise OutOfDomainError('invalid number of epochs, must be one of %s'\n % benchmark.dataset.valid_epochs)\n\n fixed_stat, computed_stat = benchmark.dataset.get_metrics_from_spec(model_spec)\n\n # MODIFICATION: Use the run index instead of the sampled one.\n # sampled_index = random.randint(0, self.config['num_repeats'] - 1)\n computed_stat = computed_stat[epochs][run_index]\n\n data = {}\n data['module_adjacency'] = fixed_stat['module_adjacency']\n data['module_operations'] = fixed_stat['module_operations']\n data['trainable_parameters'] = fixed_stat['trainable_parameters']\n\n if stop_halfway:\n data['training_time'] = computed_stat['halfway_training_time']\n data['train_accuracy'] = computed_stat['halfway_train_accuracy']\n data['validation_accuracy'] = computed_stat['halfway_validation_accuracy']\n data['test_accuracy'] = computed_stat['halfway_test_accuracy']\n else:\n data['training_time'] = computed_stat['final_training_time']\n data['train_accuracy'] = computed_stat['final_train_accuracy']\n data['validation_accuracy'] = computed_stat['final_validation_accuracy']\n data['test_accuracy'] = computed_stat['final_test_accuracy']\n\n benchmark.dataset.training_time_spent += data['training_time']\n if stop_halfway:\n benchmark.dataset.total_epochs_spent += epochs // 2\n else:\n benchmark.dataset.total_epochs_spent += epochs\n\n return data\n" ]
[ [ "numpy.triu_indices", "numpy.zeros", "numpy.sum", "numpy.mean", "numpy.random.randint", "numpy.argsort" ] ]
Kaufi-Jonas/VaRA-Tool-Suite
[ "31563896ad7dd1c1a147202b0c5c9fffe772b803" ]
[ "varats/varats/plots/blame_interaction_graph_plots.py" ]
[ "\"\"\"Module for BlameInteractionGraph plots.\"\"\"\n\nimport typing as tp\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport click\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport pandas as pd\nimport plotly.offline as offply\nfrom matplotlib import style\n\nfrom varats.data.reports.blame_interaction_graph import (\n create_blame_interaction_graph,\n CIGNodeAttrs,\n CIGEdgeAttrs,\n AIGNodeAttrs,\n CAIGNodeAttrs,\n)\nfrom varats.data.reports.blame_report import BlameReport\nfrom varats.mapping.commit_map import get_commit_map\nfrom varats.paper_mgmt.case_study import (\n newest_processed_revision_for_case_study,\n)\nfrom varats.plot.plot import Plot, PlotDataEmpty\nfrom varats.plot.plots import (\n PlotGenerator,\n REQUIRE_CASE_STUDY,\n REQUIRE_REVISION,\n)\nfrom varats.plots.chord_plot_utils import (\n make_chord_plot,\n make_arc_plot,\n NodeTy,\n ChordPlotNodeInfo,\n ChordPlotEdgeInfo,\n ArcPlotEdgeInfo,\n ArcPlotNodeInfo,\n)\nfrom varats.ts_utils.cli_util import CLIOptionTy, make_cli_option\nfrom varats.utils.git_util import (\n CommitRepoPair,\n create_commit_lookup_helper,\n UNCOMMITTED_COMMIT_HASH,\n FullCommitHash,\n ShortCommitHash,\n)\n\n\nclass CommitInteractionGraphPlot(Plot, plot_name='cig_plot'):\n \"\"\"Creates a dot file for a commit interaction graph.\"\"\"\n\n def plot(self, view_mode: bool) -> None:\n # Nothing to do here.\n pass\n\n def save(self, plot_dir: Path, filetype: str = 'svg') -> None:\n project_name = self.plot_kwargs[\"project\"]\n revision = self.plot_kwargs[\"revision\"]\n cig = create_blame_interaction_graph(project_name, revision\n ).commit_interaction_graph()\n nx.set_node_attributes(\n cig, {node: cig.nodes[node][\"commit_hash\"] for node in cig.nodes},\n \"label\"\n )\n\n # pylint: disable=import-outside-toplevel\n from networkx.drawing.nx_agraph import write_dot\n write_dot(cig, plot_dir / self.plot_file_name(\"dot\"))\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass CommitInteractionGraphPlotGenerator(\n PlotGenerator,\n generator_name=\"cig-plot\",\n options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]\n):\n \"\"\"Plot a commit interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs)\n ]\n\n\nNodeInfoTy = tp.TypeVar(\"NodeInfoTy\", ChordPlotNodeInfo, ArcPlotNodeInfo)\nEdgeInfoTy = tp.TypeVar(\"EdgeInfoTy\", ChordPlotEdgeInfo, ArcPlotEdgeInfo)\n\n\ndef _prepare_cig_plotly(\n project_name: str, revision: FullCommitHash,\n create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph],\n NodeInfoTy],\n create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int],\n EdgeInfoTy]\n) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[\n NodeTy, NodeTy, EdgeInfoTy]]]:\n commit_lookup = create_commit_lookup_helper(project_name)\n cig = create_blame_interaction_graph(project_name,\n revision).commit_interaction_graph()\n\n def filter_nodes(node: CommitRepoPair) -> bool:\n if node.commit_hash == UNCOMMITTED_COMMIT_HASH:\n return False\n commit = commit_lookup(node)\n if not commit:\n return False\n # make filter configurable\n return datetime.utcfromtimestamp(commit.commit_time\n ) >= datetime(2015, 1, 1)\n\n nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = []\n node_meta: tp.Dict[NodeTy, CommitRepoPair] = {}\n for node in cig.nodes:\n node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])\n commit = node_attrs[\"commit\"]\n if not 
filter_nodes(commit):\n continue\n node_meta[node] = commit\n nodes.append((node, create_node_info(node, commit, cig)))\n\n nodes = sorted(\n nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time)\n )\n\n edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = []\n for source, sink in cig.edges:\n amount = tp.cast(CIGEdgeAttrs, cig[source][sink])[\"amount\"]\n source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])[\"commit\"]\n sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])[\"commit\"]\n if not filter_nodes(source_commit) or not filter_nodes(sink_commit):\n continue\n edges.append((\n source, sink, create_edge_info(source_commit, sink_commit, amount)\n ))\n\n return nodes, edges\n\n\nclass CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'):\n \"\"\"Chord plot for a commit interaction graph.\"\"\"\n\n def plot(self, view_mode: bool) -> None:\n project_name: str = self.plot_kwargs[\"case_study\"].project_name\n revision = get_commit_map(project_name).convert_to_full_or_warn(\n ShortCommitHash(self.plot_kwargs[\"revision\"])\n )\n\n def create_node_data(\n node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph\n ) -> ChordPlotNodeInfo:\n del node\n del cig\n return {\"info\": commit.commit_hash.short_hash, \"color\": 1}\n\n def create_edge_data(\n source_commit: CommitRepoPair, sink_commit: CommitRepoPair,\n amount: int\n ) -> ChordPlotEdgeInfo:\n return {\n \"size\": amount,\n \"color\": 1,\n \"info\":\n f\"{source_commit.commit_hash.short_hash} \"\n f\"--{{{amount}}}--> \"\n f\"{sink_commit.commit_hash.short_hash}\"\n }\n\n nodes, edges = _prepare_cig_plotly(\n project_name, revision, create_node_data, create_edge_data\n )\n figure = make_chord_plot(nodes, edges, \"Commit Interaction Graph\")\n\n if view_mode:\n figure.show()\n else:\n offply.plot(figure, filename=self.plot_file_name(\"html\"))\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass CIGChordPlotGenerator(\n PlotGenerator,\n generator_name=\"cig-chord-plot\",\n options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]\n):\n \"\"\"Generates a chord plot for a commit interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n CommitInteractionGraphChordPlot(\n self.plot_config, **self.plot_kwargs\n )\n ]\n\n\nclass CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'):\n \"\"\"Arc plot for a commit interaction graph.\"\"\"\n\n def plot(self, view_mode: bool) -> None:\n project_name: str = self.plot_kwargs[\"case_study\"].project_name\n revision = get_commit_map(project_name).convert_to_full_or_warn(\n ShortCommitHash(self.plot_kwargs[\"revision\"])\n )\n\n def create_node_data(\n node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph\n ) -> ArcPlotNodeInfo:\n return {\n \"info\": commit.commit_hash.short_hash,\n \"size\": cig.degree(node),\n \"fill_color\": cig.out_degree(node),\n \"line_color\": cig.in_degree(node)\n }\n\n def create_edge_data(\n source_commit: CommitRepoPair, sink_commit: CommitRepoPair,\n amount: int\n ) -> ArcPlotEdgeInfo:\n return {\n \"size\": amount,\n \"color\": amount,\n \"info\":\n f\"{source_commit.commit_hash.short_hash} \"\n f\"--{{{amount}}}--> \"\n f\"{sink_commit.commit_hash.short_hash}\"\n }\n\n nodes, edges = _prepare_cig_plotly(\n project_name, revision, create_node_data, create_edge_data\n )\n figure = make_arc_plot(nodes, edges, \"Commit Interaction Graph\")\n\n if view_mode:\n figure.show()\n else:\n offply.plot(figure, 
filename=self.plot_file_name(\"html\"))\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass CIGArcPlotGenerator(\n PlotGenerator,\n generator_name=\"cig-arc-plot\",\n options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]\n):\n \"\"\"Generates an arc plot for a commit interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs)\n ]\n\n\nOPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(\n \"--sort-by\",\n type=click.Choice([\"degree\", \"time\"]),\n default=\"degree\",\n required=False,\n help=\"Sort method for commit interaction graph nodes.\"\n)\n\n\nclass CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'):\n \"\"\"\n Plot node degrees of a commit interaction graph.\n\n Additional arguments:\n - sort: criteria to sort the revisions [degree, time]\n \"\"\"\n\n def plot(self, view_mode: bool) -> None:\n sort = self.plot_kwargs[\"sort\"]\n case_study = self.plot_kwargs[\"plot_case_study\"]\n\n style.use(self.plot_config.style())\n fig, axes = plt.subplots(1, 1, sharey=\"all\")\n fig.subplots_adjust(hspace=0.5)\n\n fig.suptitle(\"Commit Interaction Graph - Node Degrees\")\n axes.set_title(case_study.project_name)\n axes.set_ylabel(\"Degree\")\n xlabel = \"\"\n if sort == \"time\":\n xlabel = \"Time (old to new)\"\n elif sort == \"degree\":\n xlabel = \"Commits\"\n axes.set_xlabel(xlabel)\n\n revision = newest_processed_revision_for_case_study(\n case_study, BlameReport\n )\n if not revision:\n raise PlotDataEmpty()\n\n cig = create_blame_interaction_graph(case_study.project_name, revision\n ).commit_interaction_graph()\n commit_lookup = create_commit_lookup_helper(case_study.project_name)\n\n def filter_nodes(node: CommitRepoPair) -> bool:\n if node.commit_hash == UNCOMMITTED_COMMIT_HASH:\n return False\n return bool(commit_lookup(node))\n\n def commit_time(node: CommitRepoPair) -> datetime:\n return datetime.utcfromtimestamp(commit_lookup(node).commit_time)\n\n nodes: tp.List[tp.Dict[str, tp.Any]] = []\n for node in cig.nodes:\n node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])\n commit = node_attrs[\"commit\"]\n if not filter_nodes(commit):\n continue\n nodes.append(({\n \"commit_hash\": commit.commit_hash,\n \"commit_time\": commit_time(commit),\n \"node_degree\": cig.degree(node),\n \"node_out_degree\": cig.out_degree(node),\n \"node_in_degree\": cig.in_degree(node),\n }))\n\n data = pd.DataFrame(nodes)\n\n if sort == \"time\":\n data.sort_values(by=\"commit_time\", inplace=True)\n\n node_degrees = data.loc[:, [\"commit_hash\", \"node_degree\"]]\n node_out_degrees = data.loc[:, [\"commit_hash\", \"node_out_degree\"]]\n node_in_degrees = data.loc[:, [\"commit_hash\", \"node_in_degree\"]]\n\n if sort == \"degree\":\n node_degrees.sort_values(by=\"node_degree\", inplace=True)\n node_out_degrees.sort_values(by=\"node_out_degree\", inplace=True)\n node_in_degrees.sort_values(by=\"node_in_degree\", inplace=True)\n\n axes.plot(node_degrees[\"node_degree\"].values, label=\"degree\")\n axes.plot(\n node_out_degrees[\"node_out_degree\"].values, label=\"out_degree\"\n )\n axes.plot(node_in_degrees[\"node_in_degree\"].values, label=\"in_degree\")\n\n axes.legend()\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass CIGNodeDegreePlotGenerator(\n PlotGenerator,\n generator_name=\"cig-node-degrees\",\n options=[REQUIRE_CASE_STUDY, 
OPTIONAL_SORT_METHOD]\n):\n \"\"\"Generates a plot of node degrees of a commit interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n CommitInteractionGraphNodeDegreePlot(\n self.plot_config, **self.plot_kwargs\n )\n ]\n\n\nclass AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'):\n \"\"\"Plot node degrees of a author interaction graph.\"\"\"\n\n def plot(self, view_mode: bool) -> None:\n case_study = self.plot_kwargs[\"plot_case_study\"]\n\n style.use(self.plot_config.style())\n fig, axes = plt.subplots(1, 1, sharey=\"all\")\n fig.subplots_adjust(hspace=0.5)\n\n fig.suptitle(\"Author Interaction Graph - Node Degrees\")\n axes.set_title(case_study.project_name)\n axes.set_ylabel(\"Degree\")\n axes.set_xlabel(\"Authors\")\n\n project_name = case_study.project_name\n revision = newest_processed_revision_for_case_study(\n case_study, BlameReport\n )\n if not revision:\n raise PlotDataEmpty()\n\n aig = create_blame_interaction_graph(project_name, revision\n ).author_interaction_graph()\n\n nodes: tp.List[tp.Dict[str, tp.Any]] = []\n for node in aig.nodes:\n node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node])\n author = node_attrs[\"author\"]\n nodes.append(({\n \"author\": author,\n \"node_degree\": aig.degree(node),\n \"node_out_degree\": aig.out_degree(node),\n \"node_in_degree\": aig.in_degree(node),\n }))\n\n data = pd.DataFrame(nodes)\n node_degrees = data.loc[:, [\"author\", \"node_degree\"]]\n node_out_degrees = data.loc[:, [\"author\", \"node_out_degree\"]]\n node_in_degrees = data.loc[:, [\"author\", \"node_in_degree\"]]\n\n node_degrees.sort_values(by=\"node_degree\", inplace=True)\n node_out_degrees.sort_values(by=\"node_out_degree\", inplace=True)\n node_in_degrees.sort_values(by=\"node_in_degree\", inplace=True)\n\n axes.plot(node_degrees[\"node_degree\"].values, label=\"degree\")\n axes.plot(\n node_out_degrees[\"node_out_degree\"].values, label=\"out_degree\"\n )\n axes.plot(node_in_degrees[\"node_in_degree\"].values, label=\"in_degree\")\n\n axes.legend()\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass AIGNodeDegreePlotGenerator(\n PlotGenerator,\n generator_name=\"aig-node-degrees\",\n options=[REQUIRE_CASE_STUDY]\n):\n \"\"\"Generates a plot of node degrees of a author interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n AuthorInteractionGraphNodeDegreePlot(\n self.plot_config, **self.plot_kwargs\n )\n ]\n\n\nclass CommitAuthorInteractionGraphNodeDegreePlot(\n Plot, plot_name='caig_node_degrees'\n):\n \"\"\"Plot node degrees of commits in a commit-author interaction graph.\"\"\"\n\n def plot(self, view_mode: bool) -> None:\n case_study = self.plot_kwargs[\"plot_case_study\"]\n\n style.use(self.plot_config.style())\n fig, axes = plt.subplots(1, 1, sharey=\"all\")\n fig.subplots_adjust(hspace=0.5)\n\n fig.suptitle(\"Commit-Author Interaction Graph - # Interacting Authors\")\n axes.set_title(case_study.project_name)\n axes.set_ylabel(\"Authors\")\n axes.set_xlabel(\"Commits\")\n\n project_name = case_study.project_name\n revision = newest_processed_revision_for_case_study(\n case_study, BlameReport\n )\n if not revision:\n raise PlotDataEmpty()\n\n caig = create_blame_interaction_graph(project_name, revision\n ).commit_author_interaction_graph()\n\n nodes: tp.List[tp.Dict[str, tp.Any]] = []\n for node in caig.nodes:\n node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node])\n commit = node_attrs[\"commit\"]\n\n if commit:\n 
nodes.append(({\n \"commit\": commit.commit_hash,\n \"num_authors\": caig.degree(node)\n }))\n\n data = pd.DataFrame(nodes)\n num_authors = data.loc[:, [\"commit\", \"num_authors\"]]\n num_authors.sort_values(by=\"num_authors\", inplace=True)\n axes.plot(num_authors[\"num_authors\"].values)\n\n def calc_missing_revisions(\n self, boundary_gradient: float\n ) -> tp.Set[FullCommitHash]:\n raise NotImplementedError\n\n\nclass CAIGNodeDegreePlotGenerator(\n PlotGenerator,\n generator_name=\"caig-node-degrees\",\n options=[\n REQUIRE_CASE_STUDY,\n ]\n):\n \"\"\"Generates a plot of node degrees of a commit-author interaction graph.\"\"\"\n\n def generate(self) -> tp.List[Plot]:\n return [\n CommitAuthorInteractionGraphNodeDegreePlot(\n self.plot_config, **self.plot_kwargs\n )\n ]\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.subplots" ] ]
tao-harald/geoopt
[ "d6fea4d44f146877c5a430e9fd6ba0fb7e821b92" ]
[ "tests/test_rlinesearch.py" ]
[ "import geoopt\nimport torch\nimport pytest\n\n\n@pytest.mark.parametrize(\n \"line_search_params\",\n [dict(), dict(c1=1e-3, c2=0.99), dict(amax=1, amin=1e-12), dict(stabilize=10)],\n)\n@pytest.mark.parametrize(\"batch_size\", [None, 1, 16])\n@pytest.mark.parametrize(\"line_search_method\", [\"armijo\", \"wolfe\"])\n@pytest.mark.parametrize(\"cg_method\", [\"steepest\", \"fr\", \"pr\"])\ndef test_rwolfe_stiefel(line_search_params, batch_size, line_search_method, cg_method):\n # Use line search to solve orthogonal procrustes\n stiefel = geoopt.manifolds.Stiefel()\n torch.manual_seed(42)\n (n, m) = (10, 20)\n\n A = torch.randn(n, m, dtype=torch.float64)\n Q = stiefel.random((n, n), dtype=torch.float64)\n B = Q @ A\n\n with torch.no_grad():\n if batch_size is None:\n X = stiefel.random((n, n), dtype=torch.float64)\n else:\n X = stiefel.random((batch_size, n, n), dtype=torch.float64)\n X.requires_grad = True\n\n def closure():\n optim.zero_grad()\n loss = (X @ A - B).norm() ** 2\n loss.backward()\n return loss.item()\n\n optim = geoopt.optim.RiemannianLineSearch(\n [X],\n line_search_method=line_search_method,\n line_search_params=line_search_params,\n cg_method=cg_method,\n )\n\n loss = None\n for i in range(1000):\n loss = optim.step(closure)\n # Stop when no new step can be found, or goal reached\n if optim.last_step_size is None or loss < 1e-4:\n break\n assert loss < 1e-4\n" ]
[ [ "torch.manual_seed", "torch.no_grad", "torch.randn" ] ]
hircumg/Performance-RNN-PyTorch
[ "83ca93a2186ab5655fb2ca6e4ea9ce177e9d6111" ]
[ "preprocess.py" ]
[ "import os\nimport re\nimport sys\nimport torch\nimport hashlib\nfrom progress.bar import Bar\n\nfrom sequence import NoteSeq, EventSeq, ControlSeq\nimport utils\nimport config\n\ndef preprocess_midi(path):\n note_seq = NoteSeq.from_midi_file(path)\n note_seq.adjust_time(-note_seq.notes[0].start)\n event_seq = EventSeq.from_note_seq(note_seq)\n control_seq = ControlSeq.from_event_seq(event_seq)\n return event_seq.to_array(), control_seq.to_compressed_array()\n\ndef preprocess_midi_files_under(midi_root, save_dir):\n midi_paths = list(utils.find_files_by_extensions(midi_root, ['.mid', '.midi']))\n os.makedirs(save_dir, exist_ok=True)\n out_fmt = '{}-{}.data'\n\n for path in Bar('Processing').iter(midi_paths):\n print(' ', end='[{}]'.format(path), flush=True)\n\n try:\n data = preprocess_midi(path)\n except KeyboardInterrupt:\n print(' Abort')\n return\n except:\n print(' Error')\n continue\n\n name = os.path.basename(path)\n code = hashlib.md5(path.encode()).hexdigest()\n save_path = os.path.join(save_dir, out_fmt.format(name, code))\n torch.save(data, save_path)\n\n print('Done')\n\nif __name__ == '__main__':\n preprocess_midi_files_under(\n midi_root=sys.argv[1],\n save_dir=sys.argv[2])\n" ]
[ [ "torch.save" ] ]
connorjward/PyOP2
[ "35076d0cce1c64d322708a6f15fed85c0a2e30de" ]
[ "test/unit/test_linalg_complex.py" ]
[ "# This file is part of PyOP2\n#\n# PyOP2 is Copyright (c) 2012, Imperial College London and\n# others. Please see the AUTHORS file in the main source directory for\n# a full list of copyright holders. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The name of Imperial College London or that of other\n# contributors may not be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS\n# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n# OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport pytest\nimport numpy as np\n\nfrom pyop2 import op2\n\nnelems = 8\n\n\n@pytest.fixture\ndef set():\n return op2.Set(nelems)\n\n\n@pytest.fixture\ndef dset(set):\n return op2.DataSet(set, 1)\n\n\n@pytest.fixture\ndef x(dset):\n return op2.Dat(dset, None, np.complex128, \"x\")\n\n\n@pytest.fixture\ndef y(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1) + np.arange(1, nelems + 1)*1.j, np.complex128, \"y\")\n\n\n@pytest.fixture\ndef yf(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1), np.float64, \"y\")\n\n\n@pytest.fixture\ndef yc(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1), np.complex128, \"y\")\n\n\n@pytest.fixture\ndef yi(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1), np.int64, \"y\")\n\n\n@pytest.fixture\ndef x2():\n s = op2.Set(nelems, \"s1\")\n return op2.Dat(s ** (1, 2), np.zeros(2 * nelems), np.complex128, \"x\")\n\n\n@pytest.fixture\ndef y2():\n s = op2.Set(nelems, \"s2\")\n return op2.Dat(s ** (2, 1), np.zeros(2 * nelems), np.complex128, \"y\")\n\n\nclass TestLinAlgOp:\n\n \"\"\"\n Tests of linear algebra operators returning a new Dat.\n \"\"\"\n\n def test_add(self, x, y):\n x._data = 2 * y.data\n assert all((x + y).data == 3 * y.data)\n\n def test_sub(self, x, y):\n x._data = 2 * y.data\n assert all((x - y).data == y.data)\n\n def test_mul_complex(self, x, y):\n x._data = (2+2j) * y.data\n assert all((x * y).data == (2+2j) * y.data * y.data)\n\n def test_div_complex(self, x, y):\n x._data = (2+2j) * y.data\n # Note complex division does not have the same stability as\n # floating point when vectorised\n assert all(x.data / y.data == 2.0+2.j)\n assert np.allclose((x / y).data, 2.0+2.j)\n\n def test_mul(self, x, y):\n x._data = 2 * y.data\n assert all((x * y).data == 2 * y.data * y.data)\n\n def test_div(self, x, y):\n x._data = 2 * y.data\n x.data 
/ y.data\n # Note complex division does not have the same stability as\n # floating point when vectorised\n assert all(x.data/y.data == 2.0+0.j)\n assert np.allclose((x / y).data, 2.0+0.j)\n\n def test_add_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 + y2\n\n def test_sub_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 - y2\n\n def test_mul_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 * y2\n\n def test_div_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 / y2\n\n def test_add_scalar(self, x, y):\n x._data = y.data + 1.0\n assert all(x.data == (y + 1.0).data)\n\n def test_radd_scalar(self, x, y):\n x._data = y.data + 1.0\n assert all(x.data == (1.0 + y).data)\n\n def test_add_complex_scalar(self, x, y):\n x._data = y.data + (1.0+1.j)\n assert all(x.data == (y + (1.0+1.j)).data)\n\n def test_radd_complex_scalar(self, x, y):\n x._data = y.data + (1.0+1.j)\n assert all(x.data == ((1.0+1.j) + y).data)\n\n def test_pos_copies(self, y):\n z = +y\n assert all(z.data == y.data)\n assert z is not y\n\n def test_neg_copies(self, y):\n z = -y\n assert all(z.data == -y.data)\n assert z is not y\n\n def test_sub_scalar(self, x, y):\n x._data = y.data - 1.0\n assert all(x.data == (y - 1.0).data)\n\n def test_rsub_scalar(self, x, y):\n x._data = 1.0 - y.data\n assert all(x.data == (1.0 - y).data)\n\n def test_mul_scalar(self, x, y):\n x._data = 2 * y.data\n assert all(x.data == (y * 2.0).data)\n\n def test_rmul_scalar(self, x, y):\n x._data = 2 * y.data\n assert all(x.data == (2.0 * y).data)\n\n def test_sub_complex_scalar(self, x, y):\n x._data = y.data - (1.0+1.j)\n assert all(x.data == (y - (1.0+1.j)).data)\n\n def test_rsub_complex_scalar(self, x, y):\n x._data = (1.0+1.j) - y.data\n assert all(x.data == ((1.0+1.j) - y).data)\n\n def test_mul_complex_scalar(self, x, y):\n x._data = (2+2j) * y.data\n assert all(x.data == (y * (2.0+2.j)).data)\n\n def test_rmul_complex_scalar(self, x, y):\n x._data = (2+2j) * y.data\n assert all(x.data == ((2.0+2.j) * y).data)\n\n def test_div_scalar(self, x, y):\n x._data = 2 * y.data\n assert all((x / 2.0).data == y.data)\n\n def test_add_ftype(self, y, yf):\n x = y + yf\n assert x.data.dtype == np.complex128\n\n def test_sub_ftype(self, y, yf):\n x = y - yf\n assert x.data.dtype == np.complex128\n\n def test_mul_ftype(self, y, yf):\n x = y * yf\n assert x.data.dtype == np.complex128\n\n def test_div_ftype(self, y, yf):\n x = y / yf\n assert x.data.dtype == np.complex128\n\n def test_add_ctype(self, y, yc):\n x = y + yc\n assert x.data.dtype == np.complex128\n\n def test_sub_ctype(self, y, yc):\n x = y - yc\n assert x.data.dtype == np.complex128\n\n def test_mul_ctype(self, y, yc):\n x = y * yc\n assert x.data.dtype == np.complex128\n\n def test_div_ctype(self, y, yc):\n x = y / yc\n assert x.data.dtype == np.complex128\n\n def test_add_itype(self, y, yi):\n xi = yi + y\n assert xi.data.dtype == np.int64\n\n def test_sub_itype(self, y, yi):\n xi = yi - y\n assert xi.data.dtype == np.int64\n\n def test_mul_itype(self, y, yi):\n xi = yi * y\n assert xi.data.dtype == np.int64\n\n def test_div_itype(self, y, yi):\n xi = yi / y\n assert xi.data.dtype == np.int64\n\n def test_linalg_and_parloop(self, x, y):\n \"\"\"Linear algebra operators should force computation\"\"\"\n x._data = np.zeros(x.dataset.total_size, dtype=np.complex128)\n k = op2.Kernel('static void k(complex double *x) { *x = 1.0+1.0*I; }', 'k')\n op2.par_loop(k, x.dataset.set, x(op2.WRITE))\n z = x + y\n assert all(z.data 
== y.data + (1.+1.j))\n\n\nclass TestLinAlgIop:\n\n \"\"\"\n Tests of linear algebra operators modifying a Dat in place.\n \"\"\"\n\n def test_iadd(self, x, y):\n x._data = 2 * y.data\n x += y\n assert all(x.data == 3 * y.data)\n\n def test_isub(self, x, y):\n x._data = 2 * y.data\n x -= y\n assert all(x.data == y.data)\n\n def test_imul(self, x, y):\n x._data = 2 * y.data\n x *= y\n assert all(x.data == 2 * y.data * y.data)\n\n def test_idiv(self, x, y):\n x._data = 2 * y.data\n x /= y\n # Note complex division does not have the same stability as\n # floating point when vectorised\n assert np.allclose(x.data, 2.0 + 0.j)\n\n def test_iadd_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 += y2\n\n def test_isub_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 -= y2\n\n def test_imul_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 *= y2\n\n def test_idiv_shape_mismatch(self, x2, y2):\n with pytest.raises(ValueError):\n x2 /= y2\n\n def test_iadd_scalar(self, x, y):\n x._data = y.data + 1.0\n y += 1.0\n assert all(x.data == y.data)\n\n def test_isub_scalar(self, x, y):\n x._data = y.data - 1.0\n y -= 1.0\n assert all(x.data == y.data)\n\n def test_imul_scalar(self, x, y):\n x._data = 2 * y.data\n y *= 2.0\n assert all(x.data == y.data)\n\n def test_idiv_scalar(self, x, y):\n x._data = 2 * y.data\n x /= 2.0\n assert all(x.data == y.data)\n\n def test_iadd_complex_scalar(self, x, y):\n x._data = y.data + (1.0+1.j)\n y += (1.0+1.j)\n assert all(x.data == y.data)\n\n def test_isub_complex_scalar(self, x, y):\n x._data = y.data - (1.0+1.j)\n y -= (1.0+1.j)\n assert all(x.data == y.data)\n\n def test_imul_complex_scalar(self, x, y):\n x._data = (2+2j) * y.data\n y *= (2.0+2.j)\n assert all(x.data == y.data)\n\n def test_idiv_complex_scalar(self, x, y):\n x._data = (2+2j) * y.data\n x /= (2.0+2j)\n assert all(x.data == y.data)\n\n def test_iadd_ftype(self, y, yi):\n y += yi\n assert y.data.dtype == np.complex128\n\n def test_isub_ftype(self, y, yi):\n y -= yi\n assert y.data.dtype == np.complex128\n\n def test_imul_ftype(self, y, yi):\n y *= yi\n assert y.data.dtype == np.complex128\n\n def test_idiv_ftype(self, y, yi):\n y /= yi\n assert y.data.dtype == np.complex128\n\n def test_iadd_ctype(self, y, yc):\n y += yc\n assert y.data.dtype == np.complex128\n\n def test_isub_ctype(self, y, yc):\n y -= yc\n assert y.data.dtype == np.complex128\n\n def test_imul_ctype(self, y, yc):\n y *= yc\n assert y.data.dtype == np.complex128\n\n def test_idiv_ctype(self, y, yc):\n y /= yc\n assert y.data.dtype == np.complex128\n\n def test_iadd_itype(self, y, yi):\n yi += y\n assert yi.data.dtype == np.int64\n\n def test_isub_itype(self, y, yi):\n yi -= y\n assert yi.data.dtype == np.int64\n\n def test_imul_itype(self, y, yi):\n yi *= y\n assert yi.data.dtype == np.int64\n\n def test_idiv_itype(self, y, yi):\n yi /= y\n assert yi.data.dtype == np.int64\n\n\nclass TestLinAlgScalar:\n\n \"\"\"\n Tests of linear algebra operators return a scalar.\n \"\"\"\n\n def test_norm(self):\n s = op2.Set(2)\n n = op2.Dat(s, [3, 4j], np.complex128, \"n\")\n assert type(n.norm) is float\n assert abs(n.norm - 5) < 1e-12\n\n def test_inner(self):\n s = op2.Set(2)\n n = op2.Dat(s, [3, 4j], np.complex128)\n o = op2.Dat(s, [4, 5j], np.complex128)\n\n ret = n.inner(o)\n\n assert abs(ret - 32) < 1e-12\n\n ret = o.inner(n)\n\n assert abs(ret - 32) < 1e-12\n\n def test_norm_mixed(self):\n s = op2.Set(1)\n\n n = op2.Dat(s, [3], np.complex128)\n o = op2.Dat(s, [4j], 
np.complex128)\n\n md = op2.MixedDat([n, o])\n assert type(md.norm) is float\n assert abs(md.norm - 5) < 1e-12\n\n def test_inner_mixed(self):\n s = op2.Set(1)\n\n n = op2.Dat(s, [3], np.complex128)\n o = op2.Dat(s, [4j], np.complex128)\n\n md = op2.MixedDat([n, o])\n\n n1 = op2.Dat(s, [4], np.complex128)\n o1 = op2.Dat(s, [5j], np.complex128)\n\n md1 = op2.MixedDat([n1, o1])\n\n ret = md.inner(md1)\n\n assert abs(ret - 32) < 1e-12\n\n ret = md1.inner(md)\n\n assert abs(ret - 32) < 1e-12\n" ]
[ [ "numpy.allclose", "numpy.arange", "numpy.zeros" ] ]
e-esteva/cipher
[ "44e240901f7fb5d28531012e83b63427a4726221" ]
[ "cipher/preprocess/encode_utils.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport subprocess \nfrom .wrangle import filter_encode_metatable, extract_metatable_information\n\ndef _download_url(url, outpath=None):\n \"\"\"\n Download a file from a given url and save it with a specified output file \n if necessary. \n\n Parameters\n ----------\n url : <str>\n The url of the file to download.\n outpath : <str>\n The full output file path. If None specified, the file is saved \n in the current working directory with its original name. \n \n Returns\n -------\n None\n\n Example\n -------\n >>> url = \"https://www.encodeproject.org/files/ENCFF695MMQ/@@download/ENCFF695MMQ.bed.gz\"\n >>> outpath = \"./downloads/out.bed.gz\"\n >>> _download_url(url, outpath) \n\n \"\"\"\n if outpath is None:\n cmd = ['wget', url]\n else:\n cmd = ['wget', url, '-O', outpath]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\ndef download_cell_line_data(metadata_path, tfchipdir):\n \"\"\"This function parses a raw meta data file downloaded from \n the ENCODE website, downloads a curated list of ChIPseq bed files \n into a directory organized by cell line and TF. \n Additional meta data is also saved. \n \n Parameters\n ----------\n metadata_path : <str>\n The path to the input meta data file in tsv format. \n\n tfchipdir : <str>\n The path to the directory in which the bed files are to downloaded and saved onto. \n\n\n Returns\n -------\n None\n\n Example\n -------\n >>> metadata_path = './A549.tsv'\n >>> tfchipdir = \"./tf_chip/\"\n >>> download_cell_line_data(metadata_path, tfchipdir) \n\n \"\"\"\n # load the meta data file and filter its contents \n metatable_filtered = filter_encode_metatable(metadata_path)\n res = extract_metatable_information(metatable_filtered)\n df = pd.DataFrame.from_dict(res)\n\n # loop through all the rows of the cell type metadata table.\n for idx in df.index:\n row = df.iloc[idx]\n\n # get the output directory and output path for the bed file\n tf = row['tf_list']\n cell_type = row['cell_type_list']\n url = row['url_list']\n file_accession = row['file_accession_list']\n outdir = os.path.join(tfchipdir, cell_type, tf)\n outpath = os.path.join(outdir, file_accession+\".bed.gz\")\n\n # get the meta data path \n meta_df_name = os.path.join(outdir, 'metadata.tsv')\n\n # load the meta data if it already exists ; create new one if not. \n if not os.path.exists(outdir):\n os.makedirs(outdir)\n meta_df = pd.DataFrame(data=[], columns=list(df.columns[2:]))\n else:\n meta_df = pd.read_csv(meta_df_name, sep='\\t')\n \n # download the bed file \n _download_url(url, outpath=outpath)\n\n # update the metadata table\n remaining_metadata = row.iloc[2:] \n meta_df = meta_df.append(remaining_metadata)\n\n # save the meta data table \n meta_df.to_csv(meta_df_name, sep='\\t')\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.read_csv" ] ]
Nicolinho/RLBench
[ "3014e872f518d5439e73e057e2251dee1f9df481" ]
[ "rlbench/demo.py" ]
[ "import numpy as np\n\n\nclass Demo(object):\n\n def __init__(self, observations, random_seed=None):\n self._observations = observations\n self.random_seed = random_seed\n\n def __len__(self):\n return len(self._observations)\n\n def __getitem__(self, i):\n return self._observations[i]\n\n def restore_state(self):\n np.random.set_state(self.random_seed)\n" ]
[ [ "numpy.random.set_state" ] ]
aapeliv/tensorflow
[ "cd2a135c126f209ccc943555b85ca436ea27ffe3" ]
[ "tensorflow/python/estimator/estimator.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Base Estimator class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport os\nimport tempfile\n\nimport numpy as np\nimport six\n\nfrom google.protobuf import message\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensorflow.python.estimator import run_config\nfrom tensorflow.python.estimator import util as estimator_util\nfrom tensorflow.python.estimator.export import export as export_helpers\nfrom tensorflow.python.estimator.export import export_output\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import metrics as metrics_lib\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import builder as saved_model_builder\nfrom tensorflow.python.saved_model import utils_impl as saved_model_utils\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.training import distribute as distribute_lib\nfrom tensorflow.python.training import evaluation\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training import training\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training import warm_starting_util\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import compat_internal\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import estimator_export\n\n\n_VALID_MODEL_FN_ARGS = set(\n ['features', 'labels', 'mode', 'params', 'self', 'config'])\n\n\n@estimator_export('estimator.Estimator')\nclass Estimator(object):\n \"\"\"Estimator class to train and evaluate TensorFlow models.\n\n The `Estimator` object wraps a model which is specified by a `model_fn`,\n which, given inputs and a number of other parameters, 
returns the ops\n necessary to perform training, evaluation, or predictions.\n\n All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a\n subdirectory thereof. If `model_dir` is not set, a temporary directory is\n used.\n\n The `config` argument can be passed `tf.estimator.RunConfig` object containing\n information about the execution environment. It is passed on to the\n `model_fn`, if the `model_fn` has a parameter named \"config\" (and input\n functions in the same manner). If the `config` parameter is not passed, it is\n instantiated by the `Estimator`. Not passing config means that defaults useful\n for local execution are used. `Estimator` makes config available to the model\n (for instance, to allow specialization based on the number of workers\n available), and also uses some of its fields to control internals, especially\n regarding checkpointing.\n\n The `params` argument contains hyperparameters. It is passed to the\n `model_fn`, if the `model_fn` has a parameter named \"params\", and to the input\n functions in the same manner. `Estimator` only passes params along, it does\n not inspect it. The structure of `params` is therefore entirely up to the\n developer.\n\n None of `Estimator`'s methods can be overridden in subclasses (its\n constructor enforces this). Subclasses should use `model_fn` to configure\n the base class, and may add methods implementing specialized functionality.\n\n @compatibility(eager)\n Calling methods of `Estimator` will work while eager execution is enabled.\n However, the `model_fn` and `input_fn` is not executed eagerly, `Estimator`\n will switch to graph model before calling all user-provided functions (incl.\n hooks), so their code has to be compatible with graph mode execution. Note\n that `input_fn` code using `tf.data` generally works in both graph and eager\n modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self, model_fn, model_dir=None, config=None, params=None,\n warm_start_from=None):\n \"\"\"Constructs an `Estimator` instance.\n\n See [estimators](https://tensorflow.org/guide/estimators) for more information.\n To warm-start an `Estimator`:\n\n ```python\n estimator = tf.estimator.DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n ```\n\n For more details on warm-start configuration, see\n `tf.estimator.WarmStartSettings`.\n\n Args:\n model_fn: Model function. Follows the signature:\n\n * Args:\n\n * `features`: This is the first item returned from the `input_fn`\n passed to `train`, `evaluate`, and `predict`. This should be a\n single `tf.Tensor` or `dict` of same.\n * `labels`: This is the second item returned from the `input_fn`\n passed to `train`, `evaluate`, and `predict`. This should be a\n single `tf.Tensor` or `dict` of same (for multi-head models).\n If mode is @{tf.estimator.ModeKeys.PREDICT}, `labels=None` will\n be passed. If the `model_fn`'s signature does not accept\n `mode`, the `model_fn` must still be able to handle\n `labels=None`.\n * `mode`: Optional. Specifies if this training, evaluation or\n prediction. See `tf.estimator.ModeKeys`.\n * `params`: Optional `dict` of hyperparameters. Will receive what\n is passed to Estimator in `params` parameter. This allows\n to configure Estimators from hyper parameter tuning.\n * `config`: Optional `estimator.RunConfig` object. Will receive what\n is passed to Estimator as its `config` parameter, or a default\n value. 
Allows setting up things in your `model_fn` based on\n configuration such as `num_ps_replicas`, or `model_dir`.\n\n * Returns:\n `tf.estimator.EstimatorSpec`\n\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model. If `PathLike` object, the\n path will be resolved. If `None`, the model_dir in `config` will be used\n if set. If both are set, they must be same. If both are `None`, a\n temporary directory will be used.\n config: `estimator.RunConfig` configuration object.\n params: `dict` of hyper parameters that will be passed into `model_fn`.\n Keys are names of parameters, values are basic python types.\n warm_start_from: Optional string filepath to a checkpoint or SavedModel to\n warm-start from, or a `tf.estimator.WarmStartSettings`\n object to fully configure warm-starting. If the string\n filepath is provided instead of a\n `tf.estimator.WarmStartSettings`, then all variables are\n warm-started, and it is assumed that vocabularies\n and `tf.Tensor` names are unchanged.\n\n Raises:\n ValueError: parameters of `model_fn` don't match `params`.\n ValueError: if this is called via a subclass and if that class overrides\n a member of `Estimator`.\n \"\"\"\n Estimator._assert_members_are_not_overridden(self)\n\n self._config = maybe_overwrite_model_dir_and_session_config(config,\n model_dir)\n\n # The distribute field contains an instance of DistributionStrategy.\n self._train_distribution = self._config.train_distribute\n self._eval_distribution = self._config.eval_distribute\n # Model directory.\n self._model_dir = self._config.model_dir\n self._session_config = self._config.session_config\n logging.info('Using config: %s', str(vars(self._config)))\n\n self._device_fn = (\n self._config.device_fn or _get_replica_device_setter(self._config))\n\n if model_fn is None:\n raise ValueError('model_fn must be provided to Estimator.')\n _verify_model_fn_args(model_fn, params)\n self._model_fn = model_fn\n self._params = copy.deepcopy(params or {})\n\n # pylint: disable=protected-access\n self._warm_start_settings = _get_default_warm_start_settings(\n warm_start_from)\n # pylint: enable=protected-access\n\n @property\n def model_dir(self):\n return self._model_dir\n\n @property\n def config(self):\n return copy.deepcopy(self._config)\n\n @property\n def params(self):\n return copy.deepcopy(self._params)\n\n @property\n def model_fn(self):\n \"\"\"Returns the `model_fn` which is bound to `self.params`.\n\n Returns:\n The `model_fn` with following signature:\n `def model_fn(features, labels, mode, config)`\n \"\"\"\n\n def public_model_fn(features, labels, mode, config):\n return self._call_model_fn(features, labels, mode, config)\n\n return public_model_fn\n\n # TODO(ispir): support a list of names\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string or a list of string, name of the tensor.\n\n Returns:\n Numpy array - value of the tensor.\n\n Raises:\n ValueError: If the `Estimator` has not produced a checkpoint yet.\n \"\"\"\n _check_checkpoint_available(self.model_dir)\n with context.graph_mode():\n return training.load_variable(self.model_dir, name)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n\n Raises:\n ValueError: If the `Estimator` has not produced a checkpoint yet.\n \"\"\"\n 
_check_checkpoint_available(self.model_dir)\n with context.graph_mode():\n return [name for name, _ in training.list_variables(self.model_dir)]\n\n def latest_checkpoint(self):\n \"\"\"Finds the filename of the latest saved checkpoint file in `model_dir`.\n\n Returns:\n The full path to the latest checkpoint or `None` if no checkpoint was\n found.\n \"\"\"\n with context.graph_mode():\n return checkpoint_management.latest_checkpoint(self.model_dir)\n\n def train(self,\n input_fn,\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None):\n \"\"\"Trains a model given training data `input_fn`.\n\n Args:\n input_fn: A function that provides input data for training as minibatches.\n See [Premade\n Estimators](https://tensorflow.org/guide/premade_estimators#create_input_functions)\n for more information. The function should construct and return one of\n the following: * A\n `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple\n `(features, labels)` with same constraints as below. * A tuple\n `(features, labels)`: Where `features` is a `tf.Tensor` or a dictionary\n of string feature name to `Tensor` and `labels` is a `Tensor` or a\n dictionary of string label name to `Tensor`. Both `features` and\n `labels` are consumed by `model_fn`. They should satisfy the expectation\n of `model_fn` from inputs.\n hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n callbacks inside the training loop.\n steps: Number of steps for which to train the model. If `None`, train\n forever or train until `input_fn` generates the `tf.errors.OutOfRange`\n error or `StopIteration` exception. `steps` works incrementally. If you\n call two times `train(steps=10)` then training occurs in total 20 steps.\n If `OutOfRange` or `StopIteration` occurs in the middle, training stops\n before 20 steps. If you don't want to have incremental behavior please\n set `max_steps` instead. If set, `max_steps` must be `None`.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever or train until `input_fn` generates the\n `tf.errors.OutOfRange` error or `StopIteration` exception. If set,\n `steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the\n middle, training stops before `max_steps` steps. Two calls to\n `train(steps=100)` means 200 training iterations. On the other hand, two\n calls to `train(max_steps=100)` means that the second call will not do\n any iteration since first call did all 100 steps.\n saving_listeners: list of `CheckpointSaverListener` objects. Used for\n callbacks that run immediately before or after checkpoint savings.\n\n Returns:\n `self`, for chaining.\n\n Raises:\n ValueError: If both `steps` and `max_steps` are not `None`.\n ValueError: If either `steps` or `max_steps <= 0`.\n \"\"\"\n if self.config.task_type in (run_config.TaskType.EVALUATOR,\n run_config.TaskType.PS):\n raise ValueError(\n 'Train has been called wrong configuration. Please use '\n 'tf.estimator.train_and_evaluate which calls propper API according '\n 'to given configuration. 
Current configuration: {}.'.format(\n self.config))\n\n with context.graph_mode():\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n if max_steps is not None and max_steps <= 0:\n raise ValueError(\n 'Must specify max_steps > 0, given: {}'.format(max_steps))\n\n if max_steps is not None:\n start_step = _load_global_step_from_checkpoint_dir(self._model_dir)\n if max_steps <= start_step:\n logging.info('Skipping training since max_steps has already saved.')\n return self\n\n hooks = _check_hooks_type(hooks)\n hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))\n\n saving_listeners = _check_listeners_type(saving_listeners)\n loss = self._train_model(input_fn, hooks, saving_listeners)\n logging.info('Loss for final step: %s.', loss)\n return self\n\n def _convert_train_steps_to_hooks(self, steps, max_steps):\n \"\"\"Create hooks to run correct number of steps in training.\n\n Args:\n steps: number of steps to run during training.\n max_steps: maximum number of steps to be run during training. It'll be\n the maximum number of steps the model will train to after restoring\n from checkpoint even across multiple estimator.train calls.\n\n Returns:\n List of hooks to be passed to the estimator.\n \"\"\"\n if steps is not None or max_steps is not None:\n if self._train_distribution:\n steps_per_run = getattr(self._train_distribution, 'steps_per_run', 1)\n if steps_per_run > 1:\n return [basic_session_run_hooks._MultiStepStopAtStepHook( # pylint: disable=protected-access\n steps, max_steps, steps_per_run)]\n return [training.StopAtStepHook(steps, max_steps)]\n else:\n return []\n\n def eval_dir(self, name=None):\n \"\"\"Shows the directory name where evaluation metrics are dumped.\n\n Args:\n name: Name of the evaluation if user needs to run multiple evaluations on\n different data sets, such as on training data vs test data. Metrics for\n different evaluations are saved in separate folders, and appear\n separately in tensorboard.\n\n Returns:\n A string which is the path of directory contains evaluation metrics.\n \"\"\"\n return os.path.join(self._model_dir, 'eval' if not name else\n 'eval_' + name)\n\n def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,\n name=None):\n \"\"\"Evaluates the model given evaluation data `input_fn`.\n\n For each step, calls `input_fn`, which returns one batch of data.\n Evaluates until:\n - `steps` batches are processed, or\n - `input_fn` raises an end-of-input exception (`tf.errors.OutOfRangeError`\n or\n `StopIteration`).\n\n Args:\n input_fn: A function that constructs the input data for evaluation. See\n [Premade Estimators](https://tensorflow.org/guide/premade#create_input_functions}\n for more information. The\n function should construct and return one of the following: * A\n `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple\n `(features, labels)` with same constraints as below. * A tuple\n `(features, labels)`: Where `features` is a `tf.Tensor` or a dictionary\n of string feature name to `Tensor` and `labels` is a `Tensor` or a\n dictionary of string label name to `Tensor`. Both `features` and\n `labels` are consumed by `model_fn`. They should satisfy the expectation\n of `model_fn` from inputs.\n steps: Number of steps for which to evaluate model. 
If `None`, evaluates\n until `input_fn` raises an end-of-input exception.\n hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n callbacks inside the evaluation call.\n checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the\n latest checkpoint in `model_dir` is used. If there are no checkpoints\n in `model_dir`, evaluation is run with newly initialized `Variables`\n instead of ones restored from checkpoint.\n name: Name of the evaluation if user needs to run multiple evaluations on\n different data sets, such as on training data vs test data. Metrics for\n different evaluations are saved in separate folders, and appear\n separately in tensorboard.\n\n Returns:\n A dict containing the evaluation metrics specified in `model_fn` keyed by\n name, as well as an entry `global_step` which contains the value of the\n global step for which this evaluation was performed.\n\n Raises:\n ValueError: If `steps <= 0`.\n ValueError: If no model has been trained, namely `model_dir`, or the\n given `checkpoint_path` is empty.\n \"\"\"\n with context.graph_mode():\n hooks = _check_hooks_type(hooks)\n hooks.extend(self._convert_eval_steps_to_hooks(steps))\n\n # Check that model has been trained (if nothing has been set explicitly).\n if not checkpoint_path:\n latest_path = checkpoint_management.latest_checkpoint(self._model_dir)\n if not latest_path:\n logging.info('Could not find trained model in model_dir: {}, running '\n 'initialization to evaluate.'.format(self._model_dir))\n checkpoint_path = latest_path\n\n def _evaluate():\n (scaffold, update_op, eval_dict, all_hooks) = (\n self._evaluate_build_graph(input_fn, hooks, checkpoint_path))\n return self._evaluate_run(\n checkpoint_path=checkpoint_path,\n scaffold=scaffold,\n update_op=update_op,\n eval_dict=eval_dict,\n all_hooks=all_hooks,\n output_dir=self.eval_dir(name))\n\n with ops.Graph().as_default():\n if self._eval_distribution:\n with self._eval_distribution.scope():\n return _evaluate()\n else:\n return _evaluate()\n\n def _convert_eval_steps_to_hooks(self, steps):\n if steps is None:\n return []\n\n if steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access\n\n def predict(self,\n input_fn,\n predict_keys=None,\n hooks=None,\n checkpoint_path=None,\n yield_single_examples=True):\n \"\"\"Yields predictions for given features.\n\n Args:\n input_fn: A function that constructs the features. Prediction continues\n until `input_fn` raises an end-of-input exception\n (`tf.errors.OutOfRangeError` or `StopIteration`).\n See [Premade\n Estimators](https://tensorflow.org/guide/premade_estimators#create_input_functions)\n for more information. The function should construct and return one of\n the following:\n\n * A `tf.data.Dataset` object: Outputs of `Dataset` object must have\n same constraints as below.\n * features: A `tf.Tensor` or a dictionary of string feature name to\n `Tensor`. features are consumed by `model_fn`. They should satisfy\n the expectation of `model_fn` from inputs.\n * A tuple, in which case the first item is extracted as features.\n\n predict_keys: list of `str`, name of the keys to predict. It is used if\n the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If\n `predict_keys` is used then rest of the predictions will be filtered\n from the dictionary. If `None`, returns all.\n hooks: List of `tf.train.SessionRunHook` subclass instances. 
Used for\n callbacks inside the prediction call.\n checkpoint_path: Path of a specific checkpoint to predict. If `None`, the\n latest checkpoint in `model_dir` is used. If there are no checkpoints\n in `model_dir`, prediction is run with newly initialized `Variables`\n instead of ones restored from checkpoint.\n yield_single_examples: If `False`, yields the whole batch as returned by\n the `model_fn` instead of decomposing the batch into individual\n elements. This is useful if `model_fn` returns some tensors whose first\n dimension is not equal to the batch size.\n\n Yields:\n Evaluated values of `predictions` tensors.\n\n Raises:\n ValueError: Could not find a trained model in `model_dir`.\n ValueError: If batch length of predictions is not the same and\n `yield_single_examples` is `True`.\n ValueError: If there is a conflict between `predict_keys` and\n `predictions`. For example if `predict_keys` is not `None` but\n `tf.estimator.EstimatorSpec.predictions` is not a `dict`.\n \"\"\"\n with context.graph_mode():\n hooks = _check_hooks_type(hooks)\n # Check that model has been trained.\n if not checkpoint_path:\n checkpoint_path = checkpoint_management.latest_checkpoint(\n self._model_dir)\n if not checkpoint_path:\n logging.info('Could not find trained model in model_dir: {}, running '\n 'initialization to predict.'.format(self._model_dir))\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n self._create_and_assert_global_step(g)\n features, input_hooks = self._get_features_from_input_fn(\n input_fn, model_fn_lib.ModeKeys.PREDICT)\n estimator_spec = self._call_model_fn(\n features, None, model_fn_lib.ModeKeys.PREDICT, self.config)\n\n # Call to warm_start has to be after model_fn is called.\n self._maybe_warm_start(checkpoint_path)\n\n predictions = self._extract_keys(\n estimator_spec.predictions, predict_keys)\n all_hooks = list(input_hooks)\n all_hooks.extend(hooks)\n all_hooks.extend(list(estimator_spec.prediction_hooks or []))\n with training.MonitoredSession(\n session_creator=training.ChiefSessionCreator(\n checkpoint_filename_with_path=checkpoint_path,\n master=self._config.master,\n scaffold=estimator_spec.scaffold,\n config=self._session_config),\n hooks=all_hooks) as mon_sess:\n while not mon_sess.should_stop():\n preds_evaluated = mon_sess.run(predictions)\n if not yield_single_examples:\n yield preds_evaluated\n elif not isinstance(predictions, dict):\n for pred in preds_evaluated:\n yield pred\n else:\n for i in range(self._extract_batch_length(preds_evaluated)):\n yield {\n key: value[i]\n for key, value in six.iteritems(preds_evaluated)\n }\n\n def _assert_members_are_not_overridden(self):\n \"\"\"Asserts members of `Estimator` are not overridden.\"\"\"\n # TPUEstimator is special cased (owned by TF).\n if self.__class__.__name__ == 'TPUEstimator':\n return\n\n allowed_overrides = set([\n '_create_and_assert_global_step',\n '_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',\n '_estimator_api_names_v1', '_estimator_api_constants',\n '_estimator_api_constants_v1',\n ])\n estimator_members = set([m for m in Estimator.__dict__.keys()\n if not m.startswith('__')])\n subclass_members = set(self.__class__.__dict__.keys())\n common_members = estimator_members & subclass_members - allowed_overrides\n overridden_members = [\n m for m in common_members\n if Estimator.__dict__[m] != self.__class__.__dict__[m]]\n if overridden_members:\n raise ValueError(\n 'Subclasses of Estimator cannot override members of Estimator. 
'\n '{} does override {}'.format(self.__class__, overridden_members))\n\n def export_savedmodel(\n self, export_dir_base, serving_input_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False):\n # pylint: disable=line-too-long\n \"\"\"Exports inference graph as a `SavedModel` into the given dir.\n\n For a detailed guide, see\n [Using SavedModel with Estimators](https://tensorflow.org/guide/saved_model#using_savedmodel_with_estimators).\n\n This method builds a new graph by first calling the\n `serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling\n this `Estimator`'s `model_fn` to generate the model graph based on those\n features. It restores the given checkpoint (or, lacking that, the most\n recent checkpoint) into this graph in a fresh session. Finally it creates\n a timestamped export directory below the given `export_dir_base`, and writes\n a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this\n session.\n\n The exported `MetaGraphDef` will provide one `SignatureDef` for each\n element of the `export_outputs` dict returned from the `model_fn`, named\n using\n the same keys. One of these keys is always\n `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,\n indicating which\n signature will be served when a serving request does not specify one.\n For each signature, the outputs are provided by the corresponding\n `tf.estimator.export.ExportOutput`s, and the inputs are always the input\n receivers provided by\n the `serving_input_receiver_fn`.\n\n Extra assets may be written into the `SavedModel` via the `assets_extra`\n argument. This should be a dict, where each key gives a destination path\n (including the filename) relative to the assets.extra directory. The\n corresponding value gives the full path of the source file to be copied.\n For example, the simple case of copying a single file without renaming it\n is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n\n Args:\n export_dir_base: A string containing a directory in which to create\n timestamped subdirectories containing exported `SavedModel`s.\n serving_input_receiver_fn: A function that takes no argument and returns a\n `tf.estimator.export.ServingInputReceiver` or\n `tf.estimator.export.TensorServingInputReceiver`.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported `SavedModel`, or `None` if no extra assets are\n needed.\n as_text: whether to write the `SavedModel` proto in text format.\n checkpoint_path: The checkpoint path to export. If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the `NodeDef`s. 
For a detailed guide, see [Stripping\n Default-Valued\n Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n\n Returns:\n The string path to the exported directory.\n\n Raises:\n ValueError: if no `serving_input_receiver_fn` is provided, no\n `export_outputs`\n are provided, or no checkpoint can be found.\n \"\"\"\n # pylint: enable=line-too-long\n return self._export_saved_model_for_mode(\n export_dir_base,\n serving_input_receiver_fn,\n assets_extra=assets_extra,\n as_text=as_text,\n checkpoint_path=checkpoint_path,\n strip_default_attrs=strip_default_attrs,\n mode=model_fn_lib.ModeKeys.PREDICT)\n\n def _export_saved_model_for_mode(\n self, export_dir_base, input_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False,\n mode=model_fn_lib.ModeKeys.PREDICT):\n # pylint: disable=line-too-long\n \"\"\"Exports a single train/eval/predict graph as a `SavedModel`.\n\n This method is a wrapper for `_export_all_saved_models`, and wraps a raw\n `input_receiver_fn` in a dictionary to pass in to that function.\n See `_export_all_saved_models` for full docs.\n\n See `tf.contrib.estimator.export_saved_model_for_mode` for the currently\n exposed version of this function.\n\n Args:\n export_dir_base: A string containing a directory in which to create\n timestamped subdirectories containing exported `SavedModel`s.\n input_receiver_fn: a function that takes no argument and returns the\n appropriate subclass of `InputReceiver`.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported `SavedModel`, or `None` if no extra assets are\n needed.\n as_text: whether to write the `SavedModel` proto in text format.\n checkpoint_path: The checkpoint path to export. If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the `NodeDef`s. For a detailed guide, see [Stripping\n Default-Valued\n Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n mode: `tf.estimator.ModeKeys` value indicating with mode will be exported.\n\n Returns:\n The string path to the exported directory.\n\n Raises:\n ValueError: if `input_receiver_fn` is `None`, no `export_outputs`\n are provided, or no checkpoint can be found.\n \"\"\"\n # pylint: enable=line-too-long\n if not input_receiver_fn:\n raise ValueError('An input_receiver_fn must be defined.')\n\n input_receiver_fn_map = {mode: input_receiver_fn}\n\n return self._export_all_saved_models(\n export_dir_base,\n input_receiver_fn_map,\n assets_extra=assets_extra,\n as_text=as_text,\n checkpoint_path=checkpoint_path,\n strip_default_attrs=strip_default_attrs)\n\n def _export_all_saved_models(\n self, export_dir_base, input_receiver_fn_map,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False):\n # pylint: disable=line-too-long\n \"\"\"Exports a `SavedModel` containing `tf.MetaGraphDefs` for each requested mode.\n\n See `tf.contrib.estimator.export_all_saved_models` for the currently\n exposed version of this function.\n\n For each mode passed in via the `input_receiver_fn_map`,\n this method builds a new graph by calling the `input_receiver_fn` to obtain\n feature and label `Tensor`s. 
Next, this method calls the `Estimator`'s\n `model_fn` in the passed mode to generate the model graph based on\n those features and labels, and restores the given checkpoint\n (or, lacking that, the most recent checkpoint) into the graph.\n Only one of the modes is used for saving variables to the `SavedModel`\n (order of preference: @{tf.estimator.ModeKeys#TRAIN$TRAIN},\n @{tf.estimator.ModeKeys#EVAL$EVAL}, then\n @{tf.estimator.ModeKeys#PREDICT$PREDICT}), such that up to three\n `tf.MetaGraphDefs` are saved with a single set of variables in a single\n `SavedModel` directory.\n\n For the variables and `tf.MetaGraphDefs`, a timestamped export directory\n below\n `export_dir_base`, and writes a `SavedModel` into it containing\n the `tf.MetaGraphDef` for the given mode and its associated signatures.\n\n For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`\n for each element of the `export_outputs` dict returned from the `model_fn`,\n named using the same keys. One of these keys is always\n `tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,\n indicating which\n signature will be served when a serving request does not specify one.\n For each signature, the outputs are provided by the corresponding\n `tf.estimator.export.ExportOutput`s, and the inputs are always the input\n receivers provided by\n the `serving_input_receiver_fn`.\n\n For training and evaluation, the `train_op` is stored in an extra\n collection,\n and loss, metrics, and predictions are included in a `SignatureDef` for the\n mode in question.\n\n Extra assets may be written into the `SavedModel` via the `assets_extra`\n argument. This should be a dict, where each key gives a destination path\n (including the filename) relative to the assets.extra directory. The\n corresponding value gives the full path of the source file to be copied.\n For example, the simple case of copying a single file without renaming it\n is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.\n\n Args:\n export_dir_base: A string containing a directory in which to create\n timestamped subdirectories containing exported `SavedModel`s.\n input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to\n `input_receiver_fn` mappings, where the `input_receiver_fn` is a\n function that takes no arguments and returns the appropriate subclass of\n `InputReceiver`.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported `SavedModel`, or `None` if no extra assets are\n needed.\n as_text: whether to write the `SavedModel` proto in text format.\n checkpoint_path: The checkpoint path to export. If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the `NodeDef`s. 
For a detailed guide, see [Stripping\n Default-Valued\n Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n\n Returns:\n A dict of `tf.estimator.ModeKeys` value to string path for each exported\n directory.\n\n Raises:\n ValueError: if any `input_receiver_fn` is `None`, no `export_outputs`\n are provided, or no checkpoint can be found.\n \"\"\"\n # pylint: enable=line-too-long\n # TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode.\n with context.graph_mode():\n if not checkpoint_path:\n # Locate the latest checkpoint\n checkpoint_path = checkpoint_management.latest_checkpoint(\n self._model_dir)\n if not checkpoint_path:\n raise ValueError(\"Couldn't find trained model at %s.\" % self._model_dir)\n\n export_dir = export_helpers.get_timestamped_export_dir(export_dir_base)\n temp_export_dir = export_helpers.get_temp_export_dir(export_dir)\n\n builder = saved_model_builder.SavedModelBuilder(temp_export_dir)\n\n save_variables = True\n # Note that the order in which we run here matters, as the first\n # mode we pass through will be used to save the variables. We run TRAIN\n # first, as that is also the mode used for checkpoints, and therefore\n # we are not likely to have vars in PREDICT that are not in the checkpoint\n # created by TRAIN.\n if input_receiver_fn_map.get(model_fn_lib.ModeKeys.TRAIN):\n self._add_meta_graph_for_mode(\n builder, input_receiver_fn_map, checkpoint_path,\n strip_default_attrs, save_variables,\n mode=model_fn_lib.ModeKeys.TRAIN)\n save_variables = False\n if input_receiver_fn_map.get(model_fn_lib.ModeKeys.EVAL):\n self._add_meta_graph_for_mode(\n builder, input_receiver_fn_map, checkpoint_path,\n strip_default_attrs, save_variables,\n mode=model_fn_lib.ModeKeys.EVAL)\n save_variables = False\n if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):\n self._add_meta_graph_for_mode(\n builder, input_receiver_fn_map, checkpoint_path,\n strip_default_attrs, save_variables,\n mode=model_fn_lib.ModeKeys.PREDICT)\n save_variables = False\n\n if save_variables:\n raise ValueError('No valid modes for exporting found. Got {}.'.format(\n input_receiver_fn_map.keys()))\n\n builder.save(as_text)\n\n # Add the extra assets\n if assets_extra:\n assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),\n compat.as_bytes('assets.extra'))\n for dest_relative, source in assets_extra.items():\n dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),\n compat.as_bytes(dest_relative))\n dest_path = os.path.dirname(dest_absolute)\n gfile.MakeDirs(dest_path)\n gfile.Copy(source, dest_absolute)\n\n gfile.Rename(temp_export_dir, export_dir)\n return export_dir\n\n def _add_meta_graph_for_mode(self,\n builder,\n input_receiver_fn_map,\n checkpoint_path,\n strip_default_attrs,\n save_variables=True,\n mode=model_fn_lib.ModeKeys.PREDICT,\n export_tags=None,\n check_variables=True):\n # pylint: disable=line-too-long\n \"\"\"Loads variables and adds them along with a `tf.MetaGraphDef` for saving.\n\n Args:\n builder: instance of `tf.saved_modle.builder.SavedModelBuilder` that will\n be used for saving.\n input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to\n `input_receiver_fn` mappings, where the `input_receiver_fn` is a\n function that takes no argument and returns the appropriate subclass of\n `InputReceiver`.\n checkpoint_path: The checkpoint path to export. 
If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the `NodeDef`s. For a detailed guide, see [Stripping\n Default-Valued\n Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n save_variables: bool, whether variables should be saved. If `False`, just\n the `tf.MetaGraphDef` will be saved. Note that `save_variables` should\n only be `True` for the first call to this function, and the\n `SavedModelBuilder` will raise an error if that is not the case.\n mode: `tf.estimator.ModeKeys` value indicating which mode will be\n exported.\n export_tags: The set of tags with which to save `tf.MetaGraphDef`. If\n `None`, a default set will be selected to matched the passed mode.\n check_variables: bool, whether to check the checkpoint has all variables.\n\n Raises:\n ValueError: if `save_variables` is `True` and `check_variable` is `False`.\n \"\"\"\n # pylint: enable=line-too-long\n if export_tags is None:\n export_tags = model_fn_lib.EXPORT_TAG_MAP[mode]\n input_receiver_fn = input_receiver_fn_map[mode]\n\n with ops.Graph().as_default() as g:\n self._create_and_assert_global_step(g)\n random_seed.set_random_seed(self._config.tf_random_seed)\n\n input_receiver = input_receiver_fn()\n\n # Call the model_fn and collect the export_outputs.\n estimator_spec = self._call_model_fn(\n features=input_receiver.features,\n labels=getattr(input_receiver, 'labels', None),\n mode=mode,\n config=self.config)\n\n export_outputs = self._get_export_outputs_for_spec(estimator_spec)\n\n # Build the SignatureDefs from receivers and all outputs\n signature_def_map = export_helpers.build_all_signature_defs(\n input_receiver.receiver_tensors,\n export_outputs,\n getattr(input_receiver, 'receiver_tensors_alternatives', None),\n serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))\n\n with tf_session.Session(config=self._session_config) as session:\n\n if estimator_spec.scaffold.local_init_op is not None:\n local_init_op = estimator_spec.scaffold.local_init_op\n else:\n local_init_op = monitored_session.Scaffold.default_local_init_op()\n\n # This saver will be used both for restoring variables now,\n # and in saving out the metagraph below. This ensures that any\n # Custom Savers stored with the Scaffold are passed through to the\n # SavedModel for restore later.\n graph_saver = estimator_spec.scaffold.saver or saver.Saver(sharded=True)\n\n if save_variables and not check_variables:\n raise ValueError('If `save_variables` is `True, `check_variables`'\n 'must not be `False`.')\n if check_variables:\n try:\n graph_saver.restore(session, checkpoint_path)\n except errors.NotFoundError as e:\n msg = ('Could not load all requested variables from checkpoint. '\n 'Please make sure your model_fn does not expect variables '\n 'that were not saved in the checkpoint.\\n\\n'\n 'Encountered error with mode `{}` while restoring '\n 'checkpoint from: `{}`. Full Traceback:\\n\\n{}').format(\n mode, checkpoint_path, e)\n raise ValueError(msg)\n\n # We add the train op explicitly for now, so that we don't have to\n # change the Builder public interface. 
Note that this is a no-op\n # for prediction, where train_op is None.\n builder._add_train_op(estimator_spec.train_op) # pylint: disable=protected-access\n\n meta_graph_kwargs = dict(\n tags=export_tags,\n signature_def_map=signature_def_map,\n assets_collection=ops.get_collection(\n ops.GraphKeys.ASSET_FILEPATHS),\n strip_default_attrs=strip_default_attrs,\n legacy_init_op=local_init_op,\n saver=graph_saver)\n\n if save_variables:\n builder.add_meta_graph_and_variables(\n session, **meta_graph_kwargs)\n else:\n builder.add_meta_graph(**meta_graph_kwargs)\n\n def _get_export_outputs_for_spec(self, estimator_spec):\n \"\"\"Given an `EstimatorSpec`, determine what our export outputs should be.\n\n `EstimatorSpecs` contains `export_outputs` that are used for serving, but\n for\n training and eval graphs, we must wrap the tensors of interest in\n appropriate `tf.estimator.export.ExportOutput` objects.\n\n Args:\n estimator_spec: `tf.estimator.EstimatorSpec` object that will be exported.\n\n Returns:\n a dict mapping `export_output_name` to `tf.estimator.export.ExportOutput`\n object.\n\n Raises:\n ValueError: if an appropriate `ExportOutput` cannot be found for the\n passed `EstimatorSpec.mode`\n \"\"\"\n mode = estimator_spec.mode\n if mode == model_fn_lib.ModeKeys.PREDICT:\n outputs = estimator_spec.export_outputs\n else:\n if mode == model_fn_lib.ModeKeys.TRAIN:\n output_class = export_output.TrainOutput\n elif mode == model_fn_lib.ModeKeys.EVAL:\n output_class = export_output.EvalOutput\n else:\n raise ValueError(\n 'Export output type not found for mode: {}'.format(mode))\n\n export_out = output_class(\n loss=estimator_spec.loss,\n predictions=estimator_spec.predictions,\n metrics=estimator_spec.eval_metric_ops)\n outputs = {mode: export_out}\n\n return outputs\n\n def _get_features_from_input_fn(self, input_fn, mode):\n \"\"\"Extracts the `features` from return values of `input_fn`.\"\"\"\n result = self._call_input_fn(input_fn, mode)\n result, _, hooks = estimator_util.parse_input_fn_result(result)\n self._validate_features_in_predict_input(result)\n return result, hooks\n\n def _validate_features_in_predict_input(self, result):\n if not _has_dataset_or_queue_runner(result):\n logging.warning('Input graph does not use tf.data.Dataset or contain a '\n 'QueueRunner. That means predict yields forever. '\n 'This is probably a mistake.')\n\n def _get_iterator_from_input_fn(self, input_fn, mode, distribution=None):\n if distribution is not None:\n result = distribution.distribute_dataset(\n lambda: self._call_input_fn(input_fn, mode))\n else:\n result = self._call_input_fn(input_fn, mode)\n\n iterator = result.make_initializable_iterator()\n input_hooks = [estimator_util._DatasetInitializerHook(iterator)] # pylint: disable=protected-access\n return iterator, input_hooks\n\n def _get_features_and_labels_from_input_fn(self, input_fn, mode):\n \"\"\"Extracts the `features` and labels from return values of `input_fn`.\"\"\"\n return estimator_util.parse_input_fn_result(\n self._call_input_fn(input_fn, mode))\n\n def _extract_batch_length(self, preds_evaluated):\n \"\"\"Extracts batch length of predictions.\"\"\"\n batch_length = None\n for key, value in six.iteritems(preds_evaluated):\n batch_length = batch_length or value.shape[0]\n if value.shape[0] != batch_length:\n raise ValueError('Batch length of predictions should be same. %s has '\n 'different batch length than others.' 
% key)\n return batch_length\n\n def _extract_keys(self, predictions, predict_keys):\n \"\"\"Extracts `predict_keys` from `predictions`.\"\"\"\n if not predict_keys:\n return predictions\n if not isinstance(predictions, dict):\n raise ValueError(\n 'predict_keys argument is not valid in case of non-dict predictions.')\n existing_keys = predictions.keys()\n predictions = {\n key: value\n for key, value in six.iteritems(predictions) if key in predict_keys\n }\n if not predictions:\n raise ValueError('Expected to run at least one output from %s, '\n 'provided %s.' % (existing_keys, predict_keys))\n return predictions\n\n def _create_global_step(self, graph):\n \"\"\"Creates the global step tensor in graph.\n\n The global step tensor must be an integer type with name 'global_step' and\n be added to the collection @{tf.GraphKeys#GLOBAL_STEP$GLOBAL_STEP}.\n\n Args:\n graph: The graph in which to create the global step tensor.\n\n Returns:\n The global step `tf.Tensor`.\n \"\"\"\n return training.create_global_step(graph)\n\n def _create_and_assert_global_step(self, graph):\n \"\"\"Creates and asserts properties of the global step.\n\n Args:\n graph: The graph in which to create the global step tensor.\n\n Returns:\n The global step `tf.Tensor`.\n \"\"\"\n step = self._create_global_step(graph)\n assert step == training.get_global_step()\n assert step.dtype.is_integer\n return step\n\n def _call_input_fn(self, input_fn, mode):\n \"\"\"Calls the input function.\n\n Args:\n input_fn: The input function.\n mode: `tf.estimator.ModeKeys`\n\n Returns:\n The return value of the passed `input_fn`, which should be one of:\n\n * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a\n tuple `(features, labels)` with same constraints as below.\n * A tuple `(features, labels)`: Where `features` is a `Tensor` or a\n dictionary of string feature name to `Tensor` and `labels` is a\n `Tensor` or a dictionary of string label name to `Tensor`. Both\n `features` and `labels` are consumed by `model_fn`. 
They should\n satisfy the expectation of `model_fn` from inputs.\n\n Raises:\n ValueError: if `input_fn` takes invalid arguments.\n \"\"\"\n input_fn_args = function_utils.fn_args(input_fn)\n kwargs = {}\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n if 'params' in input_fn_args:\n kwargs['params'] = self.params\n if 'config' in input_fn_args:\n kwargs['config'] = self.config\n with ops.device('/cpu:0'):\n return input_fn(**kwargs)\n\n def _call_model_fn(self, features, labels, mode, config):\n \"\"\"Calls model function.\n\n Args:\n features: features dict.\n labels: labels dict.\n mode: `tf.estimator.ModeKeys`\n config: `tf.estimator.RunConfig`\n\n Returns:\n An `tf.estimator.EstimatorSpec` object.\n\n Raises:\n ValueError: if `model_fn` returns invalid objects.\n \"\"\"\n model_fn_args = function_utils.fn_args(self._model_fn)\n kwargs = {}\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n else:\n if labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = mode\n if 'params' in model_fn_args:\n kwargs['params'] = self.params\n if 'config' in model_fn_args:\n kwargs['config'] = config\n\n logging.info('Calling model_fn.')\n model_fn_results = self._model_fn(features=features, **kwargs)\n logging.info('Done calling model_fn.')\n\n if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):\n raise ValueError('model_fn should return an EstimatorSpec.')\n\n return model_fn_results\n\n def _train_model(self, input_fn, hooks, saving_listeners):\n if self._train_distribution:\n return self._train_model_distributed(input_fn, hooks, saving_listeners)\n else:\n return self._train_model_default(input_fn, hooks, saving_listeners)\n\n def _train_model_default(self, input_fn, hooks, saving_listeners):\n \"\"\"Initiate training with `input_fn`, without `DistributionStrategies`.\n\n Args:\n input_fn: A function that provides input data for training as minibatches.\n hooks: List of `tf.train.SessionRunHook` subclass instances. Used for\n callbacks inside the training loop.\n saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used\n for callbacks that run immediately before or after checkpoint savings.\n\n Returns:\n Loss from training\n \"\"\"\n worker_hooks = []\n with ops.Graph().as_default() as g, g.device(self._device_fn):\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step_tensor = self._create_and_assert_global_step(g)\n\n # Skip creating a read variable if _create_and_assert_global_step\n # returns None (e.g. tf.contrib.estimator.SavedModelEstimator).\n if global_step_tensor is not None:\n training_util._get_or_create_global_step_read(g) # pylint: disable=protected-access\n\n features, labels, input_hooks = (\n self._get_features_and_labels_from_input_fn(\n input_fn, model_fn_lib.ModeKeys.TRAIN))\n worker_hooks.extend(input_hooks)\n estimator_spec = self._call_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)\n global_step_tensor = training_util.get_global_step(g)\n return self._train_with_estimator_spec(estimator_spec, worker_hooks,\n hooks, global_step_tensor,\n saving_listeners)\n\n def _train_model_distributed(self, input_fn, hooks, saving_listeners):\n \"\"\"Initiate training with `input_fn`, using `DistributionStrategies`.\n\n Args:\n input_fn: A function that provides input data for training as minibatches.\n hooks: List of `tf.train.SessionRunHook` subclass instances. 
Used for\n callbacks inside the training loop.\n saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used\n for callbacks that run immediately before or after checkpoint savings.\n\n Returns:\n Loss from training\n \"\"\"\n self._train_distribution.configure(self._session_config)\n\n # TODO(sourabhbajaj): Remove this hack once we migrate the other strategies\n # to use the new API\n is_tpu_strategy = (\n self._train_distribution.__class__.__name__ == 'TPUStrategy')\n\n worker_hooks = []\n with ops.Graph().as_default() as g:\n # We want to create the iterations variable outside the distribution scope\n # as that is just stored on the host and mainly used to drive the loop\n # and doesn't need to be a Mirrored/Device variable.\n steps_per_run_variable = training.get_or_create_steps_per_run_variable()\n with self._train_distribution.scope():\n random_seed.set_random_seed(self._config.tf_random_seed)\n iterator, input_hooks = self._get_iterator_from_input_fn(\n input_fn, model_fn_lib.ModeKeys.TRAIN, self._train_distribution)\n worker_hooks.extend(input_hooks)\n global_step_tensor = self._create_and_assert_global_step(g)\n # we want to add to the global collection in the main thread not the\n # tower threads.\n ops.add_to_collection(\n training_util.GLOBAL_STEP_READ_KEY,\n self._train_distribution.read_var(global_step_tensor))\n\n if is_tpu_strategy:\n # Create a step_fn from the train_op of grouped_estimator_spec\n def step_fn(ctx, features, labels):\n \"\"\"A single step that is passed to run_on_dataset.\"\"\"\n estimator_spec = self._train_distribution.call_for_each_tower(\n self._call_model_fn,\n features,\n labels,\n model_fn_lib.ModeKeys.TRAIN,\n self.config)\n ctx.set_last_step_output(\n name='loss',\n output=estimator_spec.loss,\n aggregation=distribute_lib.get_loss_reduction())\n ctx.set_non_tensor_output(\n name='estimator_spec', output=estimator_spec)\n return estimator_spec.train_op\n\n # Create new train_op post graph rewrites\n initial_training_loss = constant_op.constant(1e7)\n ctx = self._train_distribution.run_steps_on_dataset(\n step_fn, iterator, iterations=steps_per_run_variable,\n initial_loop_values={'loss': initial_training_loss})\n distributed_train_op = ctx.run_op\n loss = ctx.last_step_outputs['loss']\n grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']\n else:\n features, labels = iterator.get_next()\n grouped_estimator_spec = self._train_distribution.call_for_each_tower(\n self._call_model_fn,\n features,\n labels, # although this will be None it seems\n model_fn_lib.ModeKeys.TRAIN,\n self.config)\n loss = self._train_distribution.unwrap(\n self._train_distribution.reduce(\n distribute_lib.get_loss_reduction(),\n grouped_estimator_spec.loss,\n destinations='/device:CPU:0'))[0]\n distributed_train_op = grouped_estimator_spec.train_op\n\n scaffold = _combine_distributed_scaffold(\n grouped_estimator_spec.scaffold, self._train_distribution)\n\n def get_hooks_from_the_first_device(per_device_hooks):\n hooks_list = self._train_distribution.unwrap(per_device_hooks)\n assert hooks_list\n return hooks_list[0]\n\n training_hooks = get_hooks_from_the_first_device(\n grouped_estimator_spec.training_hooks)\n training_chief_hooks = get_hooks_from_the_first_device(\n grouped_estimator_spec.training_chief_hooks)\n worker_hooks.append(\n estimator_util.StrategyInitFinalizeHook(\n self._train_distribution.initialize,\n self._train_distribution.finalize))\n\n estimator_spec = model_fn_lib.EstimatorSpec(\n mode=grouped_estimator_spec.mode,\n 
loss=loss,\n train_op=self._train_distribution.group(distributed_train_op),\n training_hooks=training_hooks,\n training_chief_hooks=training_chief_hooks,\n scaffold=scaffold)\n return self._train_with_estimator_spec(estimator_spec, worker_hooks,\n hooks, global_step_tensor,\n saving_listeners)\n\n def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks,\n global_step_tensor, saving_listeners):\n \"\"\"Train a model with the given Estimator Spec.\"\"\"\n if self._warm_start_settings:\n logging.info('Warm-starting with WarmStartSettings: %s' %\n (self._warm_start_settings,))\n warm_starting_util.warm_start(*self._warm_start_settings)\n # Check if the user created a loss summary, and add one if they didn't.\n # We assume here that the summary is called 'loss'. If it is not, we will\n # make another one with the name 'loss' to ensure it shows up in the right\n # graph in TensorBoard.\n if not any([x.op.name == 'loss'\n for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):\n summary.scalar('loss', estimator_spec.loss)\n ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)\n worker_hooks.extend(hooks)\n worker_hooks.append(\n training.NanTensorHook(estimator_spec.loss)\n )\n if self._config.log_step_count_steps is not None:\n worker_hooks.append(\n training.LoggingTensorHook(\n {\n 'loss': estimator_spec.loss,\n 'step': global_step_tensor\n },\n every_n_iter=self._config.log_step_count_steps)\n )\n worker_hooks.extend(estimator_spec.training_hooks)\n\n if not (estimator_spec.scaffold.saver or\n ops.get_collection(ops.GraphKeys.SAVERS)):\n ops.add_to_collection(\n ops.GraphKeys.SAVERS,\n training.Saver(\n sharded=True,\n max_to_keep=self._config.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=(\n self._config.keep_checkpoint_every_n_hours),\n defer_build=True,\n save_relative_paths=True))\n\n chief_hooks = []\n all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)\n saver_hooks = [\n h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n if not saver_hooks:\n chief_hooks = [\n training.CheckpointSaverHook(\n self._model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n scaffold=estimator_spec.scaffold)\n ]\n saver_hooks = [chief_hooks[0]]\n if saving_listeners:\n if not saver_hooks:\n raise ValueError(\n 'There should be a CheckpointSaverHook to use saving_listeners. '\n 'Please set one of the RunConfig.save_checkpoints_steps or '\n 'RunConfig.save_checkpoints_secs.')\n else:\n # It is expected to have one CheckpointSaverHook. 
If multiple, we pick\n # up the first one to add listener.\n saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access\n with training.MonitoredTrainingSession(\n master=self._config.master,\n is_chief=self._config.is_chief,\n checkpoint_dir=self._model_dir,\n scaffold=estimator_spec.scaffold,\n hooks=worker_hooks,\n chief_only_hooks=(\n tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),\n save_checkpoint_secs=0, # Saving is handled by a hook.\n save_summaries_steps=self._config.save_summary_steps,\n config=self._session_config,\n log_step_count_steps=self._config.log_step_count_steps) as mon_sess:\n loss = None\n while not mon_sess.should_stop():\n _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])\n return loss\n\n def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None):\n \"\"\"Builds the graph and related hooks to run evaluation.\"\"\"\n random_seed.set_random_seed(self._config.tf_random_seed)\n self._create_and_assert_global_step(ops.get_default_graph())\n\n if self._eval_distribution:\n (scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (\n self._call_model_fn_eval_distributed(input_fn, self.config))\n else:\n (scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (\n self._call_model_fn_eval(input_fn, self.config))\n\n global_step_tensor = training_util.get_global_step(ops.get_default_graph())\n # Call to warm_start has to be after model_fn is called.\n self._maybe_warm_start(checkpoint_path)\n\n if ops.GraphKeys.GLOBAL_STEP in eval_dict:\n raise ValueError(\n 'Metric with name `global_step` is not allowed, because Estimator '\n 'already defines a default metric with the same name.')\n eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor\n\n all_hooks = list(input_hooks)\n all_hooks.extend(hooks)\n all_hooks.extend(list(evaluation_hooks or []))\n # New local variables have been added, so update the estimator spec's\n # local init op if it was defined.\n if scaffold and scaffold.local_init_op:\n # Ensure that eval step has been created before updating local init op.\n evaluation._get_or_create_eval_step() # pylint: disable=protected-access\n\n scaffold = monitored_session.Scaffold(\n local_init_op=control_flow_ops.group(\n scaffold.local_init_op,\n monitored_session.Scaffold.default_local_init_op()),\n copy_from_scaffold=scaffold\n )\n\n return scaffold, update_op, eval_dict, all_hooks\n\n def _call_model_fn_eval(self, input_fn, config):\n \"\"\"Call model_fn for evaluation and handle return values.\"\"\"\n features, labels, input_hooks = self._get_features_and_labels_from_input_fn(\n input_fn, model_fn_lib.ModeKeys.EVAL)\n\n estimator_spec = self._call_model_fn(\n features, labels, model_fn_lib.ModeKeys.EVAL, config)\n eval_metric_ops = _verify_and_create_loss_metric(\n estimator_spec.eval_metric_ops, estimator_spec.loss)\n update_op, eval_dict = _extract_metric_update_ops(eval_metric_ops)\n return (estimator_spec.scaffold, estimator_spec.evaluation_hooks,\n input_hooks, update_op, eval_dict)\n\n def _call_model_fn_eval_distributed(self, input_fn, config):\n \"\"\"Call model_fn in distribution mode and handle return values.\"\"\"\n\n iterator, input_hooks = self._get_iterator_from_input_fn(\n input_fn, model_fn_lib.ModeKeys.EVAL, self._eval_distribution)\n\n is_tpu_strategy = (\n self._eval_distribution.__class__.__name__ == 'TPUStrategy')\n\n if is_tpu_strategy:\n def step_fn(ctx, features, labels):\n \"\"\"Runs one step of the eval computation and captures 
outputs.\"\"\"\n estimator_spec = self._eval_distribution.call_for_each_tower(\n self._call_model_fn, features, labels, model_fn_lib.ModeKeys.EVAL,\n config)\n eval_metric_ops = _verify_and_create_loss_metric(\n estimator_spec.eval_metric_ops, estimator_spec.loss,\n self._eval_distribution)\n update_op, eval_dict = _extract_metric_update_ops(\n eval_metric_ops, self._eval_distribution)\n ctx.set_non_tensor_output(name='estimator_spec', output=estimator_spec)\n ctx.set_non_tensor_output(name='eval_dict', output=eval_dict)\n return update_op\n\n # TODO(priyag): Fix eval step hook to account for steps_per_run.\n ctx = self._eval_distribution.run_steps_on_dataset(\n step_fn, iterator, iterations=self._eval_distribution.steps_per_run)\n update_op = ctx.run_op\n eval_dict = ctx.non_tensor_outputs['eval_dict']\n grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']\n else:\n features, labels = iterator.get_next()\n grouped_estimator_spec = self._eval_distribution.call_for_each_tower(\n self._call_model_fn, features, labels,\n model_fn_lib.ModeKeys.EVAL, config)\n eval_metric_ops = _verify_and_create_loss_metric(\n grouped_estimator_spec.eval_metric_ops, grouped_estimator_spec.loss,\n self._eval_distribution)\n update_op, eval_dict = _extract_metric_update_ops(\n eval_metric_ops, self._eval_distribution)\n\n scaffold = _combine_distributed_scaffold(\n grouped_estimator_spec.scaffold, self._eval_distribution)\n evaluation_hooks = self._eval_distribution.unwrap(\n grouped_estimator_spec.evaluation_hooks)[0]\n evaluation_hooks = evaluation_hooks + (\n estimator_util.StrategyInitFinalizeHook(\n self._eval_distribution.initialize,\n self._eval_distribution.finalize),)\n\n return (scaffold, evaluation_hooks, input_hooks, update_op, eval_dict)\n\n def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,\n all_hooks, output_dir):\n \"\"\"Run evaluation.\"\"\"\n eval_results = evaluation._evaluate_once( # pylint: disable=protected-access\n checkpoint_path=checkpoint_path,\n master=self._config.evaluation_master,\n scaffold=scaffold,\n eval_ops=update_op,\n final_ops=eval_dict,\n hooks=all_hooks,\n config=self._session_config)\n\n current_global_step = eval_results[ops.GraphKeys.GLOBAL_STEP]\n\n _write_dict_to_summary(\n output_dir=output_dir,\n dictionary=eval_results,\n current_global_step=current_global_step)\n\n if checkpoint_path:\n _write_checkpoint_path_to_summary(\n output_dir=output_dir,\n checkpoint_path=checkpoint_path,\n current_global_step=current_global_step)\n\n return eval_results\n\n def _maybe_warm_start(self, checkpoint_path):\n if not checkpoint_path and self._warm_start_settings:\n logging.info('Warm-starting with WarmStartSettings: %s' %\n (self._warm_start_settings,))\n warm_starting_util.warm_start(*self._warm_start_settings)\n\n\ndef _verify_and_create_loss_metric(eval_metric_ops, loss, distribution=None):\n \"\"\"Creates a metric for loss and throws an error if one already exists.\"\"\"\n if model_fn_lib.LOSS_METRIC_KEY in eval_metric_ops:\n raise ValueError(\n 'Metric with name \"%s\" is not allowed, because Estimator ' %\n (model_fn_lib.LOSS_METRIC_KEY) +\n 'already defines a default metric with the same name.')\n\n if distribution is None:\n loss_metric = metrics_lib.mean(loss)\n else:\n loss_metric = distribution.call_for_each_tower(\n metrics_lib.mean, loss)\n eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric\n return eval_metric_ops\n\n\ndef maybe_overwrite_model_dir_and_session_config(config, model_dir):\n \"\"\"Overwrite estimator 
config by `model_dir` and `session_config` if needed.\n\n Args:\n config: Original estimator config.\n model_dir: Estimator model checkpoint directory.\n\n Returns:\n Overwritten estimator config.\n\n Raises:\n ValueError: Model directory inconsistent between `model_dir` and `config`.\n \"\"\"\n\n if config is None:\n config = run_config.RunConfig()\n logging.info('Using default config.')\n if not isinstance(config, run_config.RunConfig):\n raise ValueError(\n 'config must be an instance of `RunConfig`, but provided %s.' % config)\n\n if config.session_config is None:\n session_config = run_config.get_default_session_config()\n config = run_config.RunConfig.replace(config, session_config=session_config)\n\n model_dir = compat_internal.path_to_str(model_dir)\n if model_dir is not None:\n if (getattr(config, 'model_dir', None) is not None and\n config.model_dir != model_dir):\n raise ValueError(\n \"`model_dir` are set both in constructor and `RunConfig`, but with \"\n \"different values. In constructor: '{}', in `RunConfig`: \"\n \"'{}' \".format(model_dir, config.model_dir))\n if model_dir:\n config = run_config.RunConfig.replace(config, model_dir=model_dir)\n elif getattr(config, 'model_dir', None) is None:\n model_dir = tempfile.mkdtemp()\n logging.warning('Using temporary folder as model directory: %s', model_dir)\n config = run_config.RunConfig.replace(config, model_dir=model_dir)\n\n return config\n\n\ndef create_per_tower_ready_op(scaffold):\n \"\"\"Create a `tf.train.Scaffold.ready_op` inside a tower.\"\"\"\n if scaffold.ready_op:\n return scaffold.ready_op\n\n def default_ready_op():\n return array_ops.concat([\n variables.report_uninitialized_variables(),\n resources.report_uninitialized_resources()\n ], 0)\n\n return monitored_session.Scaffold.get_or_default(\n 'ready_op', ops.GraphKeys.READY_OP, default_ready_op)\n\n\ndef create_per_tower_ready_for_local_init_op(scaffold):\n \"\"\"Create a `tf.train.Scaffold.ready_for_local_init_op` inside a tower.\"\"\"\n if scaffold.ready_for_local_init_op:\n return scaffold.ready_for_local_init_op\n\n def default_ready_for_local_init_op():\n return variables.report_uninitialized_variables(\n variables.global_variables())\n\n return monitored_session.Scaffold.get_or_default(\n 'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,\n default_ready_for_local_init_op)\n\n\ndef _combine_distributed_scaffold(grouped_scaffold, distribution):\n \"\"\"Combines scaffold(s) returned from `distribution.call_for_each_tower`.\"\"\"\n\n # TODO(anjalisridhar): Figure out how to resolve the following scaffold\n # parameters: init_feed_dict, init_fn.\n scaffold_list = distribution.unwrap(grouped_scaffold)\n init_feed_dict = [\n s.init_feed_dict\n for s in scaffold_list\n if s.init_feed_dict is not None\n ]\n if init_feed_dict:\n init_feed_dict = distribution.group(init_feed_dict)\n else:\n init_feed_dict = None\n\n init_fn = [s.init_fn for s in scaffold_list if s.init_fn is not None]\n if init_fn:\n init_fn = distribution.group(init_fn)\n else:\n init_fn = None\n\n init_op = [s.init_op for s in scaffold_list if s.init_op is not None]\n if init_op:\n init_op = distribution.group(init_op)\n else:\n init_op = None\n\n def _unwrap_and_concat(value):\n value = nest.flatten(distribution.unwrap(value))\n if len(value) != 1:\n return array_ops.concat(value)\n return value[0]\n\n ready_op = distribution.call_for_each_tower(\n create_per_tower_ready_op, grouped_scaffold)\n if ready_op is not None:\n ready_op = _unwrap_and_concat(ready_op)\n else:\n ready_op = 
None\n\n ready_for_local_init_op = distribution.call_for_each_tower(\n create_per_tower_ready_for_local_init_op, grouped_scaffold)\n if ready_for_local_init_op is not None:\n ready_for_local_init_op = _unwrap_and_concat(ready_for_local_init_op)\n else:\n ready_for_local_init_op = None\n\n local_init_op = [\n s.local_init_op\n for s in scaffold_list\n if s.local_init_op is not None\n ]\n if local_init_op:\n local_init_op = distribution.group(local_init_op)\n else:\n local_init_op = None\n\n summary_op = [\n s.summary_op for s in scaffold_list if s.summary_op is not None\n ]\n if summary_op:\n summary_op = distribution.group(summary_op)\n else:\n summary_op = None\n\n scaffold = monitored_session.Scaffold(\n init_op=init_op,\n ready_op=ready_op,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_init_op,\n summary_op=summary_op,\n init_feed_dict=init_feed_dict,\n init_fn=init_fn)\n return scaffold\n\n\ndef _check_checkpoint_available(model_dir):\n latest_path = checkpoint_management.latest_checkpoint(model_dir)\n if not latest_path:\n raise ValueError(\n 'Could not find trained model in model_dir: {}.'.format(model_dir))\n\n\ndef _check_hooks_type(hooks):\n \"\"\"Returns hooks if all are `SessionRunHook`, raises TypeError otherwise.\"\"\"\n hooks = list(hooks or [])\n for h in hooks:\n if not isinstance(h, training.SessionRunHook):\n raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))\n return hooks\n\n\ndef _check_listeners_type(saving_listeners):\n \"\"\"Check listeners type.\"\"\"\n listeners = list(saving_listeners or [])\n for l in listeners:\n if not isinstance(l, training.CheckpointSaverListener):\n raise TypeError(\n 'saving_listeners must be a list of CheckpointSaverListener, '\n 'given: {}'.format(l))\n return listeners\n\n\ndef _get_replica_device_setter(config):\n \"\"\"Creates a replica device setter if required as a default `device_fn`.\n\n `Estimator` uses `tf.train.ReplicaDeviceSetter` as a default device placer. It\n sets the\n distributed related arguments such as number of `ps_replicas` based on given\n `config`.\n\n Args:\n config: A `tf.estimator.RunConfig` instance.\n\n Returns:\n A replica device setter, or `None`.\n \"\"\"\n if config.task_type:\n worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)\n else:\n worker_device = '/job:worker'\n\n if config.num_ps_replicas > 0:\n return training.replica_device_setter(\n ps_tasks=config.num_ps_replicas,\n worker_device=worker_device,\n merge_devices=True,\n ps_ops=list(device_setter.STANDARD_PS_OPS),\n cluster=config.cluster_spec)\n else:\n return None\n\n\ndef _verify_model_fn_args(model_fn, params):\n \"\"\"Verifies `model_fn` arguments.\"\"\"\n args = set(function_utils.fn_args(model_fn))\n if 'features' not in args:\n raise ValueError('model_fn (%s) must include features argument.' % model_fn)\n if params is not None and 'params' not in args:\n raise ValueError('model_fn (%s) does not include params argument, '\n 'but params (%s) is passed to Estimator.' 
% (model_fn,\n params))\n if params is None and 'params' in args:\n logging.warning('Estimator\\'s model_fn (%s) includes params '\n 'argument, but params are not passed to Estimator.',\n model_fn)\n non_valid_args = list(args - _VALID_MODEL_FN_ARGS)\n if non_valid_args:\n raise ValueError('model_fn (%s) has following not expected args: %s' %\n (model_fn, non_valid_args))\n\n\ndef _load_global_step_from_checkpoint_dir(checkpoint_dir):\n try:\n checkpoint_reader = training.NewCheckpointReader(\n training.latest_checkpoint(checkpoint_dir))\n return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)\n except: # pylint: disable=bare-except\n return 0\n\n\ndef _extract_metric_update_ops(eval_dict, distribution=None):\n \"\"\"Separate update operations from metric value operations.\"\"\"\n update_ops = []\n value_ops = {}\n # Sort metrics lexicographically so graph is identical every time.\n for name, metric_ops in sorted(six.iteritems(eval_dict)):\n value_ops[name] = metric_ops[0]\n if distribution:\n update_op = distribution.group(metric_ops[1])\n else:\n update_op = metric_ops[1]\n update_ops.append(update_op)\n\n if update_ops:\n update_op = control_flow_ops.group(*update_ops)\n else:\n update_op = None\n\n return update_op, value_ops\n\n\ndef _dict_to_str(dictionary):\n \"\"\"Get a `str` representation of a `dict`.\n\n Args:\n dictionary: The `dict` to be represented as `str`.\n\n Returns:\n A `str` representing the `dictionary`.\n \"\"\"\n return ', '.join('%s = %s' % (k, v)\n for k, v in sorted(six.iteritems(dictionary))\n if not isinstance(v, six.binary_type))\n\n\ndef _write_dict_to_summary(output_dir,\n dictionary,\n current_global_step):\n \"\"\"Writes a `dict` into summary file in given output directory.\n\n Args:\n output_dir: `str`, directory to write the summary file in.\n dictionary: the `dict` to be written to summary file.\n current_global_step: `int`, the current global step.\n \"\"\"\n logging.info('Saving dict for global step %d: %s', current_global_step,\n _dict_to_str(dictionary))\n summary_writer = writer_cache.FileWriterCache.get(output_dir)\n summary_proto = summary_pb2.Summary()\n for key in dictionary:\n if dictionary[key] is None:\n continue\n if key == 'global_step':\n continue\n if (isinstance(dictionary[key], np.float32) or\n isinstance(dictionary[key], float)):\n summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))\n elif (isinstance(dictionary[key], np.int64) or\n isinstance(dictionary[key], np.int32) or\n isinstance(dictionary[key], int)):\n summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))\n elif isinstance(dictionary[key], six.binary_type):\n try:\n summ = summary_pb2.Summary.FromString(dictionary[key])\n for i, _ in enumerate(summ.value):\n summ.value[i].tag = '%s/%d' % (key, i)\n summary_proto.value.extend(summ.value)\n except message.DecodeError:\n logging.warn('Skipping summary for %s, cannot parse string to Summary.',\n key)\n continue\n elif isinstance(dictionary[key], np.ndarray):\n value = summary_proto.value.add()\n value.tag = key\n value.node_name = key\n tensor_proto = tensor_util.make_tensor_proto(dictionary[key])\n value.tensor.CopyFrom(tensor_proto)\n # pylint: disable=line-too-long\n logging.info(\n 'Summary for np.ndarray is not visible in Tensorboard by default. 
'\n 'Consider using a Tensorboard plugin for visualization (see '\n 'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'\n ' for more information).')\n # pylint: enable=line-too-long\n else:\n logging.warn(\n 'Skipping summary for %s, must be a float, np.float32, np.int64, '\n 'np.int32 or int or np.ndarray or a serialized string of Summary.',\n key)\n summary_writer.add_summary(summary_proto, current_global_step)\n summary_writer.flush()\n\n\ndef _write_checkpoint_path_to_summary(output_dir, checkpoint_path,\n current_global_step):\n \"\"\"Writes `checkpoint_path` into summary file in the given output directory.\n\n Args:\n output_dir: `str`, directory to write the summary file in.\n checkpoint_path: `str`, checkpoint file path to be written to summary file.\n current_global_step: `int`, the current global step.\n \"\"\"\n\n checkpoint_path_tag = 'checkpoint_path'\n\n logging.info('Saving \\'%s\\' summary for global step %d: %s',\n checkpoint_path_tag, current_global_step, checkpoint_path)\n summary_proto = summary_pb2.Summary()\n summary_proto.value.add(\n tag=checkpoint_path_tag,\n tensor=tensor_util.make_tensor_proto(\n checkpoint_path, dtype=dtypes.string))\n summary_writer = writer_cache.FileWriterCache.get(output_dir)\n summary_writer.add_summary(summary_proto, current_global_step)\n summary_writer.flush()\n\n\ndef _has_dataset_or_queue_runner(maybe_tensor):\n \"\"\"Returns `True` if `Dataset` or `QueueRunner` has been used.\"\"\"\n # Check TF dataset first. Here, we use a simple algorithm to check the top\n # level Tensors only, which should be sufficient for most users.\n tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]\n if any([t.op.type == 'IteratorGetNext' for t in tensors]):\n return True\n\n # Now, check queue.\n return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)\n\n\nVocabInfo = warm_starting_util.VocabInfo # pylint: disable=invalid-name\nestimator_export('estimator.VocabInfo')(VocabInfo)\n\n\n@estimator_export('estimator.WarmStartSettings')\nclass WarmStartSettings(\n collections.namedtuple('WarmStartSettings', [\n 'ckpt_to_initialize_from',\n 'vars_to_warm_start',\n 'var_name_to_vocab_info',\n 'var_name_to_prev_var_name',\n ])):\n \"\"\"Settings for warm-starting in `tf.estimator.Estimators`.\n\n Example Use with canned `tf.estimator.DNNEstimator`:\n\n ```\n emb_vocab_file = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_vocabulary_file(\n \"sc_vocab_file\", \"new_vocab.txt\", vocab_size=100),\n dimension=8)\n emb_vocab_list = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_vocabulary_list(\n \"sc_vocab_list\", vocabulary_list=[\"a\", \"b\"]),\n dimension=8)\n estimator = tf.estimator.DNNClassifier(\n hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],\n warm_start_from=ws)\n ```\n\n where `ws` could be defined as:\n\n Warm-start all weights in the model (input layer and hidden weights).\n Either the directory or a specific checkpoint can be provided (in the case\n of the former, the latest checkpoint will be used):\n\n ```\n ws = WarmStartSettings(ckpt_to_initialize_from=\"/tmp\")\n ws = WarmStartSettings(ckpt_to_initialize_from=\"/tmp/model-1000\")\n ```\n\n Warm-start only the embeddings (input layer):\n\n ```\n ws = WarmStartSettings(ckpt_to_initialize_from=\"/tmp\",\n vars_to_warm_start=\".*input_layer.*\")\n ```\n\n Warm-start all weights but the embedding parameters corresponding to\n 
`sc_vocab_file` have a different vocab from the one used in the current\n model:\n\n ```\n vocab_info = tf.estimator.VocabInfo(\n new_vocab=sc_vocab_file.vocabulary_file,\n new_vocab_size=sc_vocab_file.vocabulary_size,\n num_oov_buckets=sc_vocab_file.num_oov_buckets,\n old_vocab=\"old_vocab.txt\"\n )\n ws = WarmStartSettings(\n ckpt_to_initialize_from=\"/tmp\",\n var_name_to_vocab_info={\n \"input_layer/sc_vocab_file_embedding/embedding_weights\": vocab_info\n })\n ```\n\n Warm-start only `sc_vocab_file` embeddings (and no other variables), which\n have a different vocab from the one used in the current model:\n\n ```\n vocab_info = tf.estimator.VocabInfo(\n new_vocab=sc_vocab_file.vocabulary_file,\n new_vocab_size=sc_vocab_file.vocabulary_size,\n num_oov_buckets=sc_vocab_file.num_oov_buckets,\n old_vocab=\"old_vocab.txt\"\n )\n ws = WarmStartSettings(\n ckpt_to_initialize_from=\"/tmp\",\n vars_to_warm_start=None,\n var_name_to_vocab_info={\n \"input_layer/sc_vocab_file_embedding/embedding_weights\": vocab_info\n })\n ```\n\n Warm-start all weights but the parameters corresponding to `sc_vocab_file`\n have a different vocab from the one used in current checkpoint, and only\n 100 of those entries were used:\n\n ```\n vocab_info = tf.estimator.VocabInfo(\n new_vocab=sc_vocab_file.vocabulary_file,\n new_vocab_size=sc_vocab_file.vocabulary_size,\n num_oov_buckets=sc_vocab_file.num_oov_buckets,\n old_vocab=\"old_vocab.txt\",\n old_vocab_size=100\n )\n ws = WarmStartSettings(\n ckpt_to_initialize_from=\"/tmp\",\n var_name_to_vocab_info={\n \"input_layer/sc_vocab_file_embedding/embedding_weights\": vocab_info\n })\n ```\n\n Warm-start all weights but the parameters corresponding to `sc_vocab_file`\n have a different vocab from the one used in current checkpoint and the\n parameters corresponding to `sc_vocab_list` have a different name from the\n current checkpoint:\n\n ```\n vocab_info = tf.estimator.VocabInfo(\n new_vocab=sc_vocab_file.vocabulary_file,\n new_vocab_size=sc_vocab_file.vocabulary_size,\n num_oov_buckets=sc_vocab_file.num_oov_buckets,\n old_vocab=\"old_vocab.txt\",\n old_vocab_size=100\n )\n ws = WarmStartSettings(\n ckpt_to_initialize_from=\"/tmp\",\n var_name_to_vocab_info={\n \"input_layer/sc_vocab_file_embedding/embedding_weights\": vocab_info\n },\n var_name_to_prev_var_name={\n \"input_layer/sc_vocab_list_embedding/embedding_weights\":\n \"old_tensor_name\"\n })\n ```\n\n Attributes:\n ckpt_to_initialize_from: [Required] A string specifying the directory with\n checkpoint file(s) or path to checkpoint from which to warm-start the\n model parameters.\n vars_to_warm_start: [Optional] One of the following: - A regular expression\n (string) that captures which variables to warm-start (see\n `tf.get_collection`). This expression will only consider variables in the\n `TRAINABLE_VARIABLES` collection. - A list of Variables to warm-start. - A\n list of strings, each representing a full variable name to warm-start. -\n `None`, in which case only variables specified in `var_name_to_vocab_info`\n will be warm-started. Defaults to `'.*'`, which warm-starts all variables\n in the `TRAINABLE_VARIABLES` collection. Note that this excludes\n variables such as accumulators and moving statistics from batch norm.\n var_name_to_vocab_info: [Optional] Dict of variable names (strings) to\n `tf.estimator.VocabInfo`. The variable names should be \"full\" variables,\n not the names of the partitions. 
If not explicitly provided, the variable\n is assumed to have no vocabulary.\n var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to\n name of the previously-trained variable in `ckpt_to_initialize_from`. If\n not explicitly provided, the name of the variable is assumed to be same\n between previous checkpoint and current model.\n \"\"\"\n\n def __new__(cls,\n ckpt_to_initialize_from,\n vars_to_warm_start='.*',\n var_name_to_vocab_info=None,\n var_name_to_prev_var_name=None):\n if not ckpt_to_initialize_from:\n raise ValueError(\n '`ckpt_to_initialize_from` MUST be set in WarmStartSettings')\n return super(WarmStartSettings, cls).__new__(\n cls,\n ckpt_to_initialize_from,\n vars_to_warm_start,\n var_name_to_vocab_info or {},\n var_name_to_prev_var_name or {},\n )\n\n\ndef _get_saved_model_ckpt(saved_model_dir):\n \"\"\"Return path to variables checkpoint in a `SavedModel` directory.\"\"\"\n if not gfile.Exists(\n os.path.join(saved_model_utils.get_variables_dir(saved_model_dir),\n compat.as_text('variables.index'))):\n raise ValueError('Directory provided has an invalid SavedModel format: %s'\n % saved_model_dir)\n return saved_model_utils.get_variables_path(saved_model_dir)\n\n\ndef _get_default_warm_start_settings(warm_start_from):\n \"\"\"Returns default `tf.estimator.WarmStartSettings`.\n\n Args:\n warm_start_from: Either a string representing the filepath of a checkpoint\n or `SavedModel` to initialize from, or an instance of\n `tf.estimator.WarmStartSettings`.\n\n Returns:\n Either None or an instance of `WarmStartSettings`.\n\n Raises:\n ValueError: If `warm_start_from` is not `None` but is neither a string nor\n an\n instance of `WarmStartSettings`.\n \"\"\"\n if warm_start_from is None:\n return None\n if isinstance(warm_start_from, (six.string_types, six.binary_type)):\n # Infer that this is a SavedModel if export_path +\n # 'variables/variables.index' exists, and if so, construct the\n # WarmStartSettings pointing to the variables path\n # (export_path + 'variables/variables').\n if gfile.Exists(os.path.join(\n saved_model_utils.get_variables_dir(warm_start_from),\n compat.as_text('variables.index'))):\n logging.info('Warm-starting from a SavedModel')\n return WarmStartSettings(\n ckpt_to_initialize_from=saved_model_utils.get_variables_path(\n warm_start_from))\n return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)\n elif isinstance(warm_start_from, WarmStartSettings):\n return warm_start_from\n else:\n raise ValueError('warm_start_from must be a string or a WarmStartSettings, '\n 'instead got {}'.format(type(warm_start_from)))\n" ]
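The `evaluate`, `predict` and `export_savedmodel` docstrings in the dumped source above describe the public workflow those internal methods implement. Below is a minimal, illustrative sketch of that workflow using the public `tf.estimator` API (TF 1.x); it is not part of the dumped file, and the names `my_model_fn`, `train_input_fn`, the `/tmp/example_model` path and the toy data are placeholder assumptions.

# Minimal sketch (assumed names, toy data) of the train/evaluate/predict
# workflow documented by the Estimator methods dumped above.
import tensorflow as tf

def my_model_fn(features, labels, mode, params):
    # Trivial linear classifier; a real model_fn builds an arbitrary graph.
    logits = tf.layers.dense(features['x'], params['n_classes'])
    predictions = {'classes': tf.argmax(logits, axis=1),
                   'probabilities': tf.nn.softmax(logits)}
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.EVAL:
        metrics = {'accuracy': tf.metrics.accuracy(labels, predictions['classes'])}
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    train_op = tf.train.AdamOptimizer().minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

def train_input_fn():
    # Returns a Dataset of (features, labels); evaluate()/predict() accept
    # input_fns with the same structure.
    ds = tf.data.Dataset.from_tensor_slices(
        ({'x': tf.random_uniform([32, 4])}, tf.zeros([32], tf.int32)))
    return ds.batch(8).repeat()

estimator = tf.estimator.Estimator(
    model_fn=my_model_fn, model_dir='/tmp/example_model',
    params={'n_classes': 3})
estimator.train(input_fn=train_input_fn, steps=100)
# evaluate() returns the metric dict described above, including 'global_step'.
metrics = estimator.evaluate(input_fn=train_input_fn, steps=10)
# predict() yields one dict per example when yield_single_examples=True.
for pred in estimator.predict(input_fn=train_input_fn):
    break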
[ [ "tensorflow.python.util.tf_export.estimator_export", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.training.training.get_global_step", "tensorflow.python.training.training_util._get_or_create_global_step_read", "tensorflow.python.training.evaluation._evaluate_once", "tensorflow.python.util.compat_internal.path_to_str", "tensorflow.python.training.training.get_or_create_steps_per_run_variable", "tensorflow.python.training.saver.Saver", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.training.training.Saver", "tensorflow.python.training.training.ChiefSessionCreator", "tensorflow.python.util.nest.flatten", "tensorflow.python.training.basic_session_run_hooks._MultiStepStopAtStepHook", "tensorflow.python.estimator.util._DatasetInitializerHook", "tensorflow.python.estimator.export.export.get_timestamped_export_dir", "tensorflow.python.framework.ops.device", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.training.checkpoint_management.latest_checkpoint", "tensorflow.python.platform.gfile.Rename", "tensorflow.core.framework.summary_pb2.Summary.FromString", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.platform.gfile.Copy", "tensorflow.python.training.monitored_session.Scaffold.default_local_init_op", "tensorflow.python.util.compat.as_text", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.client.session.Session", "tensorflow.python.framework.tensor_util.make_tensor_proto", "tensorflow.python.saved_model.builder.SavedModelBuilder", "tensorflow.python.util.function_utils.fn_args", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.python.training.monitored_session.Scaffold.get_or_default", "tensorflow.python.training.training.NanTensorHook", "tensorflow.python.training.evaluation._StopAfterNEvalsHook", "tensorflow.python.ops.resources.report_uninitialized_resources", "tensorflow.python.training.training.StopAtStepHook", "tensorflow.python.framework.ops.Graph", "tensorflow.python.training.training.load_variable", "tensorflow.python.ops.metrics.mean", "tensorflow.python.estimator.run_config.get_default_session_config", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.summary.summary.scalar", "tensorflow.python.ops.variables.report_uninitialized_variables", "tensorflow.python.saved_model.utils_impl.get_variables_dir", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.training.evaluation._get_or_create_eval_step", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.get", "tensorflow.python.estimator.run_config.RunConfig.replace", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.training.training.LoggingTensorHook", "tensorflow.python.training.training.CheckpointSaverHook", "tensorflow.python.training.distribute.get_loss_reduction", "tensorflow.python.training.training.list_variables", "tensorflow.python.training.training.create_global_step", "tensorflow.python.estimator.export.export.get_temp_export_dir", "tensorflow.python.estimator.util.StrategyInitFinalizeHook", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.variables.global_variables", "tensorflow.core.framework.summary_pb2.Summary", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.estimator.run_config.RunConfig", 
"tensorflow.python.training.warm_starting_util.warm_start", "tensorflow.python.platform.gfile.MakeDirs", "tensorflow.python.saved_model.utils_impl.get_variables_path", "tensorflow.python.training.training.latest_checkpoint", "tensorflow.python.estimator.util.parse_input_fn_result" ] ]
ScarWar/DeepSTORM3D
[ "8ba5bc61120abedba9c1b24a994e616e280bdda2" ]
[ "DeepSTORM3D/postprocess_utils.py" ]
[ "# Import modules and libraries\nimport numpy as np\nimport torch\nfrom torch.nn import Module, MaxPool3d, ConstantPad3d\nfrom torch.nn.functional import conv3d\n\n\n# convert gpu tensors to numpy\ndef tensor_to_np(x):\n return np.squeeze(x.cpu().numpy())\n\n\n# post-processing on GPU: thresholding and local maxima finding\nclass Postprocess(Module):\n def __init__(self, thresh, radius, setup_params):\n super().__init__()\n self.thresh = thresh\n self.r = radius\n self.device = setup_params['device']\n self.psize_xy = setup_params['pixel_size_rec']\n self.psize_z = setup_params['pixel_size_axial']\n self.zmin = setup_params['zmin']\n self.upsampling_shift = 0 # 2 due to floor(W/2) affected by upsampling factor of 4\n self.maxpool = MaxPool3d(kernel_size=2*self.r + 1, stride=1, padding=self.r)\n self.pad = ConstantPad3d(self.r, 0.0)\n self.zero = torch.FloatTensor([0.0]).to(self.device)\n\n # construct the local average filters\n filt_vec = np.arange(-self.r, self.r + 1)\n yfilter, zfilter, xfilter = np.meshgrid(filt_vec, filt_vec, filt_vec)\n xfilter = torch.FloatTensor(xfilter).unsqueeze(0).unsqueeze(0)\n yfilter = torch.FloatTensor(yfilter).unsqueeze(0).unsqueeze(0)\n zfilter = torch.FloatTensor(zfilter).unsqueeze(0).unsqueeze(0)\n sfilter = torch.ones_like(xfilter)\n self.local_filter = torch.cat((sfilter, xfilter, yfilter, zfilter), 0).to(self.device)\n\n def local_avg(self, xbool, ybool, zbool, pred_vol_pad, num_pts, device):\n\n # create the concatenated tensor of all local volumes\n pred_vol_all = torch.zeros(num_pts, 1, self.r*2 + 1, self.r*2 + 1, self.r*2 + 1).to(device)\n for pt in range(num_pts):\n\n # local 3D volume\n xpt = [xbool[pt], xbool[pt] + 2 * self.r + 1]\n ypt = [ybool[pt], ybool[pt] + 2 * self.r + 1]\n zpt = [zbool[pt], zbool[pt] + 2 * self.r + 1]\n pred_vol_all[pt, :] = pred_vol_pad[:, :, zpt[0]:zpt[1], ypt[0]:ypt[1], xpt[0]:xpt[1]]\n\n # convolve it using conv3d\n sums = conv3d(pred_vol_all, self.local_filter)\n\n # squeeze the sums and convert them to local perturbations\n xloc = sums[:, 1] / sums[:, 0]\n yloc = sums[:, 2] / sums[:, 0]\n zloc = sums[:, 3] / sums[:, 0]\n\n return xloc, yloc, zloc\n\n def forward(self, pred_vol):\n\n # check size of the prediction and expand it accordingly to be 5D\n num_dims = len(pred_vol.size())\n if np.not_equal(num_dims, 5):\n if num_dims == 4:\n pred_vol = pred_vol.unsqueeze(0)\n else:\n pred_vol = pred_vol.unsqueeze(0)\n pred_vol = pred_vol.unsqueeze(0)\n\n # apply the threshold\n pred_thresh = torch.where(pred_vol > self.thresh, pred_vol, self.zero)\n\n # apply the 3D maxpooling operation to find local maxima\n conf_vol = self.maxpool(pred_thresh)\n conf_vol = torch.where((conf_vol > self.zero) & (conf_vol == pred_thresh), conf_vol, self.zero)\n\n # find locations of confs (bigger than 0)\n conf_vol = torch.squeeze(conf_vol)\n batch_indices = torch.nonzero(conf_vol)\n zbool, ybool, xbool = batch_indices[:, 0], batch_indices[:, 1], batch_indices[:, 2]\n\n # if the prediction is empty return None otherwise convert to list of locations\n if len(zbool) == 0:\n xyz_rec = None\n conf_rec = None\n\n else:\n\n # pad the result with radius_px 0's for average calc.\n pred_vol_pad = self.pad(pred_vol)\n\n # for each point calculate local weighted average\n num_pts = len(zbool)\n xloc, yloc, zloc = self.local_avg(xbool, ybool, zbool, pred_vol_pad, num_pts, self.device)\n\n # convert lists and tensors to numpy\n xloc, yloc, zloc = tensor_to_np(xloc), tensor_to_np(xloc), tensor_to_np(xloc)\n xbool, ybool, zbool = tensor_to_np(xbool), 
tensor_to_np(ybool), tensor_to_np(zbool)\n\n # dimensions of the prediction\n D, H, W = conf_vol.size()\n\n # calculate the recovered positions assuming mid-voxel\n xrec = (xbool + xloc - np.floor(W / 2) + self.upsampling_shift + 0.5) * self.psize_xy\n yrec = (ybool + yloc - np.floor(H / 2) + self.upsampling_shift + 0.5) * self.psize_xy\n zrec = (zbool + zloc + 0.5) * self.psize_z + self.zmin\n\n # rearrange the result into a Nx3 array\n xyz_rec = np.column_stack((xrec, yrec, zrec))\n\n # confidence of these positions\n conf_rec = conf_vol[zbool, ybool, xbool]\n conf_rec = tensor_to_np(conf_rec)\n\n return xyz_rec, conf_rec\n" ]
[ [ "torch.zeros", "torch.nonzero", "torch.cat", "numpy.not_equal", "torch.nn.MaxPool3d", "torch.FloatTensor", "torch.squeeze", "numpy.arange", "torch.ones_like", "torch.nn.ConstantPad3d", "torch.nn.functional.conv3d", "numpy.column_stack", "numpy.meshgrid", "torch.where", "numpy.floor" ] ]
dmadea/Spectra-Manipulator
[ "ddc1b27cb4f4691096dfa7b2975df350d2eaf40e" ]
[ "spectramanipulator/user_namespace.py" ]
[ "import numpy as np\nfrom typing import Iterable\nimport os\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt # we plot graphs with this library\nfrom matplotlib import cm\nfrom matplotlib.ticker import *\nimport matplotlib.gridspec as gridspec\n\nfrom matplotlib import colors as c\n\n# from copy import deepcopy\nfrom PyQt5.QtWidgets import QApplication\nfrom scipy.linalg import lstsq\n\nfrom spectramanipulator.settings import Settings\nfrom spectramanipulator.spectrum import fi, Spectrum, SpectrumList, group2mat\nfrom scipy.integrate import simps, cumtrapz\nfrom scipy.stats import linregress\nfrom uncertainties import ufloat, unumpy\n\n\nimport functools\n\n# for backward compatibility of smpj files\nItemList = list\n\nWL_LABEL = 'Wavelength / nm'\nWN_LABEL = \"Wavenumber / $10^{4}$ cm$^{-1}$\"\n\n\n# needed for correctly display tics for symlog scale\nclass MinorSymLogLocator(Locator):\n \"\"\"\n Dynamically find minor tick positions based on the positions of\n major ticks for a symlog scaling.\n \"\"\"\n\n def __init__(self, linthresh, nints=10):\n \"\"\"\n Ticks will be placed between the major ticks.\n The placement is linear for x between -linthresh and linthresh,\n otherwise its logarithmically. nints gives the number of\n intervals that will be bounded by the minor ticks.\n \"\"\"\n self.linthresh = linthresh\n self.nintervals = nints\n\n def __call__(self):\n # Return the locations of the ticks\n majorlocs = self.axis.get_majorticklocs()\n\n if len(majorlocs) == 1:\n return self.raise_if_exceeds(np.array([]))\n\n # add temporary major tick locs at either end of the current range\n # to fill in minor tick gaps\n dmlower = majorlocs[1] - majorlocs[0] # major tick difference at lower end\n dmupper = majorlocs[-1] - majorlocs[-2] # major tick difference at upper e\tnd\n\n # add temporary major tick location at the lower end\n if majorlocs[0] != 0. and ((majorlocs[0] != self.linthresh and dmlower > self.linthresh) or (\n dmlower == self.linthresh and majorlocs[0] < 0)):\n majorlocs = np.insert(majorlocs, 0, majorlocs[0] * 10.)\n else:\n majorlocs = np.insert(majorlocs, 0, majorlocs[0] - self.linthresh)\n\n # add temporary major tick location at the upper end\n if majorlocs[-1] != 0. and ((np.abs(majorlocs[-1]) != self.linthresh and dmupper > self.linthresh) or (\n dmupper == self.linthresh and majorlocs[-1] > 0)):\n majorlocs = np.append(majorlocs, majorlocs[-1] * 10.)\n else:\n majorlocs = np.append(majorlocs, majorlocs[-1] + self.linthresh)\n\n # iterate through minor locs\n minorlocs = []\n\n # handle the lowest part\n for i in range(1, len(majorlocs)):\n majorstep = majorlocs[i] - majorlocs[i - 1]\n if abs(majorlocs[i - 1] + majorstep / 2) < self.linthresh:\n ndivs = self.nintervals\n else:\n ndivs = self.nintervals - 1.\n\n minorstep = majorstep / ndivs\n locs = np.arange(majorlocs[i - 1], majorlocs[i], minorstep)[1:]\n minorlocs.extend(locs)\n\n return self.raise_if_exceeds(np.array(minorlocs))\n\n def tick_values(self, vmin, vmax):\n raise NotImplementedError('Cannot get tick locations for a '\n '%s type.' 
% type(self))\n\n\ndef setup_wavenumber_axis(ax, x_label=WN_LABEL,\n x_major_locator=None, x_minor_locator=AutoMinorLocator(5), factor=1e3):\n secondary_ax = ax.secondary_xaxis('top', functions=(lambda x: factor / x, lambda x: 1 / (factor * x)))\n\n secondary_ax.tick_params(which='major', direction='in')\n secondary_ax.tick_params(which='minor', direction='in')\n\n if x_major_locator:\n secondary_ax.xaxis.set_major_locator(x_major_locator)\n\n if x_minor_locator:\n secondary_ax.xaxis.set_minor_locator(x_minor_locator)\n\n secondary_ax.set_xlabel(x_label)\n\n return secondary_ax\n\n\ndef set_main_axis(ax, x_label=WL_LABEL, y_label=\"Absorbance\", xlim=(None, None), ylim=(None, None),\n x_major_locator=None, x_minor_locator=None, y_major_locator=None, y_minor_locator=None):\n ax.set_ylabel(y_label)\n ax.set_xlabel(x_label)\n if xlim[0] is not None:\n ax.set_xlim(xlim)\n if ylim[0] is not None:\n ax.set_ylim(ylim)\n\n if x_major_locator:\n ax.xaxis.set_major_locator(x_major_locator)\n\n if x_minor_locator:\n ax.xaxis.set_minor_locator(x_minor_locator)\n\n if y_major_locator:\n ax.yaxis.set_major_locator(y_major_locator)\n\n if y_minor_locator:\n ax.yaxis.set_minor_locator(y_minor_locator)\n\n ax.tick_params(axis='both', which='major', direction='in')\n ax.tick_params(axis='both', which='minor', direction='in')\n\n\ndef _transform_func(transform=lambda y: y):\n def decorator(fn):\n @functools.wraps(fn)\n def func(item):\n fn_name = fn.__name__\n if isinstance(item, Spectrum):\n y_data = transform(item.data[:, 1])\n return Spectrum.from_xy_values(item.data[:, 0], np.nan_to_num(y_data), name=f'{fn_name}({item.name})')\n elif isinstance(item, SpectrumList):\n sl = SpectrumList(name=f'{fn_name}({item.name})')\n for sp in item:\n y_data = transform(sp.data[:, 1])\n new_sp = Spectrum.from_xy_values(sp.data[:, 0], np.nan_to_num(y_data), name=f'{fn_name}({sp.name})')\n sl.children.append(new_sp)\n return sl\n else:\n return transform(item) # this may not be implemented, but for numbers and ndarrays it will work\n return func\n return decorator\n\n\n@_transform_func(lambda y: np.exp(y))\ndef exp(item):\n \"\"\"\n Calculates the exponential of y values and returns a new Spectrum/SpectrumList.\n \"\"\"\n pass\n\n\n@_transform_func(lambda y: np.log10(y))\ndef log10(item):\n \"\"\"\n Calculates the decadic logarithm of y values and returns a new Spectrum/SpectrumList.\n \"\"\"\n pass\n\n\n@_transform_func(lambda y: np.log(y))\ndef log(item):\n \"\"\"\n Calculates the natural logarithm of y values and returns a new Spectrum/SpectrumList\n \"\"\"\n pass\n\n\n@_transform_func(lambda y: -np.log10(-y))\ndef T2A_LFP(item):\n \"\"\"\n Performs the transmittance to absorbance conversion for nano kinetics,\n y_transformed = -log10(-y).\"\"\"\n pass\n\n#\n# def add_to_list(spectra):\n# \"\"\"\n# Copies all spectra and imports them to the Tree Widget.\n#\n# Parameters\n# ----------\n# spectra : {:class:`Spectrum`, :class:`SpectrumList`, list, list of lists, SpectrumItemGroup, SpectrumItem}\n# The input spectra to be added into Tree Widget.\n# \"\"\"\n#\n# if UserNamespace.instance is not None:\n# UserNamespace.instance.tw.add_to_list(spectra)\n\n\n# def load_kinetics(spectra):\n# \"\"\"\n# Copies all spectra and imports them to the Tree Widget.\n#\n# Parameters\n# ----------\n# spectra : {:class:`Spectrum`, :class:`SpectrumList`, list, list of lists, SpectrumItemGroup, SpectrumItem}\n# The input spectra to be added into Tree Widget.\n# \"\"\"\n#\n# if UserNamespace.instance is not None:\n# 
UserNamespace.instance.tw.add_to_list(spectra)\n\n\n\ndef import_files(filepaths):\n \"\"\"\n Imports the filepaths and add to Tree Widget\n\n Parameters\n ----------\n filepaths : list of strs or str\n List of filepaths to import.\n \"\"\"\n if UserNamespace.instance is not None:\n UserNamespace.instance.main.tree_widget.import_files(filepaths)\n\n\ndef set_xy_range(x0=None, x1=None, y0=None, y1=None, padding=0):\n \"\"\"\n Changes the x and y ranges of scene in Plot Widget.\n\n Parameters\n ----------\n x0 : {int, float, None}\n New fist x value. If None, old value is kept.\n x1 : {int, float, None}\n New last x value. If None, old value is kept.\n y0 : {int, float, None}\n New fist y value. If None, old value is kept.\n y1 : {int, float, None}\n New last y value. If None, old value is kept.\n padding : {int, float}\n Sets the padding around the choosed rectangle. If 0, no padding will be used.\n \"\"\"\n plot_widget = UserNamespace.instance.main.grpView\n\n x_range, y_range = plot_widget.plotItem.getViewBox().viewRange()\n\n plot_widget.plotItem.getViewBox().setXRange(x_range[0] if x0 is None else x0,\n x_range[1] if x1 is None else x1,\n padding=padding)\n\n plot_widget.plotItem.getViewBox().setYRange(y_range[0] if y0 is None else y0,\n y_range[1] if y1 is None else y1,\n padding=padding)\n\n\ndef set_default_HSV_color_scheme():\n \"\"\"Sets the default values for HSV color scheme.\"\"\"\n Settings.hues = 9\n Settings.values = 1\n Settings.maxValue = 255\n Settings.minValue = 150\n Settings.maxHue = 360\n Settings.minHue = 0\n Settings.sat = 255\n Settings.alpha = 255\n\n if Settings.HSV_color_scheme:\n redraw_all_spectra()\n\n\ndef set_HSV_color_scheme(active=True, **kwargs):\n \"\"\"Set the options for HSV color scheme and whether the scheme is active.\n\n Options\n -------\n ================ =================================================================================\n *active* (bool) True for setting the scheme active, False for not (default color scheme will be\n used).\n *hues* (int) The number of hues that will be repeating, default 9.\n *values* (int) The number of values/brightnesses that will be repeating, default 1.\n *minValue* (int) A minimum value/brightness, this can be <0, 255>, default 150.\n *maxValue* (int) A maximum value/brightness, this can be <0, 255>, default 255.\n *minHue* (int) A minimum hue, this can be <0, 360>, default 0\n *maxHue* (int) A maximum hue, this can be <0, 360>, default 360\n *sat* (int) The saturation value, this can be <0, 255>, default 255\n *alpha* (int) The transparency value, this can be <0, 255>, default 255\n ================ =================================================================================\n \"\"\"\n hues = kwargs.get('hues', None)\n values = kwargs.get('values', None)\n maxValue = kwargs.get('maxValue', None)\n minValue = kwargs.get('minValue', None)\n maxHue = kwargs.get('maxHue', None)\n minHue = kwargs.get('minHue', None)\n sat = kwargs.get('sat', None)\n alpha = kwargs.get('alpha', None)\n\n Settings.HSV_color_scheme = active\n Settings.hues = hues if hues is not None else Settings.hues\n Settings.values = values if values is not None else Settings.values\n Settings.maxValue = maxValue if maxValue is not None else Settings.maxValue\n Settings.minValue = minValue if minValue is not None else Settings.minValue\n Settings.maxHue = maxHue if maxHue is not None else Settings.maxHue\n Settings.minHue = minHue if minHue is not None else Settings.minHue\n Settings.sat = sat if sat is not None else Settings.sat\n 
Settings.alpha = alpha if alpha is not None else Settings.alpha\n\n redraw_all_spectra()\n\n\ndef copy_to_clipboard(array, delimiter='\\t', decimal_sep='.', new_line='\\n'):\n \"\"\"Copies the *array* of numbers into clipboard. This can be then pasted to Excel for example.\n\n Parameters\n ----------\n array : {array_like, iterable}\n Array of values. Can be 1D or 2D array\n delimiter : str\n Delimiter between numbers, default tabulator '\\\\\\\\t'\n decimal_sep : str\n Decimal separator, default '.'\n new_line : str\n New line character, default '\\\\\\\\n'\n \"\"\"\n if not isinstance(array, (np.ndarray, Iterable, list, tuple)):\n raise ValueError(f\"Cannot copy {type(array)} to clipboard.\")\n\n try:\n text = new_line.join(delimiter.join(str(num).replace('.', decimal_sep) for num in row) for row in array)\n except: # the second dimension is not iterable, we probably got only 1D array, so lets put into clipboard only this\n text = delimiter.join(str(num).replace('.', decimal_sep) for num in array)\n\n cb = QApplication.clipboard()\n cb.clear(mode=cb.Clipboard)\n cb.setText(text, mode=cb.Clipboard)\n\n\ndef update_view():\n \"\"\"Updates the Tree Widget.\"\"\"\n if UserNamespace.instance is None:\n return\n\n mw = UserNamespace.instance.main\n\n mw.tree_widget.update_view()\n mw.tree_widget.setup_info()\n\n\ndef redraw_all_spectra():\n \"\"\"Redraws all spectra.\"\"\"\n if UserNamespace.instance is None:\n return\n\n mw = UserNamespace.instance.main\n\n mw.redraw_all_spectra()\n\n### Calculation of epsilon from concentration-dependent absorption spectra, the name of the spectra must contain\n### real concentration, the spectra must me ordered from lowest to highest concentration\n\ndef _get_C(group):\n \"\"\"Returns parsed names to floats from a group\"\"\"\n x_vals_temp = []\n for sp in group:\n try:\n x_vals_temp.append(float(sp.name.replace(',', '.').strip()))\n except ValueError:\n raise ValueError(\"Names of spectra cannot be parsed to float.\")\n return np.asarray(x_vals_temp, dtype=np.float64)\n\n# def _get_D(group):\n# D = group[0].data[:, 1]\n# for i in range(1, len(group)):\n# D = np.vstack((D, group[i].data[:, 1]))\n# return D\n\n\ndef calc_Eps(group):\n\n wls, c, D = group2mat(group)\n\n # C needs to be changed to column vector\n ST = lstsq(c[:, None], D.T)[0]\n\n\n # add a spectrum to list\n return Spectrum.from_xy_values(wls, ST.flatten(), name=group.name + '-epsilon')\n\n\ndef rename_times(group, decimal_places=1):\n \"\"\"Renames the group that has names in seconds. 
Changes for minutes for 60s <= time < 1 hour to minutes and\n time >= 1 hour to hours.\"\"\"\n\n parsed_times = []\n times = _get_C(group)\n\n for time in times:\n unit = ' s'\n if time >= 3600:\n time /= 3600\n unit = ' h'\n elif 60 <= time < 3600:\n time /= 60\n unit = ' min'\n\n time = np.round(time, decimal_places)\n parsed_times.append(f'{time}{unit}')\n\n group.set_names(parsed_times)\n\n\n# def load_kinetics(dir_name, spectra_dir_name='spectra', times_fname='times.txt', blank_spectrum='blank.dx', dt=None,\n# b_corr=None, cut=None, corr_to_zero_time=True):\n# \"\"\"Given a directory name that contains folders of individual experiments, it loads all kinetics.\n# each experiment folder must contain folder spectra (or defined in spectra_dir_name arg.)\n# if blank is given, it will be subtracted from all spectra, times.txt will contain\n# times for all spectra, optional baseline correction and cut can be done.\n#\n# Folder structure:\n# [dir_name]\n# [exp1_dir]\n# [spectra]\n# 01.dx (or .csv or .txt)\n# 02.dx\n# ...\n# times.txt (optional)\n# blank.dx (optional)\n# [exp2_dir]\n# ...\n# ...\n# \"\"\"\n#\n# if UserNamespace.instance is None:\n# return\n#\n# if not os.path.isdir(dir_name):\n# raise ValueError(f'{dir_name} does not exist!')\n#\n# for item in os.listdir(dir_name):\n# path = os.path.join(dir_name, item)\n# if not os.path.isdir(path):\n# continue\n#\n# load_kinetic(path, spectra_dir_name=spectra_dir_name, times_fname=times_fname, blank_spectrum=blank_spectrum,\n# dt=dt, b_corr=b_corr, cut=cut, corr_to_zero_time=corr_to_zero_time)\n#\n#\n# def load_kinetic(dir_name, spectra_dir_name='spectra', times_fname='times.txt', blank_spectrum='blank.dx', dt=None,\n# b_corr=None, cut=None, corr_to_zero_time=True):\n# \"\"\"Given a directory name, it loads all spectra in dir named \"spectra\" - func. 
arg.,\n# if blank is given, it will be subtracted from all spectra, times.txt will contain\n# times for all spectra, optional baseline correction and cut can be done.\n#\n# Folder structure:\n# [dir_name]\n# [spectra]\n# 01.dx\n# 02.dx\n# ...\n# times.txt (optional)\n# blank.dx (optional)\n# \"\"\"\n#\n# if UserNamespace.instance is None:\n# return\n#\n# tw = UserNamespace.instance.main.tree_widget\n# root = tw.myModel.root # item in IPython console\n#\n# if not os.path.isdir(dir_name):\n# raise ValueError(f'{dir_name} does not exist!')\n#\n# spectra_path = os.path.join(dir_name, spectra_dir_name)\n#\n# if not os.path.isdir(spectra_path):\n# raise ValueError(f'{spectra_dir_name} does not exist in {dir_name}!')\n#\n# spectras = [os.path.join(spectra_path, filename) for filename in os.listdir(spectra_path)]\n#\n# n_items_before = root.__len__()\n# tw.import_files(spectras)\n# n_spectra = root.__len__() - n_items_before\n#\n# tw.add_items_to_group(root[n_items_before:], edit=False) # add loaded spectra to group\n# root[n_items_before].name = f'raw [{os.path.split(dir_name)[1]}]' # set name of a group\n#\n# times = np.asarray([dt * i for i in range(n_spectra)]) if dt is not None else None\n# # idx_add = 0\n# group_idx = n_items_before\n# blank_used = False\n#\n# # load explicit times\n# times_fpath = os.path.join(dir_name, times_fname)\n# if os.path.isfile(times_fpath):\n# tw.import_files(times_fpath)\n# # idx_add += 1\n# if times is None:\n# times = root[-1].data[:, 0].copy()\n# if corr_to_zero_time:\n# times -= times[0]\n#\n# # push times variable to the console\n# UserNamespace.instance.main.console.push_variables(\n# {\n# 'times': times\n# }\n# )\n#\n# if times is not None:\n# root[group_idx].set_names(times)\n#\n# # load blank spectrum if available\n# blank_fpath = os.path.join(dir_name, blank_spectrum)\n# if os.path.isfile(blank_fpath):\n# last_idx = root.__len__() - 1\n# tw.import_files(blank_fpath)\n# add_to_list(root[group_idx] - root[last_idx + 1])\n# if times is not None:\n# root[-1].set_names(times)\n# blank_used = True\n#\n# corr_idx = -1 if blank_used else group_idx\n#\n# if b_corr is not None:\n# root[corr_idx].baseline_correct(*b_corr)\n# root[corr_idx].name += 'bcorr'\n# if cut is not None:\n# root[corr_idx].cut(*cut)\n# root[corr_idx].name += 'cut'\n#\n# # return times\n#\n\ndef _setup_wavenumber_axis(ax, x_label=WN_LABEL,\n x_major_locator=None, x_minor_locator=AutoMinorLocator(5), factor=1e3):\n secondary_ax = ax.secondary_xaxis('top', functions=(lambda x: factor / x, lambda x: 1 / (factor * x)))\n\n secondary_ax.tick_params(which='major', direction='in')\n secondary_ax.tick_params(which='minor', direction='in')\n\n if x_major_locator:\n secondary_ax.xaxis.set_major_locator(x_major_locator)\n\n if x_minor_locator:\n secondary_ax.xaxis.set_minor_locator(x_minor_locator)\n\n secondary_ax.set_xlabel(x_label)\n\n return secondary_ax\n\n\ndef _set_main_axis(ax, x_label=WL_LABEL, y_label=\"Absorbance\", xlim=(None, None), ylim=(None, None),\n x_major_locator=None, x_minor_locator=None, y_major_locator=None, y_minor_locator=None,\n direction='in'):\n ax.set_ylabel(y_label)\n ax.set_xlabel(x_label)\n if xlim[0] is not None:\n ax.set_xlim(xlim)\n if ylim[0] is not None:\n ax.set_ylim(ylim)\n\n if x_major_locator:\n ax.xaxis.set_major_locator(x_major_locator)\n\n if x_minor_locator:\n ax.xaxis.set_minor_locator(x_minor_locator)\n\n if y_major_locator:\n ax.yaxis.set_major_locator(y_major_locator)\n\n if y_minor_locator:\n ax.yaxis.set_minor_locator(y_minor_locator)\n\n 
ax.tick_params(axis='both', which='major', direction=direction)\n ax.tick_params(axis='both', which='minor', direction=direction)\n\n\ndef setup_twin_x_axis(ax, y_label=\"$I_{0,\\\\mathrm{m}}$ / $10^{-10}$ einstein s$^{-1}$ nm$^{-1}$\",\n x_label=None, ylim=(None, None), y_major_locator=None, y_minor_locator=None,\n keep_zero_aligned=True):\n ax2 = ax.twinx()\n\n ax2.tick_params(which='major', direction='in')\n ax2.tick_params(which='minor', direction='in')\n\n if y_major_locator:\n ax2.yaxis.set_major_locator(y_major_locator)\n\n if y_minor_locator:\n ax2.yaxis.set_minor_locator(y_minor_locator)\n\n ax2.set_ylabel(y_label)\n\n if keep_zero_aligned and ylim[0] is None and ylim[1] is not None:\n # a = bx/(x-1)\n ax1_ylim = ax.get_ylim()\n x = -ax1_ylim[0] / (ax1_ylim[1] - ax1_ylim[0]) # position of zero in ax1, from 0, to 1\n a = ylim[1] * x / (x - 1) # calculates the ylim[0] so that zero position is the same for both axes\n ax2.set_ylim(a, ylim[1])\n\n elif ylim[0] is not None:\n ax2.set_ylim(ylim)\n\n return ax2\n\n\ndef plot_kinetics(kin_group_items: list, n_rows: int = None, n_cols: int = None, n_spectra=50, linscale=1,\n linthresh=100, cmap='jet_r', major_ticks_labels=(100, 1000), emph_t=(0, 200, 1000),\n inset_loc=(0.75, 0.1, 0.03, 0.8), colorbar_label='Time / s', lw=0.5, alpha=0.5,\n fig_size_one_graph=(5, 4), y_label='Absorbance', x_label=WL_LABEL, x_lim=(230, 600), filepath=None,\n dpi=500, transparent=True, LED_sources: list = None):\n\n kin_group_items = kin_group_items if isinstance(kin_group_items, list) else [kin_group_items]\n n = len(kin_group_items) # number of EEMs to plot\n\n if LED_sources is not None:\n LED_sources = LED_sources if isinstance(LED_sources, list) else [LED_sources]\n if len(LED_sources) == 1 and n > 1:\n LED_sources = LED_sources * n\n\n assert len(LED_sources) == n, \"Number of provided LEDs must be the same as spectra\"\n else:\n LED_sources = [None] * n\n\n if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs\n sqrt = n ** 0.5\n n_rows = int(np.ceil(sqrt))\n n_cols = int(sqrt)\n elif n_rows is None and n_cols is not None:\n n_rows = int(np.ceil(n / n_cols))\n elif n_rows is not None and n_cols is None:\n n_cols = int(np.ceil(n / n_rows))\n\n # assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))\n\n axes = axes.flatten() if np.iterable(axes) else [axes]\n\n for ax, group, LED_source in zip(axes, kin_group_items, LED_sources):\n\n t = np.asarray(group.get_names(), dtype=np.float64)\n w = group[0].data[:, 0]\n\n _set_main_axis(ax, x_label=x_label, y_label=y_label, xlim=x_lim, x_minor_locator=None, y_minor_locator=None)\n _ = _setup_wavenumber_axis(ax)\n\n cmap = cm.get_cmap(cmap)\n norm = mpl.colors.SymLogNorm(vmin=t[0], vmax=t[-1], linscale=linscale, linthresh=linthresh, base=10, clip=True)\n\n tsb_idxs = fi(t, emph_t)\n ts_real = np.round(t[tsb_idxs])\n\n x_space = np.linspace(0, 1, n_spectra, endpoint=True, dtype=np.float64)\n\n t_idx_space = fi(t, norm.inverse(x_space))\n t_idx_space = np.sort(np.asarray(list(set(t_idx_space).union(set(tsb_idxs)))))\n\n for i in t_idx_space:\n x_real = norm(t[i])\n x_real = 0 if np.ma.is_masked(x_real) else x_real\n ax.plot(w, group[i].data[:, 1], color=cmap(x_real),\n lw=1.5 if i in tsb_idxs else lw,\n alpha=1 if i in tsb_idxs else alpha,\n zorder=1 if i in tsb_idxs else 0)\n\n cbaxes = 
ax.inset_axes(inset_loc)\n\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n sm.set_array([])\n cbar = plt.colorbar(sm, cax=cbaxes, orientation='vertical',\n format=mpl.ticker.ScalarFormatter(),\n label=colorbar_label)\n\n cbaxes.invert_yaxis()\n\n minor_ticks = [10, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900] + list(\n np.arange(2e3, t[-1], 1e3))\n cbaxes.yaxis.set_ticks(cbar._locate(minor_ticks), minor=True)\n\n major_ticks = np.sort(np.hstack((np.asarray([100, 1000]), ts_real)))\n major_ticks_labels = np.sort(np.hstack((np.asarray(major_ticks_labels), ts_real)))\n\n cbaxes.yaxis.set_ticks(cbar._locate(major_ticks), minor=False)\n cbaxes.set_yticklabels([(f'{num:0.0f}' if num in major_ticks_labels else \"\") for num in major_ticks])\n\n for ytick, ytick_label, _t in zip(cbaxes.yaxis.get_major_ticks(), cbaxes.get_yticklabels(), major_ticks):\n if _t in ts_real:\n color = cmap(norm(_t))\n ytick_label.set_color(color)\n ytick_label.set_fontweight('bold')\n ytick.tick2line.set_color(color)\n ytick.tick2line.set_markersize(5)\n # ytick.tick2line.set_markeredgewidth(2)\n\n if LED_source is not None:\n ax_sec = setup_twin_x_axis(ax, ylim=(None, LED_source.y.max() * 3), y_label=\"\", y_major_locator=FixedLocator([]))\n ax_sec.fill(LED_source.x, LED_source.y, facecolor='gray', alpha=0.5)\n ax_sec.plot(LED_source.x, LED_source.y, color='black', ls='dotted', lw=1)\n\n plt.tight_layout()\n if filepath:\n plt.savefig(fname=filepath, transparent=transparent, dpi=dpi)\n\n plt.show()\n\n#\n# def plot_kinetic_ax(group_item, n_spectra=50, linscale=1, linthresh=100, cmap='jet_r',\n# major_ticks_labels=(100, 1000), emph_t=(0, 200, 1000), inset_loc=(0.75, 0.1, 0.03, 0.8),\n# colorbar_label='Time / s', lw=0.5, alpha=0.5, fig_size=(5, 4), y_label='Absorbance', x_label=WL_LABEL,\n# x_lim=(230, 600), filepath=None, dpi=500, transparent=True, LED_source_xy=(None, None)):\n#\n# t = np.asarray(group_item.get_names(), dtype=np.float64)\n# w = group_item[0].data[:, 0]\n#\n# fig, ax1 = plt.subplots(1, 1, figsize=fig_size)\n#\n# _set_main_axis(ax1, x_label=x_label, y_label=y_label, xlim=x_lim, x_minor_locator=None, y_minor_locator=None)\n# _ = _setup_wavenumber_axis(ax1)\n#\n# cmap = cm.get_cmap(cmap)\n# norm = mpl.colors.SymLogNorm(vmin=t[0], vmax=t[-1], linscale=linscale, linthresh=linthresh, base=10, clip=True)\n#\n# tsb_idxs = fi(t, emph_t)\n# ts_real = np.round(t[tsb_idxs])\n#\n# x_space = np.linspace(0, 1, n_spectra, endpoint=True, dtype=np.float64)\n#\n# t_idx_space = fi(t, norm.inverse(x_space))\n# t_idx_space = np.sort(np.asarray(list(set(t_idx_space).union(set(tsb_idxs)))))\n#\n# for i in t_idx_space:\n# x_real = norm(t[i])\n# x_real = 0 if np.ma.is_masked(x_real) else x_real\n# ax1.plot(w, group_item[i].data[:, 1], color=cmap(x_real),\n# lw=1.5 if i in tsb_idxs else lw,\n# alpha=1 if i in tsb_idxs else alpha,\n# zorder=1 if i in tsb_idxs else 0)\n#\n# cbaxes = ax1.inset_axes(inset_loc)\n#\n# sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n# sm.set_array([])\n# cbar = plt.colorbar(sm, cax=cbaxes, orientation='vertical',\n# format=mpl.ticker.ScalarFormatter(),\n# label=colorbar_label)\n#\n# cbaxes.invert_yaxis()\n#\n# minor_ticks = [10, 20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400, 500, 600, 700, 800, 900] + list(\n# np.arange(2e3, t[-1], 1e3))\n# cbaxes.yaxis.set_ticks(cbar._locate(minor_ticks), minor=True)\n#\n# major_ticks = np.sort(np.hstack((np.asarray([100, 1000]), ts_real)))\n# major_ticks_labels = np.sort(np.hstack((np.asarray(major_ticks_labels), ts_real)))\n#\n# 
cbaxes.yaxis.set_ticks(cbar._locate(major_ticks), minor=False)\n# cbaxes.set_yticklabels([(f'{num:0.0f}' if num in major_ticks_labels else \"\") for num in major_ticks])\n#\n# for ytick, ytick_label, _t in zip(cbaxes.yaxis.get_major_ticks(), cbaxes.get_yticklabels(), major_ticks):\n# if _t in ts_real:\n# color = cmap(norm(_t))\n# ytick_label.set_color(color)\n# ytick_label.set_fontweight('bold')\n# ytick.tick2line.set_color(color)\n# ytick.tick2line.set_markersize(5)\n# # ytick.tick2line.set_markeredgewidth(2)\n#\n# if LED_source_xy[0] is not None and LED_source_xy[1] is not None:\n# x_LED, y_LED = LED_source_xy\n# ax_sec = setup_twin_x_axis(ax, ylim=(None, y_LED.max() * 3), y_label=\"\", y_major_locator=FixedLocator([]))\n# ax_sec.fill(x_LED, y_LED, facecolor='gray', alpha=0.5)\n# ax_sec.plot(x_LED, y_LED, color='black', ls='dotted', lw=1)\n#\n# if filepath:\n# ext = os.path.splitext(filepath)[1].lower()[1:]\n# plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)\n#\n# plt.show()\n\n\ndef plot_kinetics_no_colorbar(group_item, x_lim=(None, None), y_lim=(None, None), slice_to_plot=slice(0, -1, 5),\n x_label='Time / s', y_label='$A$', cmap='jet', darkens_factor_cmap=1, colors=None,\n x_major_locator=None, x_minor_locator=None,\n y_major_locator=None, y_minor_locator=None,\n add_wn_axis=True, lw=1.5, ls='-', plot_zero_line=True,\n label_format_fcn=lambda name: name,\n legend_loc='best', legend_spacing=0.2, legend_columns=1, legend_column_spacing=2,\n legend_entry_prefix='pH = ', legend_entry_postfix='', plot_legend_line=True,\n fig_size=(5.5, 4.5),\n dpi=500, filepath=None, transparent=True):\n\n fig, ax = plt.subplots(1, 1, figsize=fig_size)\n\n x = group_item[0].data[:, 0]\n sel_items = group_item[slice_to_plot]\n\n x_range = (x_lim[0] if x_lim[0] is not None else x[0], x_lim[1] if x_lim[1] is not None else x[-1])\n\n set_main_axis(ax, x_label=x_label, y_label=y_label, xlim=x_range, ylim=y_lim,\n x_major_locator=x_major_locator, x_minor_locator=x_minor_locator,\n y_major_locator=y_major_locator, y_minor_locator=y_minor_locator)\n\n if add_wn_axis:\n _ = setup_wavenumber_axis(ax, x_major_locator=MultipleLocator(0.5))\n\n _cmap = cm.get_cmap(cmap, len(sel_items))\n\n if plot_zero_line:\n\n ax.axhline(0, x_range[0], x_range[1], ls='--', color='black', lw=1)\n\n for i, item in enumerate(sel_items):\n if colors is None:\n color = np.asarray(c.to_rgb(_cmap(i))) * darkens_factor_cmap\n color[color > 1] = 1\n else:\n color = colors[i % len(colors)]\n\n ax.plot(item.x, item.y, color=color, lw=lw, ls=ls,\n label=f'{legend_entry_prefix}{label_format_fcn(item.name)}{legend_entry_postfix}')\n\n l = ax.legend(loc=legend_loc, frameon=False, labelspacing=legend_spacing, ncol=legend_columns,\n handlelength=None if plot_legend_line else 0, handletextpad=None if plot_legend_line else 0,\n columnspacing=legend_column_spacing)\n\n for i, text in enumerate(l.get_texts()):\n # text.set_ha('right')\n text.set_color(_cmap(i))\n\n ax.set_axisbelow(False)\n ax.yaxis.set_ticks_position('both')\n\n plt.tight_layout()\n\n if filepath:\n ext = os.path.splitext(filepath)[1].lower()[1:]\n plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)\n else:\n plt.show()\n\n\ndef plot_EEMs(EEM_group_items: list, n_rows: int = None, n_cols: int = None, log_z: bool = False, transform2wavenumber=True,\n fig_size_one_graph=(5.5, 4), x_lim=(None, None), y_lim=(None, None), z_lim=(1, None), filepath=None, dpi=500,\n transparent=False, show_title=True, cmap='hot_r', z_label='Counts', 
x_major_locators=(None, None), x_minor_locators=(None, None),\n y_major_locators=(None, None), y_minor_locators=(None, None)):\n\n \"\"\"This will assume that excitation wavelengths are used as names for individual spectra\n x a y lims are in given wavelengths, despite the possible recalculation to wavenumber.\"\"\"\n\n n = len(EEM_group_items) # number of EEMs to plot\n\n if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs\n sqrt = n ** 0.5\n n_rows = int(np.ceil(sqrt))\n n_cols = int(sqrt)\n elif n_rows is None and n_cols is not None:\n n_rows = int(np.ceil(n / n_cols))\n elif n_rows is not None and n_cols is None:\n n_cols = int(np.ceil(n / n_rows))\n\n # assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))\n\n axes = axes.flatten() if np.iterable(axes) else [axes]\n\n t2w = lambda x: 1e3 / x # function that transforms wavelength into 10^4 cm-1\n\n for ax, item in zip(axes, EEM_group_items):\n\n em_wls, ex_wls, mat = group2mat(item) # convert group to matrix and extracts ex. wavelengths\n if ex_wls is None:\n raise ValueError(f'Excitation wavelengths of {item.name} could not be extracted from spectra names.')\n\n x, y = em_wls, ex_wls\n\n # emission wavelengths limits\n xlim0 = x_lim[0] if x_lim[0] is not None else x[0]\n xlim1 = x_lim[1] if x_lim[1] is not None else x[-1]\n\n # excitation wavelengths limits\n ylim0 = y_lim[0] if y_lim[0] is not None else y[0]\n ylim1 = y_lim[1] if y_lim[1] is not None else y[-1]\n\n x_label_down, x_label_top = 'Em. wavelength / nm', 'Em. wavenumber / $10^4$ cm$^{-1}$'\n y_label_left, y_label_right = 'Ex. wavelength / nm', 'Ex. 
wavenumber / $10^4$ cm$^{-1}$'\n\n if transform2wavenumber:\n x, y = t2w(x), t2w(y)\n xlim0, xlim1 = t2w(xlim0), t2w(xlim1)\n ylim0, ylim1 = t2w(ylim0), t2w(ylim1)\n\n # switch the labels\n x_label_down, x_label_top = x_label_top, x_label_down\n y_label_left, y_label_right = y_label_right, y_label_left\n\n _set_main_axis(ax, xlim=(xlim0, xlim1), ylim=(ylim0, ylim1),\n y_label=y_label_left,\n x_label=x_label_down, direction='out',\n x_major_locator=x_major_locators[0],\n y_major_locator=y_major_locators[0],\n x_minor_locator=x_minor_locators[0],\n y_minor_locator=y_minor_locators[0])\n\n if log_z: # use log of z axis\n # mat[mat < 0] = 0\n zmin = mat.max() * 1e-3 if z_lim[0] is None else z_lim[0] # 3 orders lower than max as default value\n else:\n zmin = mat.min() if z_lim[0] is None else z_lim[0] # for linear plot, min as default value\n\n zmax = mat.max() if z_lim[1] is None else z_lim[1]\n\n # add left axis\n lambda_ax = ax.secondary_xaxis('top', functions=(t2w, t2w))\n lambda_ax.tick_params(which='both', direction='out', zorder=1000)\n if x_major_locators[1] is not None:\n lambda_ax.xaxis.set_major_locator(x_major_locators[1]) # FixedLocator([500, 600, ...])\n if x_minor_locators[1] is not None:\n lambda_ax.xaxis.set_minor_locator(x_minor_locators[1])\n lambda_ax.set_xlabel(x_label_top)\n\n # add right axis\n lambda_ax2 = ax.secondary_yaxis('right', functions=(t2w, t2w))\n lambda_ax2.tick_params(which='both', direction='out', zorder=1000)\n if y_major_locators[1] is not None:\n lambda_ax2.yaxis.set_major_locator(y_major_locators[1]) # MultipleLocator(20)\n if y_minor_locators[1] is not None:\n lambda_ax2.yaxis.set_minor_locator(y_minor_locators[1]) # AutoMinorLocator(2)\n lambda_ax2.set_ylabel(y_label_right)\n\n # norm for z values\n norm = mpl.colors.LogNorm(vmin=zmin, vmax=zmax, clip=True) if log_z else mpl.colors.Normalize(vmin=zmin,\n vmax=zmax,\n clip=True)\n _x, _y = np.meshgrid(x, y)\n mappable = ax.pcolormesh(_x, _y, mat.T, norm=norm, cmap=cmap, shading='auto')\n fig.colorbar(mappable, ax=ax, label=z_label, pad=0.17, format=None if log_z else '%.0e')\n\n if show_title:\n ax.set_title(item.name)\n\n # if x_major_formatter:\n # ax_data.xaxis.set_major_formatter(x_major_formatter)\n # ax_res.xaxis.set_major_formatter(x_major_formatter)\n\n plt.tight_layout()\n\n if filepath:\n ext = os.path.splitext(filepath)[1].lower()[1:]\n plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)\n\n plt.show()\n\n\ndef plot_fit(data_item, fit_item, residuals_item, symlog=False, linscale=1, linthresh=100,\n lw_data=0.5, lw_fit=1.5, fig_size_one_graph=(5, 4), y_label='$\\\\Delta$A', x_label='Time / $\\\\mu$s',\n x_lim=(None, None), t_mul_factor=1, y_lim=(None, None), x_margin=1, y_margin=1.05, filepath=None, dpi=500,\n transparent=False, x_major_formatter=ScalarFormatter(), x_major_locator=None, y_major_locator=None,\n data_color='red', show_title=True):\n\n plot_fits([data_item], [fit_item], [residuals_item], n_rows=1, n_cols=1, symlog=symlog, linscale=linscale,\n linthresh=linthresh, lw_data=lw_data, lw_fit=lw_fit, fig_size_one_graph=fig_size_one_graph,\n y_label=y_label, x_label=x_label, x_lim=x_lim, t_mul_factor=t_mul_factor, y_lim=y_lim, x_margin=x_margin,\n y_margin=y_margin, filepath=filepath, dpi=dpi, transparent=transparent,\n x_major_formatter=x_major_formatter, x_major_locator=x_major_locator, y_major_locator=y_major_locator,\n data_color=data_color, show_title=show_title)\n\n\ndef plot_fits(data_group, fit_group, residuals_group, n_rows=None, n_cols=None, 
symlog=False, linscale=1, linthresh=100,\n lw_data=0.5, lw_fit=1.5, fig_size_one_graph=(5, 4), y_label='$\\\\Delta$A', x_label='Time / $\\\\mu$s',\n x_lim=(None, None), t_mul_factor=1, y_lim=(None, None), x_margin=1, y_margin=1.05, filepath=None, dpi=500,\n transparent=False, x_major_formatter=ScalarFormatter(), x_major_locator=None, y_major_locator=None,\n data_color='red', show_title=True):\n\n n = len(data_group)\n assert n == len(fit_group) == len(residuals_group)\n\n if n_rows is None and n_cols is None: # estimate the n_rows and n_cols from the sqrt of number of graphs\n sqrt = n ** 0.5\n n_rows = int(np.ceil(sqrt))\n n_cols = int(sqrt)\n elif n_rows is None and n_cols is not None:\n n_rows = int(np.ceil(n / n_cols))\n elif n_rows is not None and n_cols is None:\n n_cols = int(np.ceil(n / n_rows))\n\n # assert n_rows * n_cols >= n # not necessary, if the condition is not valid, fewer plots will be plotted\n\n fig = plt.figure(figsize=(fig_size_one_graph[0] * n_cols, fig_size_one_graph[1] * n_rows))\n\n outer_grid = gridspec.GridSpec(n_rows, n_cols, wspace=0.25, hspace=0.3)\n\n for og, data, fit, res in zip(outer_grid, data_group, fit_group, residuals_group):\n\n # nice tutorial about gridspec here https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.gridspec.GridSpecFromSubplotSpec.html\n # each unit consist of two graphs - data and residuals\n inner_grid = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=og, wspace=0.1, hspace=0.1,\n height_ratios=(4, 1))\n\n ax_data = fig.add_subplot(inner_grid[0])\n ax_res = fig.add_subplot(inner_grid[1])\n\n t_data = data.data[:, 0] * t_mul_factor\n\n _x_lim = list(x_lim)\n _y_lim = list(y_lim)\n\n _x_lim[0] = data.data[0, 0] * x_margin * t_mul_factor if _x_lim[0] is None else _x_lim[0]\n _x_lim[1] = data.data[-1, 0] * x_margin * t_mul_factor if _x_lim[1] is None else _x_lim[1]\n\n _y_lim[0] = data.data[:, 1].min() * y_margin if _y_lim[0] is None else _y_lim[0]\n _y_lim[1] = data.data[:, 1].max() * y_margin if _y_lim[1] is None else _y_lim[1]\n\n _set_main_axis(ax_data, x_label=\"\", y_label=y_label, xlim=_x_lim, ylim=_y_lim, x_major_locator=x_major_locator,\n y_major_locator=y_major_locator)\n _set_main_axis(ax_res, x_label=x_label, y_label='res.', xlim=_x_lim, x_minor_locator=None, y_minor_locator=None)\n\n # plot zero lines\n ax_data.axline((0, 0), slope=0, ls='--', color='black', lw=0.5)\n ax_res.axline((0, 0), slope=0, ls='--', color='black', lw=0.5)\n\n ax_data.tick_params(labelbottom=False)\n\n if show_title:\n ax_data.set_title(data.name)\n ax_data.plot(t_data, data.data[:, 1], lw=lw_data, color=data_color)\n ax_data.plot(fit.data[:, 0] * t_mul_factor, fit.data[:, 1], lw=lw_fit, color='black')\n ax_res.plot(res.data[:, 0] * t_mul_factor, res.data[:, 1], lw=lw_data, color=data_color)\n\n ax_data.set_axisbelow(False)\n ax_res.set_axisbelow(False)\n\n ax_data.yaxis.set_ticks_position('both')\n ax_data.xaxis.set_ticks_position('both')\n\n ax_res.yaxis.set_ticks_position('both')\n ax_res.xaxis.set_ticks_position('both')\n\n if symlog:\n ax_data.set_xscale('symlog', subs=[2, 3, 4, 5, 6, 7, 8, 9], linscale=linscale, linthresh=linthresh)\n ax_res.set_xscale('symlog', subs=[2, 3, 4, 5, 6, 7, 8, 9], linscale=linscale, linthresh=linthresh)\n ax_data.xaxis.set_minor_locator(MinorSymLogLocator(linthresh))\n ax_res.xaxis.set_minor_locator(MinorSymLogLocator(linthresh))\n\n if x_major_formatter:\n ax_data.xaxis.set_major_formatter(x_major_formatter)\n ax_res.xaxis.set_major_formatter(x_major_formatter)\n\n if filepath:\n ext = 
os.path.splitext(filepath)[1].lower()[1:]\n plt.savefig(fname=filepath, format=ext, transparent=transparent, dpi=dpi)\n\n plt.show()\n\n\ndef save_group(group_item, fname='', delimiter='\\t', encoding='utf8'):\n \"\"\"Data will be saved x-axis explicit\"\"\"\n\n x, y, mat = group2mat(group_item)\n\n mat = np.vstack((x, mat.T))\n buffer = delimiter + delimiter.join(f\"{num}\" for num in y) + '\\n'\n buffer += '\\n'.join(delimiter.join(f\"{num}\" for num in row) for row in mat.T)\n\n with open(fname, 'w', encoding=encoding) as f:\n f.write(buffer)\n\n\ndef reaction_QY_relative(sample_items, actinometer_items, QY_act=1, irradiation_spectrum=None,\n irradiation_wavelength=None, integration_range=(None, None), V_solution=1,\n conc_calc_range=(None, None), samples_times=None, actinometers_times=None,\n c_0_samples=1, c_0_acts=1, integration_method='trapz'):\n \"\"\"\n TODO.....\n\n Initial spectrua of both sample and actinometers must be at time = 0 (before irradiaiton).\n\n if conc_calc_range is (None, None), all spectrum will be taken for calclation of conccentration\n V solution\n\n integration method works for spectra integration as well as for cumulative time integration\n\n\n :param sample_items:\n :param actinometer_items:\n :param irradiation_spectrum:\n :param irradiation_wavelength:\n :param integration_range:\n :param sample_times:\n :param actinometer_times:\n :param integration_method: trapz or simps - simpsons rule\n :return:\n \"\"\"\n\n # type checking\n if sample_items is None or actinometer_items is None:\n raise ValueError(\"Arguments sample_items and actinometer_items must not be None.\")\n\n if not isinstance(sample_items, (list, tuple, SpectrumList)) and \\\n not isinstance(actinometer_items, (list, tuple, SpectrumList)):\n raise ValueError(\"Arguments sample_items and actinometer_items must be type list, tuple or SpectrumList.\")\n\n if isinstance(sample_items, (list, tuple)) and not isinstance(sample_items[0], SpectrumList):\n raise ValueError(\"Entries of sample_items must be type of SpectrumList.\")\n\n if isinstance(actinometer_items, (list, tuple)) and not isinstance(actinometer_items[0], SpectrumList):\n raise ValueError(\"Entries of actinometer_items must be type of SpectrumList.\")\n\n if irradiation_wavelength is None and irradiation_spectrum is None:\n raise ValueError(\"Argument irradiation_spectrum or irradiation_wavelength must be provided.\")\n\n if irradiation_spectrum is not None and irradiation_wavelength is not None:\n raise ValueError(\"Only one argument of irradiation_spectrum or irradiation_wavelength must be provided.\")\n\n if irradiation_spectrum is None and not isinstance(irradiation_wavelength, (int, float)):\n raise ValueError(\"Argument irradiation_wavelength must be type of int or float.\")\n\n if irradiation_wavelength is None and not isinstance(irradiation_spectrum, (Spectrum, np.ndarray, list)):\n raise ValueError(\"Argument irradiation_spectrum must be type of Spectrum, ndarray or list.\")\n\n if not isinstance(integration_range, tuple) or len(integration_range) != 2:\n raise ValueError(\"Argument integration_range must be type of tuple and have length of 2.\")\n\n samples = [sample_items] if isinstance(sample_items, SpectrumList) else sample_items\n acts = [actinometer_items] if isinstance(actinometer_items, SpectrumList) else actinometer_items\n\n if irradiation_spectrum:\n irr_sp_x = None\n if isinstance(irradiation_spectrum, Spectrum):\n irr_sp_x = irradiation_spectrum.data[:, 0]\n irr_sp_y = irradiation_spectrum.data[:, 1]\n else:\n 
irr_sp_y = np.asarray(irradiation_spectrum)\n\n x0, x1 = integration_range\n if x0 is not None and x1 is not None and x0 > x1:\n x0, x1 = x1, x0\n\n def abs_photons(data):\n start = 0\n end = data.shape[0]\n start_sp = 0\n end_sp = irr_sp_y.shape[0]\n if x0 is not None:\n start = fi(data[:, 0], x0)\n start_sp = fi(irr_sp_y, x0)\n if x1 is not None:\n end = fi(data[:, 0], x1) + 1\n end_sp = fi(irr_sp_y, x1) + 1\n\n if start - end != start_sp - end_sp:\n if irr_sp_x is None:\n raise ValueError(\"Irradiation spectrum and data does not have equal dimension.\")\n irr = np.interp(data[:, 0], irr_sp_x, irr_sp_y) # interpolate to match data if x vals are provided\n else:\n irr = irr_sp_y[start_sp:end_sp] # slice the irradiation spectrum to match the data\n\n x = data[start:end, 0]\n y = data[start:end, 1]\n\n abs_light = (1 - 10 ** -y) * irr # (1 - 10 ** -A) * I_irr\n\n if integration_method == 'trapz':\n return np.trapz(abs_light, x) # integrate using trapezoidal rule\n else:\n return simps(abs_light, x) # integrate using simpsons rule\n\n else: # only irradiation wavelength\n def abs_photons(data):\n idx = fi(data[:, 0], irradiation_wavelength)\n return 1 - 10 ** -data[idx, 1] # 1 - 10 ** -A\n\n _sample_times = [] if samples_times is None else samples_times\n _acs_times = [] if actinometers_times is None else actinometers_times\n\n if samples_times is None:\n for sample in samples:\n _sample_times = np.asarray([float(sp.name) for sp in sample])\n\n if actinometers_times is None:\n for act in acts:\n _acs_times = np.asarray([float(sp.name) for sp in act])\n\n _c_0_samples = [c_0_samples] * len(samples) if isinstance(c_0_samples, (int, float)) else c_0_samples\n _c_0_acts = [c_0_acts] * len(acts) if isinstance(c_0_acts, (int, float)) else c_0_acts\n\n def cumintegrate(y, x, initial=0):\n if integration_method == 'trapz':\n return cumtrapz(y, x, initial=initial)\n else:\n # simpsons rule\n raise NotImplementedError() # TODO----->\n\n def calc_c(unknown_sp_data, sp_data_c0, c0=1):\n \"\"\"\n Calculation of concentariton by least squares.\n\n :param unknown_sp_data:\n :param sp_data_c0:\n :param c0:\n :return:\n \"\"\"\n assert unknown_sp_data.shape == sp_data_c0.shape\n x0, x1 = conc_calc_range\n start = 0\n end = sp_data_c0.shape[0]\n if x0 is not None:\n start = fi(sp_data_c0[:, 0], x0)\n if x1 is not None:\n end = fi(sp_data_c0[:, 0], x1) + 1\n\n # for min || xA - B ||_2^2 for scalar x, x = sum(A*B) / sum(A*A)\n a = sp_data_c0[start:end, 1]\n b = unknown_sp_data[start:end, 1]\n\n return c0 * (a * b).sum() / (a * a).sum()\n\n results_sample = SpectrumList(name='Results of samples')\n results_act = SpectrumList(name='Results of actinometers')\n\n def calculate_line(sl, times, c0):\n # calculate the amount of absorbed photons\n qs = np.asarray([abs_photons(sp.data) for sp in sl])\n # calculate the time-dependent concentration\n c0s = np.asarray([calc_c(sp.data, sl[0].data, c0) for sp in sl])\n # Delta n = (c(t=0) - c(t)) * V\n d_n_dec = (c0s[0] - c0s) * V_solution\n # cumulative integration of light absorbed: int_0^t q(t') dt'\n np_abs = cumintegrate(qs, times, initial=0)\n\n return Spectrum.from_xy_values(np_abs, d_n_dec, name=f'Result for {sl.name}')\n\n for sample, s_times, c0_sample in zip(samples, _sample_times, _c_0_samples):\n results_sample.children.append(calculate_line(sample, s_times, c0_sample))\n\n for act, act_times, c0_act in zip(acts, _acs_times, _c_0_acts):\n results_act.children.append(calculate_line(act, act_times, c0_act))\n\n QYs = []\n\n # calculate for each combination of 
sample and actinometer kinetics\n for res_sample in results_sample:\n slope_sam, intercept_sam, r_sam, _, err_sam = linregress(res_sample.x, res_sample.y)\n\n for res_act in results_act:\n slope_act, intercept_act, r_act, _, err_act = linregress(res_act.x, res_act.y)\n\n # use uncertainty package to automatically propagate errors\n # QY is type of ufloat - uncertainties.core.Variable\n QY = QY_act * ufloat(slope_sam, err_sam) / ufloat(slope_act, err_act)\n\n QYs.append(QY)\n\n average_QY = sum(QYs) / len(QYs)\n\n add_to_list(results_sample)\n add_to_list(results_act)\n\n\n\n\n\n\ndef bcorr_1D(item, first_der_tresh=1e-4, second_der_tresh=0.1):\n \"\"\"Gradient based baseline correction\"\"\"\n\n x = item.data[:, 0].copy()\n y = item.data[:, 1].copy()\n\n grad1 = np.gradient(y, x) # first central derivative\n grad2 = np.gradient(grad1, x) # second central derivative\n\n grad1, grad2 = grad1 / grad1.max(), grad2 / grad2.max()\n\n zero_idxs = np.argwhere(\n (grad1 < first_der_tresh) & (grad1 > -first_der_tresh) &\n (grad2 < second_der_tresh) & (grad2 >= 0)\n )\n\n zero_idxs = zero_idxs.squeeze()\n\n baseline = np.interp(x, x[zero_idxs], y[zero_idxs])\n\n sp = Spectrum.from_xy_values(x, baseline, f'{item.name} baseline')\n UserNamespace.instance.add_items_to_list(sp)\n\n\nclass UserNamespace:\n instance = None\n\n def __init__(self, main):\n\n self.main = main\n UserNamespace.instance = self\n self.tw = self.main.tree_widget\n\n # execute first commands\n self.main.console.execute_command(\n \"\"\"\n import numpy as np\n from spectramanipulator.user_namespace import *\n # from spectramanipulator.spectrum import fi, group2mat\n import matplotlib.pyplot as plt\n %matplotlib inline\n \n # setup important methods\n add_to_list = UserNamespace.instance.tw.add_to_list\n load_kinetic = UserNamespace.instance.tw.load_kinetic\n load_kinetics = UserNamespace.instance.tw.load_kinetics\n import_files = UserNamespace.instance.tw.import_files\n \n \"\"\"\n )\n\n # from IPython.display import display, Math, Latex\\n\n\n self.main.console.push_variables(\n {\n 'main': self.main,\n 'tree_widget': self.main.tree_widget,\n 'item': self.main.tree_widget.myModel.root\n }\n )\n\n # def add_items_to_list(self, spectra):\n # \"\"\"\n # Copies all spectra and import them to the treewidget\n # :param spectra: input parameter can be single spectrum object, or hierarchic list of spectra\n # \"\"\"\n #\n # # self.main.tree_widget.get\n #\n # if spectra.__class__ == Spectrum:\n # self.main.tree_widget.import_spectra([spectra])\n # return\n #\n # if spectra.__class__.__name__ == 'SpectrumItem':\n # self.main.tree_widget.import_spectra([spectra.__copy__()])\n # return\n #\n # if isinstance(spectra, list):\n # self.main.tree_widget.import_spectra(spectra)\n # return\n #\n # if spectra.__class__.__name__ == 'SpectrumItemGroup' or spectra.__class__.__name__ == 'SpectrumList':\n # l = []\n # for sp in spectra:\n # new_sp = sp.__copy__()\n # new_sp.group_name = spectra.name\n # l.append(new_sp)\n #\n # self.main.tree_widget.import_spectra([l])\n # return\n" ]
[ [ "scipy.stats.linregress", "numpy.exp", "numpy.gradient", "matplotlib.gridspec.GridSpecFromSubplotSpec", "numpy.nan_to_num", "numpy.log", "matplotlib.pyplot.savefig", "numpy.interp", "matplotlib.pyplot.subplots", "scipy.linalg.lstsq", "numpy.arange", "matplotlib.pyplot.tight_layout", "numpy.trapz", "numpy.append", "numpy.log10", "numpy.vstack", "numpy.array", "scipy.integrate.simps", "matplotlib.cm.get_cmap", "numpy.round", "scipy.integrate.cumtrapz", "matplotlib.pyplot.figure", "matplotlib.colors.SymLogNorm", "numpy.argwhere", "matplotlib.pyplot.cm.ScalarMappable", "matplotlib.pyplot.show", "matplotlib.ticker.ScalarFormatter", "numpy.insert", "matplotlib.gridspec.GridSpec", "matplotlib.colors.LogNorm", "numpy.ceil", "numpy.iterable", "numpy.asarray", "matplotlib.colors.Normalize", "numpy.abs", "numpy.ma.is_masked", "numpy.linspace", "numpy.meshgrid" ] ]
ALWYNWU/stoic2021-baseline
[ "6372b7185a335a7ca78d8fac5dec775ce3b5c121" ]
[ "algorithm/i3d/i3dpt.py" ]
[ "# Altered from https://github.com/hassony2/kinetics_i3d_pytorch\n\nimport math\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef get_padding_shape(filter_shape, stride):\n def _pad_top_bottom(filter_dim, stride_val):\n pad_along = max(filter_dim - stride_val, 0)\n pad_top = pad_along // 2\n pad_bottom = pad_along - pad_top\n return pad_top, pad_bottom\n\n padding_shape = []\n for filter_dim, stride_val in zip(filter_shape, stride):\n pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val)\n padding_shape.append(pad_top)\n padding_shape.append(pad_bottom)\n depth_top = padding_shape.pop(0)\n depth_bottom = padding_shape.pop(0)\n padding_shape.append(depth_top)\n padding_shape.append(depth_bottom)\n\n return tuple(padding_shape)\n\n\ndef simplify_padding(padding_shapes):\n all_same = True\n padding_init = padding_shapes[0]\n for pad in padding_shapes[1:]:\n if pad != padding_init:\n all_same = False\n return all_same, padding_init\n\n\nclass Unit3Dpy(torch.nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=(1, 1, 1),\n stride=(1, 1, 1),\n activation='relu',\n padding='SAME',\n use_bias=False,\n use_bn=True):\n super(Unit3Dpy, self).__init__()\n\n self.padding = padding\n self.activation = activation\n self.use_bn = use_bn\n if padding == 'SAME':\n padding_shape = get_padding_shape(kernel_size, stride)\n simplify_pad, pad_size = simplify_padding(padding_shape)\n self.simplify_pad = simplify_pad\n elif padding == 'VALID':\n padding_shape = 0\n else:\n raise ValueError(\n 'padding should be in [VALID|SAME] but got {}'.format(padding))\n\n if padding == 'SAME':\n if not simplify_pad:\n self.pad = torch.nn.ConstantPad3d(padding_shape, 0)\n self.conv3d = torch.nn.Conv3d(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n bias=use_bias)\n else:\n self.conv3d = torch.nn.Conv3d(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=pad_size,\n bias=use_bias)\n elif padding == 'VALID':\n self.conv3d = torch.nn.Conv3d(\n in_channels,\n out_channels,\n kernel_size,\n padding=padding_shape,\n stride=stride,\n bias=use_bias)\n else:\n raise ValueError(\n 'padding should be in [VALID|SAME] but got {}'.format(padding))\n\n if self.use_bn:\n self.batch3d = torch.nn.BatchNorm3d(out_channels)\n\n if activation == 'relu':\n self.activation = torch.nn.functional.relu\n\n def forward(self, inp):\n if self.padding == 'SAME' and self.simplify_pad is False:\n inp = self.pad(inp)\n out = self.conv3d(inp)\n if self.use_bn:\n out = self.batch3d(out)\n if self.activation is not None:\n out = torch.nn.functional.relu(out)\n return out\n\n\nclass MaxPool3dTFPadding(torch.nn.Module):\n def __init__(self, kernel_size, stride=None, padding='SAME'):\n super(MaxPool3dTFPadding, self).__init__()\n if padding == 'SAME':\n padding_shape = get_padding_shape(kernel_size, stride)\n self.padding_shape = padding_shape\n self.pad = torch.nn.ConstantPad3d(padding_shape, 0)\n self.pool = torch.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)\n\n def forward(self, inp):\n inp = self.pad(inp)\n out = self.pool(inp)\n return out\n\n\nclass Mixed(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(Mixed, self).__init__()\n # Branch 0\n self.branch_0 = Unit3Dpy(\n in_channels, out_channels[0], kernel_size=(1, 1, 1))\n\n # Branch 1\n branch_1_conv1 = Unit3Dpy(\n in_channels, out_channels[1], kernel_size=(1, 1, 1))\n branch_1_conv2 = Unit3Dpy(\n out_channels[1], out_channels[2], kernel_size=(3, 3, 3))\n 
self.branch_1 = torch.nn.Sequential(branch_1_conv1, branch_1_conv2)\n\n # Branch 2\n branch_2_conv1 = Unit3Dpy(\n in_channels, out_channels[3], kernel_size=(1, 1, 1))\n branch_2_conv2 = Unit3Dpy(\n out_channels[3], out_channels[4], kernel_size=(3, 3, 3))\n self.branch_2 = torch.nn.Sequential(branch_2_conv1, branch_2_conv2)\n\n # Branch3\n branch_3_pool = MaxPool3dTFPadding(\n kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME')\n branch_3_conv2 = Unit3Dpy(\n in_channels, out_channels[5], kernel_size=(1, 1, 1))\n self.branch_3 = torch.nn.Sequential(branch_3_pool, branch_3_conv2)\n\n def forward(self, inp):\n out_0 = self.branch_0(inp)\n out_1 = self.branch_1(inp)\n out_2 = self.branch_2(inp)\n out_3 = self.branch_3(inp)\n out = torch.cat((out_0, out_1, out_2, out_3), 1)\n return out\n\n\nclass I3D(torch.nn.Module):\n def __init__(self,\n input_channels=3,\n nr_outputs=2,\n modality='rgb',\n dropout_prob=0,\n name='inception',\n pre_trained=False,\n pre_trained_path='./algorithm/i3d/model_rgb.pth'):\n super(I3D, self).__init__()\n self.name = name\n self.input_channels = input_channels\n if modality == 'rgb':\n in_channels = 3\n elif modality == 'flow':\n in_channels = 2\n else:\n raise ValueError(\n '{} not among known modalities [rgb|flow]'.format(modality))\n if in_channels != input_channels:\n raise ValueError(\"Modality \"+str(modality)+' does not correspond to input_channels '+str(input_channels) +\n '. input_channels should be: '+str(3 if modality == 'rgb' else 2))\n self.modality = modality\n\n conv3d_1a_7x7 = Unit3Dpy(\n out_channels=64,\n in_channels=in_channels,\n kernel_size=(7, 7, 7),\n stride=(2, 2, 2),\n padding='SAME')\n # 1st conv-pool\n self.conv3d_1a_7x7 = conv3d_1a_7x7\n self.maxPool3d_2a_3x3 = MaxPool3dTFPadding(\n kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')\n # conv conv\n conv3d_2b_1x1 = Unit3Dpy(\n out_channels=64,\n in_channels=64,\n kernel_size=(1, 1, 1),\n padding='SAME')\n self.conv3d_2b_1x1 = conv3d_2b_1x1\n conv3d_2c_3x3 = Unit3Dpy(\n out_channels=192,\n in_channels=64,\n kernel_size=(3, 3, 3),\n padding='SAME')\n self.conv3d_2c_3x3 = conv3d_2c_3x3\n self.maxPool3d_3a_3x3 = MaxPool3dTFPadding(\n kernel_size=(1, 3, 3), stride=(1, 2, 2), padding='SAME')\n\n # Mixed_3b\n self.mixed_3b = Mixed(192, [64, 96, 128, 16, 32, 32])\n self.mixed_3c = Mixed(256, [128, 128, 192, 32, 96, 64])\n\n self.maxPool3d_4a_3x3 = MaxPool3dTFPadding(\n kernel_size=(3, 3, 3), stride=(2, 2, 2), padding='SAME')\n\n # Mixed 4\n self.mixed_4b = Mixed(480, [192, 96, 208, 16, 48, 64])\n self.mixed_4c = Mixed(512, [160, 112, 224, 24, 64, 64])\n self.mixed_4d = Mixed(512, [128, 128, 256, 24, 64, 64])\n self.mixed_4e = Mixed(512, [112, 144, 288, 32, 64, 64])\n self.mixed_4f = Mixed(528, [256, 160, 320, 32, 128, 128])\n\n self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(\n kernel_size=(2, 2, 2), stride=(2, 2, 2), padding='SAME')\n\n # Mixed 5\n self.mixed_5b = Mixed(832, [256, 160, 320, 32, 128, 128])\n self.mixed_5c = Mixed(832, [384, 192, 384, 48, 128, 128])\n\n self.avg_pool = torch.nn.AvgPool3d((2, 7, 7), (1, 1, 1))\n self.dropout = torch.nn.Dropout(dropout_prob)\n\n # set original final layer to original 400 classes\n self.conv3d_0c_1x1 = Unit3Dpy(\n in_channels=1024,\n out_channels=400,\n kernel_size=(1, 1, 1),\n activation=None,\n use_bias=True,\n use_bn=False)\n\n if pre_trained:\n self.load_state_dict(torch.load(pre_trained_path))\n\n # add new final linear layer to replace the original one\n self.linear = torch.nn.Linear(1024, nr_outputs)\n\n def forward(self, inp):\n if 
self.input_channels == 3 and inp.shape[1] == 1:\n inp = inp.expand(-1, 3, -1, -1, -1)\n\n out = self.conv3d_1a_7x7(inp)\n out = self.maxPool3d_2a_3x3(out)\n out = self.conv3d_2b_1x1(out)\n out = self.conv3d_2c_3x3(out)\n out = self.maxPool3d_3a_3x3(out)\n out = self.mixed_3b(out)\n out = self.mixed_3c(out)\n out = self.maxPool3d_4a_3x3(out)\n out = self.mixed_4b(out)\n out = self.mixed_4c(out)\n out = self.mixed_4d(out)\n out = self.mixed_4e(out)\n out = self.mixed_4f(out)\n out = self.maxPool3d_5a_2x2(out)\n out = self.mixed_5b(out)\n out = self.mixed_5c(out)\n out = self.avg_pool(out)\n features = F.normalize(torch.flatten(out,1), dim=1)\n out = self.dropout(out)\n\n out = out.mean(dim=[2, 3, 4])\n out = self.linear(out)\n return features, out\n\n def load_tf_weights(self, sess):\n state_dict = {}\n if self.modality == 'rgb':\n prefix = 'RGB/inception_i3d'\n elif self.modality == 'flow':\n prefix = 'Flow/inception_i3d'\n load_conv3d(state_dict, 'conv3d_1a_7x7', sess,\n os.path.join(prefix, 'Conv3d_1a_7x7'))\n load_conv3d(state_dict, 'conv3d_2b_1x1', sess,\n os.path.join(prefix, 'Conv3d_2b_1x1'))\n load_conv3d(state_dict, 'conv3d_2c_3x3', sess,\n os.path.join(prefix, 'Conv3d_2c_3x3'))\n\n load_mixed(state_dict, 'mixed_3b', sess,\n os.path.join(prefix, 'Mixed_3b'))\n load_mixed(state_dict, 'mixed_3c', sess,\n os.path.join(prefix, 'Mixed_3c'))\n load_mixed(state_dict, 'mixed_4b', sess,\n os.path.join(prefix, 'Mixed_4b'))\n load_mixed(state_dict, 'mixed_4c', sess,\n os.path.join(prefix, 'Mixed_4c'))\n load_mixed(state_dict, 'mixed_4d', sess,\n os.path.join(prefix, 'Mixed_4d'))\n load_mixed(state_dict, 'mixed_4e', sess,\n os.path.join(prefix, 'Mixed_4e'))\n # Here goest to 0.1 max error with tf\n load_mixed(state_dict, 'mixed_4f', sess,\n os.path.join(prefix, 'Mixed_4f'))\n\n load_mixed(\n state_dict,\n 'mixed_5b',\n sess,\n os.path.join(prefix, 'Mixed_5b'),\n fix_typo=True)\n load_mixed(state_dict, 'mixed_5c', sess,\n os.path.join(prefix, 'Mixed_5c'))\n load_conv3d(\n state_dict,\n 'conv3d_0c_1x1',\n sess,\n os.path.join(prefix, 'Logits', 'Conv3d_0c_1x1'),\n bias=True,\n bn=False)\n self.load_state_dict(state_dict)\n\n\ndef get_conv_params(sess, name, bias=False):\n # Get conv weights\n conv_weights_tensor = sess.graph.get_tensor_by_name(\n os.path.join(name, 'w:0'))\n if bias:\n conv_bias_tensor = sess.graph.get_tensor_by_name(\n os.path.join(name, 'b:0'))\n conv_bias = sess.run(conv_bias_tensor)\n conv_weights = sess.run(conv_weights_tensor)\n conv_shape = conv_weights.shape\n\n kernel_shape = conv_shape[0:3]\n in_channels = conv_shape[3]\n out_channels = conv_shape[4]\n\n conv_op = sess.graph.get_operation_by_name(\n os.path.join(name, 'convolution'))\n padding_name = conv_op.get_attr('padding')\n padding = _get_padding(padding_name, kernel_shape)\n all_strides = conv_op.get_attr('strides')\n strides = all_strides[1:4]\n conv_params = [\n conv_weights, kernel_shape, in_channels, out_channels, strides, padding\n ]\n if bias:\n conv_params.append(conv_bias)\n return conv_params\n\n\ndef get_bn_params(sess, name):\n moving_mean_tensor = sess.graph.get_tensor_by_name(\n os.path.join(name, 'moving_mean:0'))\n moving_var_tensor = sess.graph.get_tensor_by_name(\n os.path.join(name, 'moving_variance:0'))\n beta_tensor = sess.graph.get_tensor_by_name(os.path.join(name, 'beta:0'))\n moving_mean = sess.run(moving_mean_tensor)\n moving_var = sess.run(moving_var_tensor)\n beta = sess.run(beta_tensor)\n return moving_mean, moving_var, beta\n\n\ndef _get_padding(padding_name, conv_shape):\n padding_name 
= padding_name.decode(\"utf-8\")\n if padding_name == \"VALID\":\n return [0, 0]\n elif padding_name == \"SAME\":\n # return [math.ceil(int(conv_shape[0])/2), math.ceil(int(conv_shape[1])/2)]\n return [\n math.floor(int(conv_shape[0]) / 2),\n math.floor(int(conv_shape[1]) / 2),\n math.floor(int(conv_shape[2]) / 2)\n ]\n else:\n raise ValueError('Invalid padding name ' + padding_name)\n\n\ndef load_conv3d(state_dict, name_pt, sess, name_tf, bias=False, bn=True):\n # Transfer convolution params\n conv_name_tf = os.path.join(name_tf, 'conv_3d')\n conv_params = get_conv_params(sess, conv_name_tf, bias=bias)\n if bias:\n conv_weights, kernel_shape, in_channels, out_channels, strides, padding, conv_bias = conv_params\n else:\n conv_weights, kernel_shape, in_channels, out_channels, strides, padding = conv_params\n\n conv_weights_rs = np.transpose(\n conv_weights, (4, 3, 0, 1,\n 2)) # to pt format (out_c, in_c, depth, height, width)\n state_dict[name_pt + '.conv3d.weight'] = torch.from_numpy(conv_weights_rs)\n if bias:\n state_dict[name_pt + '.conv3d.bias'] = torch.from_numpy(conv_bias)\n\n # Transfer batch norm params\n if bn:\n conv_tf_name = os.path.join(name_tf, 'batch_norm')\n moving_mean, moving_var, beta = get_bn_params(sess, conv_tf_name)\n\n out_planes = conv_weights_rs.shape[0]\n state_dict[name_pt + '.batch3d.weight'] = torch.ones(out_planes)\n state_dict[name_pt +\n '.batch3d.bias'] = torch.from_numpy(beta.squeeze())\n state_dict[name_pt\n + '.batch3d.running_mean'] = torch.from_numpy(moving_mean.squeeze())\n state_dict[name_pt\n + '.batch3d.running_var'] = torch.from_numpy(moving_var.squeeze())\n\n\ndef load_mixed(state_dict, name_pt, sess, name_tf, fix_typo=False):\n # Branch 0\n load_conv3d(state_dict, name_pt + '.branch_0', sess,\n os.path.join(name_tf, 'Branch_0/Conv3d_0a_1x1'))\n\n # Branch .1\n load_conv3d(state_dict, name_pt + '.branch_1.0', sess,\n os.path.join(name_tf, 'Branch_1/Conv3d_0a_1x1'))\n load_conv3d(state_dict, name_pt + '.branch_1.1', sess,\n os.path.join(name_tf, 'Branch_1/Conv3d_0b_3x3'))\n\n # Branch 2\n load_conv3d(state_dict, name_pt + '.branch_2.0', sess,\n os.path.join(name_tf, 'Branch_2/Conv3d_0a_1x1'))\n if fix_typo:\n load_conv3d(state_dict, name_pt + '.branch_2.1', sess,\n os.path.join(name_tf, 'Branch_2/Conv3d_0a_3x3'))\n else:\n load_conv3d(state_dict, name_pt + '.branch_2.1', sess,\n os.path.join(name_tf, 'Branch_2/Conv3d_0b_3x3'))\n\n # Branch 3\n load_conv3d(state_dict, name_pt + '.branch_3.1', sess,\n os.path.join(name_tf, 'Branch_3/Conv3d_0b_1x1'))\n" ]
[ [ "torch.nn.Linear", "torch.nn.ConstantPad3d", "torch.cat", "torch.nn.Dropout", "torch.flatten", "torch.nn.Sequential", "torch.nn.MaxPool3d", "torch.from_numpy", "torch.ones", "numpy.transpose", "torch.nn.Conv3d", "torch.load", "torch.nn.AvgPool3d", "torch.nn.functional.relu", "torch.nn.BatchNorm3d" ] ]
liubaishuo-github/VTX550_post_processor
[ "d7f530efd89887b86510391c3a89ec2283094dd4" ]
[ "test1.py" ]
[ "\r\n\r\n\r\nimport re\r\n\r\nfrom numpy import mat, cross\r\n\r\n\r\naa = [1,2,3]\r\nbb = [3,4,8]\r\n\r\n\r\n\r\na = mat(aa).T.T\r\nb = mat(bb).T.T\r\n\r\n\r\nprint(a)\r\nprint(b)\r\n" ]
[ [ "numpy.mat" ] ]
tekdogan/gcn
[ "068c79905f31c052270a70febe4e928067940562" ]
[ "py/pubmed/sag_inference/dgl_.py" ]
[ "#!/usr/bin/env python3\nimport dgl\nimport dgl.function as fn\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\nimport sys\nimport torch.cuda.profiler as profiler\nimport pyprof\nimport dgl.data as da\nfrom dgl.nn import SAGEConv\n\npyprof.init()\n\nwith torch.autograd.profiler.emit_nvtx():\n\n profiler.start()\n\n gcn_msg = fn.copy_src(src='h', out='m')\n gcn_reduce = fn.sum(msg='m', out='h')\n\n class GCNLayer(nn.Module):\n def __init__(self, in_feats, out_feats):\n super(GCNLayer, self).__init__()\n self.linear = nn.Linear(in_feats, out_feats)\n\n def forward(self, g, feature):\n # Creating a local scope so that all the stored ndata and edata\n # (such as the `'h'` ndata below) are automatically popped out\n # when the scope exits.\n with g.local_scope():\n g.ndata['h'] = feature\n g.update_all(gcn_msg, gcn_reduce)\n h = g.ndata['h']\n return self.linear(h)\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.layer1 = SAGEConv(500, 16, 'gcn')\n self.layer2 = SAGEConv(16, 3, 'gcn')\n\n def forward(self, g, features):\n x = F.relu(self.layer1(g, features))\n x = F.log_softmax(self.layer2(g, x))\n return x\n\n from dgl.data import citation_graph as citegrh\n import networkx as nx\n def load_pubmed_data():\n data = citegrh.load_pubmed()\n features = torch.FloatTensor(data.features)\n labels = torch.LongTensor(data.labels)\n train_mask = torch.BoolTensor(data.train_mask)\n test_mask = torch.BoolTensor(data.test_mask)\n g = DGLGraph(data.graph)\n return g, features, labels, train_mask, test_mask\n\n\n data = citegrh.load_pubmed()\n #features = torch.FloatTensor(data.features)\n #g = DGLGraph(data.graph).to(device)\n\n\n #dataset = da.CoraGraphDataset()\n\n device = torch.device('cuda')\n\n #model = Net()\n model = Net().to(device)\n\n features = torch.FloatTensor(data.features).to(device)\n g = DGLGraph(data.graph).to(device)\n\n #data = dataset[0].to(device)\n\n g = g.to(device)\n\n out = model(g, features)\n\n profiler.stop()\n\n #print(net)\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.autograd.profiler.emit_nvtx", "torch.cuda.profiler.stop", "torch.FloatTensor", "torch.cuda.profiler.start", "torch.LongTensor", "torch.BoolTensor" ] ]
dibschat/activity-recognition-KTH
[ "a01e9c4d0d70a68bffd6c4f446c22b4b0fd720bb" ]
[ "src/contextual_3x3.py" ]
[ "import numpy as np\nimport cv2\nfrom utils import window, mag_check, dir_check, create_hist_context\n\n# create Haar-cascade object\nbody_cascade = cv2.CascadeClassifier('cascadG.xml')\n\n# create background-subtraction object\nfgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = False)\nkernel = np.ones((5,5),np.uint8)\n\ndef dense_flow(fm):\n # initialize variables\n count = 0\n x = y = w = h = 0\n magnitude_histogram = []\n direction_histogram = []\n magnitude_histogram1 = []\n direction_histogram1 = []\n magnitude_histogram2 = []\n direction_histogram2 = []\n magnitude_histogram3 = []\n direction_histogram3 = []\n magnitude_histogram4 = []\n direction_histogram4 = [] \n magnitude_histogram5 = []\n direction_histogram5 = []\n magnitude_histogram6 = []\n direction_histogram6 = []\n magnitude_histogram7 = []\n direction_histogram7 = []\n magnitude_histogram8 = []\n direction_histogram8 = []\n magnitude_histogram9 = []\n direction_histogram9 = []\n\n # start reading the video\n cap = cv2.VideoCapture(fm)\n\n # take the first frame and convert it to gray\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # create the HSV color image\n hsvImg = np.zeros_like(frame)\n hsvImg[..., 1] = 255\n \n # play until the user decides to stop\n frame_no = 0\n while True:\n # save the previous frame data\n previousGray = gray\n # get the next frame\n ret , frame = cap.read()\n \n if ret:\n # background-subtraction\n fgmask = fgbg.apply(frame)\n\n # median-blur\n seg_mask = cv2.medianBlur(fgmask, 5)\n\n # dilation\n seg_mask = cv2.dilate(seg_mask, kernel, iterations = 1)\n\n # for drawing bounding box over the entire body\n body = body_cascade.detectMultiScale(gray, 1.05, 3)\n if(len(body)!=0):\n for (x_t,y_t,w_t,h_t) in body: \n x, y, w, h = x_t, y_t, w_t, h_t\n \n # convert the frame to gray scale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n # exception-handling\n if((x, y, w, h) == (0 ,0, 0, 0)):\n continue\n\n # calculate the dense optical flow\n flow = cv2.calcOpticalFlowFarneback(previousGray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n \n # obtain the flow magnitude and direction angle\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n mag = cv2.bitwise_and(mag, mag, mask = seg_mask)\n ang = cv2.bitwise_and(ang, ang, mask = seg_mask)\n\n # scaling\n ang=((ang*180)/(np.pi/2))%180\n \n # find the intersection points to draw the 3x3 grid\n k=1\n if(w%3==0):\n k=0\n c_x1 = x+(w//3)+k\n c_x2 = (x+2*(w//3))+k\n \n k=1\n if(h%3==0):\n k=0\n c_y1 = y+(h//3)+k\n c_y2 = (y+2*(h//3))+k \n\n flag1=flag2=flag3=flag4=0\n if(x-5>=0):\n x-=5\n flag1=1\n if(x+w+10<ang.shape[1]):\n w+=10\n flag2=1 \n if(y-5>=0):\n y-=5\n flag3=1\n if(y+h+10<ang.shape[0]):\n h+=10\n flag4=1\n\n # extract the region-of-interests corresponding to the 3x3 grids\n roi_mag1 = mag[y:c_y1, x:c_x1]\n roi_mag2 = mag[y:c_y1, c_x1:c_x2]\n roi_mag3 = mag[y:c_y1, c_x2:x+w]\n roi_mag4 = mag[c_y1:c_y2, x:c_x1]\n roi_mag5 = mag[c_y1:c_y2, c_x1:c_x2]\n roi_mag6 = mag[c_y1:c_y2, c_x2:x+w]\n roi_mag7 = mag[c_y2:y+h, x:c_x1]\n roi_mag8 = mag[c_y2:y+h, c_x1:c_x2]\n roi_mag9 = mag[c_y2:y+h, c_x2:x+w]\n roi_dir1 = ang[y:c_y1, x:c_x1]\n roi_dir2 = ang[y:c_y1, c_x1:c_x2]\n roi_dir3 = ang[y:c_y1, c_x2:x+w]\n roi_dir4 = ang[c_y1:c_y2, x:c_x1]\n roi_dir5 = ang[c_y1:c_y2, c_x1:c_x2]\n roi_dir6 = ang[c_y1:c_y2, c_x2:x+w]\n roi_dir7 = ang[c_y2:y+h, x:c_x1]\n roi_dir8 = ang[c_y2:y+h, c_x1:c_x2]\n roi_dir9 = ang[c_y2:y+h, c_x2:x+w]\n\n magnitude = np.array(mag).flatten()\n direction = np.array(ang).flatten()\n 
magnitude1 = np.array(roi_mag1).flatten()\n direction1 = np.array(roi_dir1).flatten()\n magnitude2 = np.array(roi_mag2).flatten()\n direction2 = np.array(roi_dir2).flatten()\n magnitude3 = np.array(roi_mag3).flatten()\n direction3 = np.array(roi_dir3).flatten()\n magnitude4 = np.array(roi_mag4).flatten()\n direction4 = np.array(roi_dir4).flatten()\n magnitude5 = np.array(roi_mag5).flatten()\n direction5 = np.array(roi_dir5).flatten()\n magnitude6 = np.array(roi_mag6).flatten()\n direction6 = np.array(roi_dir6).flatten()\n magnitude7 = np.array(roi_mag7).flatten()\n direction7 = np.array(roi_dir7).flatten()\n magnitude8 = np.array(roi_mag8).flatten()\n direction8 = np.array(roi_dir8).flatten()\n magnitude9 = np.array(roi_mag9).flatten()\n direction9 = np.array(roi_dir9).flatten()\n\n # create magnitude and direction optical flow histogram per frame for each grid \n magnitude_histogram, direction_histogram = create_hist_context(magnitude, direction, magnitude_histogram, direction_histogram)\n magnitude_histogram1, direction_histogram1 = create_hist_context(magnitude1, direction1, magnitude_histogram1, direction_histogram1)\n magnitude_histogram2, direction_histogram2 = create_hist_context(magnitude2, direction2, magnitude_histogram2, direction_histogram2)\n magnitude_histogram3, direction_histogram3 = create_hist_context(magnitude3, direction3, magnitude_histogram3, direction_histogram3)\n magnitude_histogram4, direction_histogram4 = create_hist_context(magnitude4, direction4, magnitude_histogram4, direction_histogram4)\n magnitude_histogram5, direction_histogram5 = create_hist_context(magnitude5, direction5, magnitude_histogram5, direction_histogram5)\n magnitude_histogram6, direction_histogram6 = create_hist_context(magnitude6, direction6, magnitude_histogram6, direction_histogram6)\n magnitude_histogram7, direction_histogram7 = create_hist_context(magnitude7, direction7, magnitude_histogram7, direction_histogram7)\n magnitude_histogram8, direction_histogram8 = create_hist_context(magnitude8, direction8, magnitude_histogram8, direction_histogram8)\n magnitude_histogram9, direction_histogram9 = create_hist_context(magnitude9, direction9, magnitude_histogram9, direction_histogram9)\n \n #---------------------------------------------------------#\n # if you wish to see the optical flow frames uncomment the next 3 paragraphs\n '''\n # update the color image\n hsvImg[..., 0] = 0.5 * ang * 180 / np.pi\n hsvImg[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgbImg = cv2.cvtColor(hsvImg, cv2.COLOR_HSV2BGR)\n \n #drawing the bounding box\n cv2.rectangle(rgbImg, (x,y), (c_x1,c_y1), (255,0,0), 2)\n cv2.rectangle(rgbImg, (c_x1,y), (c_x2,c_y1), (0,255,0), 2)\n cv2.rectangle(rgbImg, (c_x2,y), (x+w,c_y1), (0,0,255), 2)\n cv2.rectangle(rgbImg, (x,c_y1), (c_x1,c_y2), (255,255,0), 2)\n cv2.rectangle(rgbImg, (c_x1,c_y1), (c_x2,c_y2), (0,255,255), 2)\n cv2.rectangle(rgbImg, (c_x2,c_y1), (x+w,c_y2), (255,0,255), 2)\n cv2.rectangle(rgbImg, (x,c_y2), (c_x1,y+h), (128,255,0), 2)\n cv2.rectangle(rgbImg, (c_x1,c_y2), (c_x2,y+h), (255,128,0), 2)\n cv2.rectangle(rgbImg, (c_x2,c_y2), (x+w,y+h), (0,255,128), 2)\n \n #Display the resulting frame\n cv2.imshow('dense optical flow', np.hstack((frame, rgbImg)))\n '''\n #---------------------------------------------------------#\n\n frame_no+=1\n\n # adjusting the bounding box over the POI to facilitate outward motion of the human body\n if(flag1==1):\n x+=5\n if(flag2==1):\n w-=10\n if(flag3==1):\n y+=5\n if(flag4==1):\n h-=10\n\n k = cv2.waitKey(30) & 0xff 
\n if k == 27:\n break\n \n else:\n break\n\n # check the magnitude and direction histograms to have expected shapes\n magnitude_histogram = mag_check(magnitude_histogram)\n magnitude_histogram1 = mag_check(magnitude_histogram1)\n magnitude_histogram2 = mag_check(magnitude_histogram2)\n magnitude_histogram3 = mag_check(magnitude_histogram3)\n magnitude_histogram4 = mag_check(magnitude_histogram4)\n magnitude_histogram5 = mag_check(magnitude_histogram5)\n magnitude_histogram6 = mag_check(magnitude_histogram6)\n magnitude_histogram7 = mag_check(magnitude_histogram7)\n magnitude_histogram8 = mag_check(magnitude_histogram8)\n magnitude_histogram9 = mag_check(magnitude_histogram9)\n direction_histogram = dir_check(direction_histogram)\n direction_histogram1 = dir_check(direction_histogram1)\n direction_histogram2 = dir_check(direction_histogram2)\n direction_histogram3 = dir_check(direction_histogram3)\n direction_histogram4 = dir_check(direction_histogram4) \n direction_histogram5 = dir_check(direction_histogram5)\n direction_histogram6 = dir_check(direction_histogram6)\n direction_histogram7 = dir_check(direction_histogram7)\n direction_histogram8 = dir_check(direction_histogram8)\n direction_histogram9 = dir_check(direction_histogram9)\n\n # apply windowing to extract contextual information\n mag_hist = window(magnitude_histogram)\n dir_hist = window(direction_histogram)\n mag_hist1 = window(magnitude_histogram1)\n dir_hist1 = window(direction_histogram1)\n mag_hist2 = window(magnitude_histogram2)\n dir_hist2 = window(direction_histogram2)\n mag_hist3 = window(magnitude_histogram3)\n dir_hist3 = window(direction_histogram3)\n mag_hist4 = window(magnitude_histogram4)\n dir_hist4 = window(direction_histogram4)\n mag_hist5 = window(magnitude_histogram5)\n dir_hist5 = window(direction_histogram5)\n mag_hist6 = window(magnitude_histogram6)\n dir_hist6 = window(direction_histogram6)\n mag_hist7 = window(magnitude_histogram7)\n dir_hist7 = window(direction_histogram7)\n mag_hist8 = window(magnitude_histogram8)\n dir_hist8 = window(direction_histogram8)\n mag_hist9 = window(magnitude_histogram9)\n dir_hist9 = window(direction_histogram9)\n\n # calculate the mean of the magnitude and direction histograms for each 3x3 grids\n mag_avg_hist = np.mean(mag_hist, axis=0)\n dir_avg_hist = np.mean(dir_hist, axis=0)\n mag_avg_hist1 = np.mean(mag_hist1, axis=0)\n dir_avg_hist1 = np.mean(dir_hist1, axis=0)\n mag_avg_hist2 = np.mean(mag_hist2, axis=0)\n dir_avg_hist2 = np.mean(dir_hist2, axis=0)\n mag_avg_hist3 = np.mean(mag_hist3, axis=0)\n dir_avg_hist3 = np.mean(dir_hist3, axis=0)\n mag_avg_hist4 = np.mean(mag_hist4, axis=0)\n dir_avg_hist4 = np.mean(dir_hist4, axis=0)\n mag_avg_hist5 = np.mean(mag_hist5, axis=0)\n dir_avg_hist5 = np.mean(dir_hist5, axis=0)\n mag_avg_hist6 = np.mean(mag_hist6, axis=0)\n dir_avg_hist6 = np.mean(dir_hist6, axis=0)\n mag_avg_hist7 = np.mean(mag_hist7, axis=0)\n dir_avg_hist7 = np.mean(dir_hist7, axis=0)\n mag_avg_hist8 = np.mean(mag_hist8, axis=0)\n dir_avg_hist8 = np.mean(dir_hist8, axis=0)\n mag_avg_hist9 = np.mean(mag_hist9, axis=0)\n dir_avg_hist9 = np.mean(dir_hist9, axis=0)\n\n # calculate the standard deviation of the magnitude and direction histograms for each 3x3 grids\n mag_std_hist = np.std(mag_hist, axis=0)\n dir_std_hist = np.std(dir_hist, axis=0)\n mag_std_hist1 = np.std(mag_hist1, axis=0)\n dir_std_hist1 = np.std(dir_hist1, axis=0)\n mag_std_hist2 = np.std(mag_hist2, axis=0)\n dir_std_hist2 = np.std(dir_hist2, axis=0)\n mag_std_hist3 = np.std(mag_hist3, axis=0)\n 
dir_std_hist3 = np.std(dir_hist3, axis=0)\n mag_std_hist4 = np.std(mag_hist4, axis=0)\n dir_std_hist4 = np.std(dir_hist4, axis=0)\n mag_std_hist5 = np.std(mag_hist5, axis=0)\n dir_std_hist5 = np.std(dir_hist5, axis=0)\n mag_std_hist6 = np.std(mag_hist6, axis=0)\n dir_std_hist6 = np.std(dir_hist6, axis=0)\n mag_std_hist7 = np.std(mag_hist7, axis=0)\n dir_std_hist7 = np.std(dir_hist7, axis=0)\n mag_std_hist8 = np.std(mag_hist8, axis=0)\n dir_std_hist8 = np.std(dir_hist8, axis=0)\n mag_std_hist9 = np.std(mag_hist9, axis=0)\n dir_std_hist9 = np.std(dir_hist9, axis=0)\n\n # concatenate all the histogram features to get the contextual descriptor for 3x3 grids\n histogram = mag_avg_hist\n histogram = np.hstack((histogram, mag_std_hist))\n histogram = np.hstack((histogram, dir_avg_hist))\n histogram = np.hstack((histogram, dir_std_hist))\n histogram = np.hstack((histogram, mag_avg_hist1))\n histogram = np.hstack((histogram, mag_std_hist1))\n histogram = np.hstack((histogram, dir_avg_hist1))\n histogram = np.hstack((histogram, dir_std_hist1))\n histogram = np.hstack((histogram, mag_avg_hist2))\n histogram = np.hstack((histogram, mag_std_hist2))\n histogram = np.hstack((histogram, dir_avg_hist2))\n histogram = np.hstack((histogram, dir_std_hist2))\n histogram = np.hstack((histogram, mag_avg_hist3))\n histogram = np.hstack((histogram, mag_std_hist3))\n histogram = np.hstack((histogram, dir_avg_hist3))\n histogram = np.hstack((histogram, dir_std_hist3))\n histogram = np.hstack((histogram, mag_avg_hist4))\n histogram = np.hstack((histogram, mag_std_hist4))\n histogram = np.hstack((histogram, dir_avg_hist4))\n histogram = np.hstack((histogram, dir_std_hist4))\n histogram = np.hstack((histogram, mag_avg_hist5))\n histogram = np.hstack((histogram, mag_std_hist5))\n histogram = np.hstack((histogram, dir_avg_hist5))\n histogram = np.hstack((histogram, dir_std_hist5))\n histogram = np.hstack((histogram, mag_avg_hist6))\n histogram = np.hstack((histogram, mag_std_hist6))\n histogram = np.hstack((histogram, dir_avg_hist6))\n histogram = np.hstack((histogram, dir_std_hist6))\n histogram = np.hstack((histogram, mag_avg_hist7))\n histogram = np.hstack((histogram, mag_std_hist7))\n histogram = np.hstack((histogram, dir_avg_hist7))\n histogram = np.hstack((histogram, dir_std_hist7))\n histogram = np.hstack((histogram, mag_avg_hist8))\n histogram = np.hstack((histogram, mag_std_hist8))\n histogram = np.hstack((histogram, dir_avg_hist8))\n histogram = np.hstack((histogram, dir_std_hist8))\n histogram = np.hstack((histogram, mag_avg_hist9))\n histogram = np.hstack((histogram, mag_std_hist9))\n histogram = np.hstack((histogram, dir_avg_hist9))\n histogram = np.hstack((histogram, dir_std_hist9))\n \n cv2.destroyAllWindows()\n cap.release()\n return histogram, frame_no" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.ones", "numpy.mean", "numpy.std", "numpy.hstack" ] ]
sschrod/BITES
[ "64c76feebd8b0869e74938f79d93b1946dcf88b5" ]
[ "bites/utils/Simple_Network.py" ]
[ "\"\"\"Copyright (c) 2018, Haavard Kvamme\n 2021, Schrod Stefan\"\"\"\n\nimport numpy as np\nfrom torch import nn\n\n\nclass DenseVanillaBlock(nn.Module):\n def __init__(self, in_features, out_features, bias=True, batch_norm=True, dropout=0., activation=nn.ReLU,\n w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):\n super().__init__()\n self.linear = nn.Linear(in_features, out_features, bias)\n if w_init_:\n w_init_(self.linear.weight.data)\n self.activation = activation()\n self.batch_norm = nn.BatchNorm1d(out_features) if batch_norm else None\n self.dropout = nn.Dropout(dropout) if dropout else None\n\n def forward(self, input):\n input = self.activation(self.linear(input))\n if self.batch_norm:\n input = self.batch_norm(input)\n if self.dropout:\n input = self.dropout(input)\n return input\n\nclass MLPVanilla(nn.Module):\n def __init__(self, in_features, num_nodes, out_features, batch_norm=True, dropout=None, activation=nn.ReLU,\n output_activation=None, output_bias=True,\n w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):\n super().__init__()\n\n num_nodes=np.append(in_features, num_nodes)\n if not hasattr(dropout, '__iter__'):\n dropout = [dropout for _ in range(len(num_nodes) - 1)]\n net = []\n for n_in, n_out, p in zip(num_nodes[:-1], num_nodes[1:], dropout):\n net.append(DenseVanillaBlock(n_in, n_out, True, batch_norm, p, activation, w_init_))\n net.append(nn.Linear(num_nodes[-1], out_features, output_bias))\n if output_activation:\n net.append(output_activation)\n self.net = nn.Sequential(*net)\n\n def forward(self, input):\n return self.net(input)" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Sequential", "torch.nn.init.kaiming_normal_", "torch.nn.BatchNorm1d", "numpy.append" ] ]
hanzz2007/mxnet
[ "cc0b2d67c40170aced702c9f80b4b7acbb1f2b79" ]
[ "tests/python/unittest/test_optimizer.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport mxnet as mx\nimport mxnet.lr_scheduler as lr_scheduler\nfrom mxnet import gluon\nimport unittest\nfrom nose.tools import raises\nimport math\nfrom mxnet.test_utils import *\n\ndef test_learning_rate():\n o1 = mx.optimizer.Optimizer(learning_rate=0.01)\n o1.set_learning_rate(0.2)\n assert o1.learning_rate == 0.2\n\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o2 = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n assert o2.learning_rate == 0.3\n o2.lr_scheduler.base_lr = 0.4\n assert o2.learning_rate == 0.4\n\n\n@raises(UserWarning)\ndef test_learning_rate_expect_user_warning():\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n o.set_learning_rate(0.5)\n\n\ndef test_lr_wd_mult():\n data = mx.sym.Variable('data')\n bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)\n fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)\n fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)\n\n mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())\n mod.bind(data_shapes=[('data', (5,10))])\n mod.init_params(initializer=mx.init.Uniform(1.0))\n mod.init_optimizer(optimizer_params={'learning_rate': 1.0})\n args1, _ = mod.get_params()\n args1 = {k: v.asnumpy() for k, v in args1.items()}\n mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)\n mod.backward(mod.get_outputs())\n mod.update()\n args2, _ = mod.get_params()\n args2 = {k: v.asnumpy() for k, v in args2.items()}\n\n assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}\n assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}\n assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)\n assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)\n assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)\n\ndef compare_ndarray_tuple(t1, t2, rtol=None, atol=None):\n if t1 is not None and t2 is not None:\n if isinstance(t1, tuple):\n for s1, s2 in zip(t1, t2):\n compare_ndarray_tuple(s1, s2, rtol, atol)\n else:\n assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)\n\n\ndef compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default'):\n if w_stype == 'default':\n w2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n w1 = w2.copyto(default_context())\n elif w_stype == 'row_sparse' or w_stype == 'csr':\n w2 = rand_ndarray(shape, w_stype, density=1, dtype=dtype)\n w1 = w2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type 
not supported yet\")\n if g_stype == 'default':\n g2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n g1 = g2.copyto(default_context())\n elif g_stype == 'row_sparse' or g_stype == 'csr':\n g2 = rand_ndarray(shape, g_stype, dtype=dtype)\n g1 = g2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type not supported yet\")\n\n state1 = opt1.create_state_multi_precision(0, w1)\n state2 = opt2.create_state_multi_precision(0, w2)\n compare_ndarray_tuple(state1, state2)\n\n opt1.update_multi_precision(0, w1, g1, state1)\n opt2.update_multi_precision(0, w2, g2, state2)\n compare_ndarray_tuple(state1, state2, rtol=1e-4, atol=1e-5)\n assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=1e-4, atol=1e-5)\n\n# SGD\n\nclass PySGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):\n super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n self.multi_precision = multi_precision\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (momentum, weight_master_copy)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n\n if not use_multi_precision:\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight[:] = ((1 - lr*wd)*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad\n else:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight += mom\n else:\n mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad\n weight += mom\n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n mom = state[0]\n weight32 = state[1]\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight32[:] = ((1 - lr*wd)*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32\n else:\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight32 += mom\n else:\n mom[:] = self.momentum*mom - 
lr*wd*weight32 - lr*self.rescale_grad*grad32\n weight32 += mom\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n def update_multi_precision(self, index, weight, grad, state):\n self.update(index, weight, grad, state)\n\n@unittest.skip(\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/9000\")\ndef test_sgd():\n mx.random.seed(0)\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n # test operator fallback on cpu\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n g_stype='row_sparse')\n if dtype != np.float16:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],\n dtype, w_stype='csr', g_stype='csr')\n # test optimizer with a big shape\n big_shape = (54686454, 1)\n kwarg = {'momentum': 0.9, 'wd': 0.05}\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), big_shape, np.float32)\n\nclass PySparseSGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):\n super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n if self.momentum == 0.0:\n return None\n else:\n return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n num_rows = weight.shape[0]\n if self.momentum == 0.0:\n # Update on a per row basis, skip all-zero rows\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n weight[row] = ((1 - lr*wd)*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad,\n -self.clip_gradient, self.clip_gradient))\n else:\n weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]\n else:\n mom = state\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n mom[row] = 
(self.momentum*mom[row] - lr*wd*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight[row] += mom[row]\n else:\n mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]\n weight[row] += mom[row]\n\ndef test_sparse_sgd():\n mx.random.seed(0)\n opt1 = PySparseSGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\ndef test_std_sparse_sgd():\n mx.random.seed(0)\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\nclass PyNAG(PySGD):\n def __init__(self, **kwargs):\n super(PyNAG, self).__init__(**kwargs)\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (weight_master_copy, momentum)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n if not use_multi_precision:\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n if self.momentum == 0.0:\n weight[:] += -lr * (grad + wd * weight)\n else:\n mom = state\n mom[:] *= 
self.momentum\n grad += wd * weight\n mom[:] += grad\n grad[:] += self.momentum * mom\n weight[:] += -lr * grad \n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n grad32 = grad32 * self.rescale_grad\n if self.clip_gradient is not None:\n grad32 = mx.nd.clip(grad32, -self.clip_gradient, self.clip_gradient)\n mom = state[1]\n weight32 = state[0]\n if self.momentum == 0.0:\n weight32[:] += -lr * (grad32 + wd * weight32)\n else:\n mom[:] *= self.momentum\n grad32 += wd * weight32\n mom[:] += grad32\n grad32[:] += self.momentum * mom\n weight32[:] += -lr * grad32\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n\ndef test_nag():\n mx.random.seed(0)\n opt1 = PyNAG\n opt2 = mx.optimizer.NAG\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n\n# FTML\n\nclass PyFTML(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of FTML\"\"\"\n def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):\n super(PyFTML, self).__init__(**kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0\n\n def update(self, index, weight, grad, state):\n assert(isinstance(weight, mx.nd. 
NDArray))\n assert(isinstance(grad, mx.nd.NDArray))\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n t = self._index_update_count[index]\n\n grad = grad * self.rescale_grad + wd * weight\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n # get previous states\n prev_d, prev_v, prev_z = state\n # compute states\n v_t = self.beta2 * prev_v + (1 - self.beta2) * mx.nd.square(grad)\n d_t = (1 - pow(self.beta1, t)) / lr * (mx.nd.sqrt(v_t / (1 - pow(self.beta2, t))) + self.epsilon)\n sigma_t = d_t - self.beta1 * prev_d\n z_t = self.beta1 * prev_z + (1 - self.beta1) * grad - sigma_t * weight\n # update weight\n weight[:] = - z_t / d_t\n # update states\n prev_d[:] = d_t\n prev_v[:] = v_t\n prev_z[:] = z_t\n\n\ndef test_ftml():\n mx.random.seed(0)\n opt1 = PyFTML\n opt2 = mx.optimizer.FTML\n shape = (3, 4, 5)\n beta1_options = [{}, {'beta1': 0.5}, {'beta1': 0.7}]\n beta2_options = [{}, {'beta2': 0.8}, {'beta2': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for beta1_option in beta1_options:\n for beta2_option in beta2_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(beta1_option)\n kwarg.update(beta2_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# ADAM\n\nclass PyAdam(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of adam\"\"\"\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n decay_factor=(1 - 1e-8), sparse_update=False, **kwargs):\n super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.decay_factor = decay_factor\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: mean, variance\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n self._update_count(index)\n\n t = self._index_update_count[index]\n mean, variance = state\n\n wd = self._get_wd(index)\n num_rows = weight.shape[0]\n coef1 = 1. - self.beta1**t\n coef2 = 1. - self.beta2**t\n lr *= math.sqrt(coef2)/coef1\n for row in range(num_rows):\n # check row slices of all zeros\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n # skip zeros during sparse update\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad + wd * weight[row]\n # clip gradients\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n # update mean\n mean[row] *= self.beta1\n mean[row] += grad[row] * (1. 
- self.beta1)\n # update variance\n variance[row] *= self.beta2\n variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])\n # update weight\n weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)\n\n\ndef test_adam():\n mx.random.seed(0)\n opt1 = PyAdam\n opt2 = mx.optimizer.Adam\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse')\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse')\n\n# Signum\nclass PySignum(mx.optimizer.Optimizer):\n \"\"\"The python reference of Signum optimizer.\n\n The optimizer updates the weight by:\n\n rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight\n state = momentum * state + (1-momentum)*rescaled_grad\n weight = (1 - lr * wd_lh) * weight - lr * sign(state)\n\n See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf\n\n For details of the update algorithm see\n :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.\n\n This optimizer accepts the following parameters in addition to those accepted\n by :class:`.Optimizer`.\n\n Parameters\n ----------\n momentum : float, optional\n The momentum value.\n wd_lh : float, optitional\n The amount of decoupled weight decay regularization.\n \"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh = 0.0, **kwargs):\n super(PySignum, self).__init__(learning_rate = learning_rate, **kwargs)\n self.momentum = momentum\n self.wd_lh = wd_lh\n\n def create_state(self, index, weight):\n momentum = None\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)\n return momentum\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n\n if state is not None:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - (1-self.momentum)*(wd*weight +\n mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient)))\n else:\n mom[:] = self.momentum*mom - (1-self.momentum)*wd*weight - (1-self.momentum)*self.rescale_grad*grad\n weight[:] = (1 - lr*self.wd_lh)*weight + lr*mx.nd.sign(mom)\n else:\n weight[:] = (1 - lr*(wd+self.wd_lh))*weight - lr*mx.nd.sign(grad)\n\ndef test_signum():\n mx.random.seed(0)\n opt1 = PySignum\n opt2 = mx.optimizer.Signum\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n wd_lh_options = [{}, {'wd_lh': 0.015}, {'wd_lh': 0.0}]\n mom_options = [{}, 
{'momentum': 0.9}]\n lr_options = [{'learning_rate': 0.05},{'learning_rate': 0.01}]\n for dtype in [np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in wd_lh_options:\n for lr_option in lr_options:\n for mom_option in mom_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n kwarg.update(lr_option)\n kwarg.update(mom_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# RMSProp\nclass PyRMSProp(mx.optimizer.Optimizer):\n \"\"\"RMSProp optimizer of Tieleman & Hinton, 2012,\n\n For centered=False, the code follows the version in\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by\n Tieleman & Hinton, 2012\n\n For centered=True, the code follows the version in\n http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.\n\n Parameters\n ----------\n learning_rate : float, optional\n Step size.\n Default value is set to 0.001.\n gamma1: float, optional\n decay factor of moving average for gradient, gradient^2.\n Default value is set to 0.9.\n gamma2: float, optional\n \"momentum\" factor.\n Default value if set to 0.9.\n Only used if centered=True\n epsilon : float, optional\n Default value is set to 1e-8.\n centered : boolean, optional\n Use Graves or Tielemans & Hintons version of RMSProp\n wd : float, optional\n L2 regularization coefficient add to all the weights\n rescale_grad : float, optional\n rescaling factor of gradient.\n clip_gradient : float, optional\n clip gradient in range [-clip_gradient, clip_gradient]\n clip_weights : float, optional\n clip weights in range [-clip_weights, clip_weights]\n\n \"\"\"\n def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,\n epsilon=1e-8, centered=False, clip_weights=None, **kwargs):\n super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)\n self.centered = centered\n self.gamma1 = gamma1\n self.gamma2 = gamma2\n self.epsilon = epsilon\n self.clip_weights = clip_weights\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state.\n\n For centered=False: n\n For centered=True: n, g, delta\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n \"\"\"\n if self.centered:\n return (mx.nd.zeros(weight.shape, weight.context), # n\n mx.nd.zeros(weight.shape, weight.context), # g\n mx.nd.zeros(weight.shape, weight.context)) # delta\n else:\n return (mx.nd.zeros(weight.shape, weight.context), ) # n\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n grad = grad * self.rescale_grad + wd * weight\n\n if not self.centered:\n (n, ) = state\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))\n\n else:\n n, g, delta = state\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n g[:] = (1 - self.gamma1) * grad + 
self.gamma1 * g\n delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))\n weight[:] += delta\n\n if self.clip_weights:\n mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)\n\n@unittest.skip(\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8230\")\ndef test_rms():\n mx.random.seed(0)\n opt1 = PyRMSProp\n opt2 = mx.optimizer.RMSProp\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n cw_options = [{}, {'clip_weights': 0.01}]\n center_options = [{}, {'centered': False}, {'centered': True}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32]:\n for cw_option in cw_options:\n for cg_option in cg_options:\n for center_option in center_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cw_option)\n kwarg.update(cg_option)\n kwarg.update(center_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')\n\nclass PyFtrl(mx.optimizer.Optimizer):\n \"\"\"The Ftrl optimizer.\n\n Referenced from *Ad Click Prediction: a View from the Trenches*, available at\n http://dl.acm.org/citation.cfm?id=2488200.\n\n Parameters\n ----------\n lamda1 : float, optional\n L1 regularization coefficient.\n learning_rate : float, optional\n The initial learning rate.\n beta : float, optional\n Per-coordinate learning rate correlation parameter.\n eta :\n .. 
math::\n \\\\eta_{t,i} = \\\\frac{learningrate}{\\\\beta+\\\\sqrt{\\\\sum_{s=1}^tg_{s,i}^t}}\n \"\"\"\n\n def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, sparse_update=False, **kwargs):\n super(PyFtrl, self).__init__(**kwargs)\n self.lamda1 = lamda1\n self.beta = beta\n self.lr = learning_rate\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # dn\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # n\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n wd = self._get_wd(index)\n lr = self._get_lr(index)\n num_rows = weight.shape[0]\n\n dn, n = state\n for row in range(num_rows):\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n\n #update dn, n\n dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr\n n[row] += grad[row] * grad[row]\n\n # update weight\n weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \\\n ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)\n\ndef test_ftrl():\n mx.random.seed(0)\n opt1 = PyFtrl\n opt2 = mx.optimizer.Ftrl\n shape = (3, 4, 5)\n kwargs = [{},\n {'clip_gradient': 0.5},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14},\n {'rescale_grad': 0.8},\n {'clip_gradient': 0.5, 'wd': 0.07},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},\n {'rescale_grad': 0.8, 'wd': 0.05},\n {'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},\n {'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]\n for kwarg in kwargs:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n np.float32, w_stype='row_sparse', g_stype='row_sparse')\n\ndef test_nadam():\n\n def get_net(num_hidden, flatten=True):\n data = mx.symbol.Variable('data')\n fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128, flatten=flatten)\n act1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\n fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64, flatten=flatten)\n act2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\n fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=num_hidden, flatten=flatten)\n return fc3\n np.random.seed(1234)\n N = 20\n data = mx.random.uniform(-1, 1, shape=(N, 10))\n label = mx.random.uniform(-1, 1, shape=(N, 1))\n data_iter = mx.io.NDArrayIter(data, label, batch_size=5, label_name='label', shuffle=True)\n output = get_net(1)\n l = mx.symbol.Variable('label')\n Loss = gluon.loss.L1Loss()\n loss = Loss(output, l)\n loss = mx.sym.make_loss(loss)\n mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))\n mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.0005, 'wd': 0.0005},\n initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss(),\n optimizer='nadam')\n assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.1\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.random.seed", "numpy.zeros_like" ] ]
tabdelaal/SpaGE
[ "7533cbf2275c3049561e8a17b9f7866e0e324743" ]
[ "benchmark/osmFISH_AllenSSp/SpaGE/Precise_output.py" ]
[ "import os\r\nos.chdir('osmFISH_AllenSSp/')\r\n\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nmatplotlib.rcParams['pdf.fonttype'] = 42\r\nmatplotlib.rcParams['ps.fonttype'] = 42\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sys\r\nsys.path.insert(1,'SpaGE/')\r\nfrom principal_vectors import PVComputation\r\n\r\nwith open ('data/SpaGE_pkl/osmFISH_Cortex.pkl', 'rb') as f:\r\n datadict = pickle.load(f)\r\n\r\nosmFISH_data = datadict['osmFISH_data']\r\nosmFISH_data_scaled = datadict['osmFISH_data_scaled']\r\nosmFISH_meta= datadict['osmFISH_meta']\r\ndel datadict\r\n\r\nwith open ('data/SpaGE_pkl/Allen_SSp.pkl', 'rb') as f:\r\n datadict = pickle.load(f)\r\n \r\nRNA_data = datadict['RNA_data']\r\nRNA_data_scaled = datadict['RNA_data_scaled']\r\ndel datadict\r\n\r\nCommon_data = RNA_data_scaled[np.intersect1d(osmFISH_data_scaled.columns,RNA_data_scaled.columns)]\r\n\r\nn_factors = 30\r\nn_pv = 30\r\nn_pv_display = 30\r\ndim_reduction = 'pca'\r\ndim_reduction_target = 'pca'\r\n\r\npv_FISH_RNA = PVComputation(\r\n n_factors = n_factors,\r\n n_pv = n_pv,\r\n dim_reduction = dim_reduction,\r\n dim_reduction_target = dim_reduction_target\r\n)\r\n\r\npv_FISH_RNA.fit(Common_data,osmFISH_data_scaled[Common_data.columns])\r\n\r\nfig = plt.figure()\r\nsns.heatmap(pv_FISH_RNA.initial_cosine_similarity_matrix_[:n_pv_display,:n_pv_display], cmap='seismic_r',\r\n center=0, vmax=1., vmin=0)\r\nplt.xlabel('osmFISH',fontsize=18, color='black')\r\nplt.ylabel('Allen_SSp',fontsize=18, color='black')\r\nplt.xticks(np.arange(n_pv_display)+0.5, range(1, n_pv_display+1), fontsize=12)\r\nplt.yticks(np.arange(n_pv_display)+0.5, range(1, n_pv_display+1), fontsize=12, rotation='horizontal')\r\nplt.gca().set_ylim([n_pv_display,0])\r\nplt.show()\r\n\r\nplt.figure()\r\nsns.heatmap(pv_FISH_RNA.cosine_similarity_matrix_[:n_pv_display,:n_pv_display], cmap='seismic_r',\r\n center=0, vmax=1., vmin=0)\r\nfor i in range(n_pv_display-1):\r\n plt.text(i+1,i+.7,'%1.2f'%pv_FISH_RNA.cosine_similarity_matrix_[i,i], fontsize=14,color='black')\r\n \r\nplt.xlabel('osmFISH',fontsize=18, color='black')\r\nplt.ylabel('Allen_SSp',fontsize=18, color='black')\r\nplt.xticks(np.arange(n_pv_display)+0.5, range(1, n_pv_display+1), fontsize=12)\r\nplt.yticks(np.arange(n_pv_display)+0.5, range(1, n_pv_display+1), fontsize=12, rotation='horizontal')\r\nplt.gca().set_ylim([n_pv_display,0])\r\nplt.show()\r\n\r\nImportance = pd.Series(np.sum(pv_FISH_RNA.source_components_**2,axis=0),index=Common_data.columns)\r\nImportance.sort_values(ascending=False,inplace=True)\r\nImportance.index[0:30]\r\n\r\n### Technology specific Processes\r\nEffective_n_pv = sum(np.diag(pv_FISH_RNA.cosine_similarity_matrix_) > 0.3)\r\n\r\n# explained variance RNA\r\nnp.sum(pv_FISH_RNA.source_explained_variance_ratio_[np.arange(Effective_n_pv)])*100\r\n# explained variance spatial\r\nnp.sum(pv_FISH_RNA.target_explained_variance_ratio_[np.arange(Effective_n_pv)])*100\r\n" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.intersect1d", "matplotlib.pyplot.show", "matplotlib.pyplot.gca", "numpy.diag" ] ]
menrmenr/Psychotoolbox-3
[ "b294355ea46bcd3bcd6876196d0c0f321b3a6ddc" ]
[ "setup.py" ]
[ "# setup.py -- Build-Script for building Psychtoolbox-3 \"mex\" files as Python extensions.\n#\n# (c) 2018 Mario Kleiner - Licensed under MIT license.\n#\n\n# from distutils.core import setup, Extension # Build system.\nfrom setuptools import setup, Extension, find_packages\nimport os, fnmatch, shutil # Directory traversal, file list building.\nimport platform # OS detection.\nimport sys # cpu arch detection.\nimport numpy # To get include dir on macOS.\n\nis_64bits = sys.maxsize > 2**32\n\n# unified version number, read from simple text file\ndef get_version():\n import re\n VERSIONFILE = \"PsychPython/psychtoolbox/_version.py\"\n with open(VERSIONFILE, \"rt\") as fid:\n verstrline = fid.read()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n verstr = mo.group(1)\n else:\n raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n return verstr\nversion = get_version()\n\ndef get_sourcefiles(path):\n sources = []\n pattern1 = '*.c'\n pattern2 = '*.cpp'\n for filename in sorted(os.listdir(path)):\n if fnmatch.fnmatch(filename, pattern1) or fnmatch.fnmatch(filename, pattern2):\n sources += [os.path.join(path,filename)]\n\n # Fancy schmanzi, not needed atm. for recursive dir traversal:\n # for root, dirs, files in os.walk(path):\n # dirs.sort()\n # for filename in sorted(files):\n # sources += [os.path.join(root,filename)]\n\n return(sources)\n\ndef get_basemacros(name, osname):\n return([('PTBMODULE_' + name, None), ('PTBMODULENAME', name)] + base_macros)\n\ndef get_baseincludedirs(name, osname):\n return(['PsychSourceGL/Source/Common/' + name] + baseincludes_common + ['PsychSourceGL/Source/' + osname + '/Base'] + ['PsychSourceGL/Source/' + osname + '/' + name])\n\ndef get_basesources(name, osname):\n extrafiles = []\n if os.access('./PsychSourceGL/Source/' + osname + '/' + name, os.F_OK):\n extrafiles = get_sourcefiles('./PsychSourceGL/Source/' + osname + '/' + name)\n\n return(basefiles_common + get_sourcefiles('./PsychSourceGL/Source/' + osname + '/Base/') + get_sourcefiles('./PsychSourceGL/Source/Common/' + name) + extrafiles)\n\n# Treating some special cases like Octave seems to be the right thing to do,\n# PSYCH_LANGUAGE setting is self-explanatory:\nbase_macros = [('PTBOCTAVE3MEX', None), ('PSYCH_LANGUAGE', 'PSYCH_PYTHON')]\n# Disabled: This would enable Py_LIMITED_API, to allow to build one set of modules for all versions of\n# Python >= 3.2. 
The downside is loss of important functionality [PsychRuntimeEvaluateString()) does not work!].\n# Also, we have build failure on at least Ubuntu 18.04 LTS with Python 3.6, so it is a no-go on Linux for now!\n#base_macros = [('PTBOCTAVE3MEX', None), ('PSYCH_LANGUAGE', 'PSYCH_PYTHON'), ('Py_LIMITED_API', None)]\n\n# Common infrastructure and the scripting glue module for interfacing with the Python runtime:\nbasefiles_common = get_sourcefiles('./PsychSourceGL/Source/Common/Base') + ['PsychSourceGL/Source/Common/Base/PythonGlue/PsychScriptingGluePython.c']\nbaseincludes_common = [numpy.get_include(), 'PsychSourceGL/Source/Common/Base', 'PsychSourceGL/Source/Common/Screen']\n\n# OS detection and file selection for the different OS specific backends:\nprint('Platform reported as: %s\\n' % platform.system())\nif platform.system() == 'Linux':\n # Linux specific backend code:\n print('Building for Linux...\\n')\n osname = 'Linux'\n # All libraries to link to all modules:\n base_libs = ['c', 'rt', 'dl']\n # No \"no reproducible builds\" warning:\n base_compile_args = ['-Wno-date-time']\n # Extra OS specific libs for PsychPortAudio:\n audio_libdirs = []\n audio_extralinkargs = []\n audio_libs = ['portaudio', 'asound']\n audio_objects = []\n\n # Extra OS specific libs for PsychHID:\n psychhid_includes = ['/usr/include/libusb-1.0']\n psychhid_libdirs = []\n psychhid_libs = ['dl', 'usb-1.0', 'X11', 'Xi', 'util']\n psychhid_extra_objects = []\n\n # Extra files needed, e.g., libraries:\n extra_files = {}\n\nif platform.system() == 'Windows':\n print('Building for Windows...\\n')\n osname = 'Windows'\n base_libs = ['kernel32', 'user32', 'advapi32', 'winmm']\n base_compile_args = []\n\n # libusb includes:\n psychhid_includes = ['PsychSourceGL/Cohorts/libusb1-win32/include/libusb-1.0']\n\n if is_64bits:\n # 64bit supports PsychPortAudio\n psychhid_libdirs = ['PsychSourceGL/Cohorts/libusb1-win32/MS64/dll']\n # and copy the files to the folder\n shutil.copy('PsychSourceGL/Cohorts/libusb1-win32/MS64/dll/libusb-1.0.dll',\n 'PsychPython/')\n shutil.copy('Psychtoolbox/PsychSound/portaudio_x64.dll',\n 'PsychPython/')\n # list them so they get packaged\n extra_files = {'psychtoolbox': ['portaudio_x64.dll', 'libusb-1.0.dll']}\n\n # Extra OS specific libs for PsychPortAudio:\n audio_libdirs = ['PsychSourceGL/Cohorts/PortAudio']\n audio_extralinkargs = [] # No runtime delay loading atm. No benefit with current packaging method: ['/DELAYLOAD:portaudio_x64.dll']\n audio_libs = ['delayimp', 'portaudio_x64']\n audio_objects = []\n else:\n # for win32 we use a different libusb dll and the portaudio dll is not supported\n psychhid_libdirs = ['PsychSourceGL/Cohorts/libusb1-win32/MS32/dll']\n shutil.copy('PsychSourceGL/Cohorts/libusb1-win32/MS32/dll/libusb-1.0.dll',\n 'PsychPython/')\n # list them so they get packaged\n extra_files = {'psychtoolbox': ['libusb-1.0.dll']}\n\n psychhid_libs = ['dinput8', 'libusb-1.0', 'setupapi']\n psychhid_extra_objects = []\n\nif platform.system() == 'Darwin':\n print('Building for macOS...\\n')\n osname = 'OSX'\n # These should go to extra_link_args in Extension() below, but apparently distutils\n # always appends the extra_link_args at the end of the linker command line, which is\n # wrong for -framework's, as they must be stated *before* the .o object files which\n # want to use functions from them. 
A solution to this problem doesn't exist in distutils\n # almost two decades after the debut of Mac OSX, because hey, take your time!\n #\n # The hack is to set the LDFLAGS environment variable to what we need, as LDFLAGS\n # apparently gets prepended to the linker invocation arguments, so -framework statements\n # precede the .o'bjects they should apply to and the linker is happy again.\n #\n # Downside is that now we have to pass the union of *all* -framework switches ever\n # used for *any* extension module, as os.environ can't be changed during the build\n # sequencing for a distribution package.\n #\n # Is this awful? Absolutely! And i thought the Octave/Matlab build process on macOS\n # sucked big time, but apparently somebody in Pythonland shouted \"Hold my beer!\" :(\n # Hopefully some new info about this issue will prove me wrong, and there is a sane\n # and elegant solution, but this is the best i could find after hours of Googling and\n # trying.\n #\n # The following would be the full list of frameworks, but apparently including -framework Carbon\n # is enough. Maybe a catch-all including all other frameworks?\n #\n # -framework Carbon -framework CoreServices -framework CoreFoundation -framework CoreAudio -framework AudioToolbox -framework AudioUnit\n # -framework ApplicationServices -framework OpenGL -framework CoreVideo -framework IOKit -framework SystemConfiguration\n # -framework CoreText -framework Cocoa\n #\n os.environ['LDFLAGS'] = '-framework Carbon -framework CoreAudio'\n base_libs = []\n\n # No \"no reproducible builds\" warning. macOS minimum version 10.9 selected by Jon Peirce.\n # May work for current modules, but is completely untested and unsupported by Psychtoolbox\n # upstream as of v3.0.15+, which only allows 10.11 as minimum version for Psychtoolbox mex\n # files and only tests with 10.13. 
Also note that already 10.11 is unsupported by Apple and\n # therefore a security risk.\n base_compile_args = ['-Wno-date-time', '-mmacosx-version-min=10.9']\n\n # Extra OS specific libs for PsychPortAudio:\n audio_libdirs = []\n audio_extralinkargs = []\n audio_libs = []\n # Include our statically linked on-steroids version of PortAudio:\n audio_objects = ['PsychSourceGL/Cohorts/PortAudio/libportaudio_osx_64.a']\n\n # Include Apples open-source HID Utilities for all things USB-HID device handling:\n psychhid_includes = ['PsychSourceGL/Cohorts/HID_Utilities_64Bit/', 'PsychSourceGL/Cohorts/HID_Utilities_64Bit/IOHIDManager']\n psychhid_libdirs = []\n psychhid_libs = []\n # Extra objects for PsychHID - statically linked HID utilities:\n psychhid_extra_objects = ['PsychSourceGL/Cohorts/HID_Utilities_64Bit/build/Release/libHID_Utilities64.a']\n\n # Extra files needed, e.g., libraries:\n extra_files = {}\n\next_modules = []\n# GetSecs module: Clock queries.\nname = 'GetSecs'\nGetSecs = Extension(name,\n extra_compile_args = base_compile_args,\n define_macros = get_basemacros(name, osname),\n include_dirs = get_baseincludedirs(name, osname),\n sources = get_basesources(name, osname),\n libraries = base_libs,\n )\next_modules.append(GetSecs)\n\n# WaitSecs module: Timed waits.\nname = 'WaitSecs'\nWaitSecs = Extension(name,\n extra_compile_args = base_compile_args,\n define_macros = get_basemacros(name, osname),\n include_dirs = get_baseincludedirs(name, osname),\n sources = get_basesources(name, osname),\n libraries = base_libs\n )\next_modules.append(WaitSecs)\n\n# PsychPortAudio module: High precision, high reliability, multi-channel, multi-card audio i/o.\nif is_64bits or platform.system() == 'Linux':\n # This won't compile on 32bit windows or macOS. Linux also has 32 Bit non-Intel variants, e.g., RaspberryPi\n name = 'PsychPortAudio'\n PsychPortAudio = Extension(name,\n extra_compile_args = base_compile_args,\n define_macros = get_basemacros(name, osname),\n include_dirs = get_baseincludedirs(name, osname),\n sources = get_basesources(name, osname),\n library_dirs = audio_libdirs,\n libraries = base_libs + audio_libs,\n extra_link_args = audio_extralinkargs,\n extra_objects = audio_objects\n )\n ext_modules.append(PsychPortAudio)\n\n# PsychHID module: Note the extra include_dirs and libraries:\nname = 'PsychHID'\nPsychHID = Extension(name,\n extra_compile_args = base_compile_args,\n define_macros = get_basemacros(name, osname),\n include_dirs = get_baseincludedirs(name, osname) + psychhid_includes,\n sources = get_basesources(name, osname),\n library_dirs = psychhid_libdirs,\n libraries = base_libs + psychhid_libs,\n extra_objects = psychhid_extra_objects\n )\next_modules.append(PsychHID)\n\n# IOPort module:\nname = 'IOPort'\nIOPort = Extension(name,\n extra_compile_args = base_compile_args,\n define_macros = get_basemacros(name, osname),\n include_dirs = get_baseincludedirs(name, osname),\n sources = get_basesources(name, osname),\n libraries = base_libs\n )\next_modules.append(IOPort)\n\nsetup (name = 'psychtoolbox',\n version = version,\n description = 'Pieces of Psychtoolbox-3 ported to CPython.',\n author = 'Mario Kleiner',\n author_email = 'mario.kleiner.de@gmail.com',\n url = 'http://psychtoolbox.org',\n packages = ['psychtoolbox', 'psychtoolbox.demos'],\n package_dir = {'' : 'PsychPython',\n 'psychtoolbox' : 'PsychPython/psychtoolbox',\n 'psychtoolbox.demos' : 'PsychPython/demos'},\n package_data = extra_files,\n ext_package = 'psychtoolbox',\n ext_modules = ext_modules,\n 
include_package_data=True, # Include files listed in MANIFEST.in\n )\n\nif platform.system() == 'Windows':\n # Get rid of the now no longer needed copies of dll's inside PsychPython,\n # now that setup() has already copied them into the distribution.\n if os.path.exists('PsychPython/portaudio_x64.dll'):\n os.remove('PsychPython/portaudio_x64.dll')\n os.remove('PsychPython/libusb-1.0.dll')\n" ]
[ [ "numpy.get_include" ] ]
astutespruce/sarp
[ "7ce503380440c47b762ed1a8efd1d3e3aab6605e", "7ce503380440c47b762ed1a8efd1d3e3aab6605e" ]
[ "analysis/prep/barriers/lib/points.py", "analysis/rank/rank_small_barriers.py" ]
[ "import pygeos as pg\nimport pandas as pd\nimport numpy as np\n\n\ndef connect_points(start, end):\n \"\"\"Convert a series or array of points to an array or series of lines.\n\n Parameters\n ----------\n start : Series or ndarray\n end : Series or ndarray\n\n Returns\n -------\n Series or ndarray\n \"\"\"\n\n is_series = False\n\n if isinstance(start, pd.Series):\n is_series = True\n index = start.index\n start = start.values\n if isinstance(end, pd.Series):\n end = end.values\n\n x1 = pg.get_x(start)\n y1 = pg.get_y(start)\n x2 = pg.get_x(end)\n y2 = pg.get_y(end)\n\n lines = pg.linestrings(np.array([[x1, x2], [y1, y2]]).T)\n\n if is_series:\n return pd.Series(lines, index=index)\n\n return lines\n\n\ndef window(geometries, distance):\n \"\"\"Return windows around geometries bounds +/- distance\n\n Parameters\n ----------\n geometries : Series or ndarray\n geometries to window\n distance : number or ndarray\n radius of window\n if ndarry, must match length of geometries\n\n Returns\n -------\n Series or ndarray\n polygon windows\n \"\"\"\n minx, miny, maxx, maxy = pg.bounds(geometries).T\n windows = pg.box(minx - distance, miny - distance, maxx + distance, maxy + distance)\n\n if isinstance(geometries, pd.Series):\n return pd.Series(windows, index=geometries.index)\n\n return windows\n", "import os\nfrom pathlib import Path\nfrom time import time\nimport warnings\n\nimport pandas as pd\n\nfrom analysis.rank.lib.networks import get_network_results\nfrom analysis.rank.lib.metrics import (\n classify_streamorder,\n classify_spps,\n classify_percent_altered,\n)\nfrom api.constants import SB_API_FIELDS\n\n\nwarnings.filterwarnings(\"ignore\", message=\".*initial implementation of Parquet.*\")\n\n\nstart = time()\n\ndata_dir = Path(\"data\")\nbarriers_dir = data_dir / \"barriers/master\"\napi_dir = data_dir / \"api\"\nresults_dir = data_dir / \"barriers/networks\"\n\nif not os.path.exists(api_dir):\n os.makedirs(api_dir)\n\nif not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n### Read in master\nprint(\"Reading master...\")\ndf = (\n pd.read_feather(barriers_dir / \"small_barriers.feather\")\n .set_index(\"id\")\n .drop(\n columns=[\n \"geometry\",\n \"level_0\",\n \"index\",\n \"dup_group\",\n \"dup_count\",\n \"dup_log\",\n \"snap_dist\",\n \"snap_tolerance\",\n \"snap_ref_id\",\n \"snap_log\",\n \"snapped\",\n \"log\",\n \"lineID\",\n \"wbID\",\n ],\n errors=\"ignore\",\n )\n .rename(columns={\"excluded\": \"Excluded\", \"intermittent\": \"Intermittent\",})\n)\n\n# Drop any that are duplicates\n# NOTE: we retain those that were dropped because these are relevant for folks to know what\n# has been inventoried (e.g., those dropped because no barrier, etc)\n# but do drop any that have no state or HUC2\ndf = df.loc[(~df.duplicate) & (df.State)].copy()\n\n\n### Classify StreamOrder\ndf[\"StreamOrderClass\"] = classify_streamorder(df.StreamOrder)\n\n\nfor col in [\"TESpp\", \"StateSGCNSpp\", \"RegionalSGCNSpp\"]:\n df[f\"{col}Class\"] = classify_spps(df[col])\n\n\n### Get network results\nnetworks = get_network_results(df, \"small_barriers\")\n\ndf = df.join(networks)\n\n# True if the barrier was snapped to a network and has network results in the\n# all networks scenario\ndf[\"HasNetwork\"] = df.index.isin(networks.index)\ndf[\"Ranked\"] = df.HasNetwork & (~df.unranked)\n\n# Intermittent is not applicable if it doesn't have a network\ndf[\"Intermittent\"] = df[\"Intermittent\"].astype(\"int8\")\ndf.loc[~df.HasNetwork, \"Intermittent\"] = -1\n\n### Classify 
PercentAltered\ndf[\"PercentAltered\"] = -1\ndf.loc[df.HasNetwork, \"PercentAltered\"] = 100 - df.loc[df.HasNetwork].PercentUnaltered\ndf[\"PercentAlteredClass\"] = classify_percent_altered(df.PercentAltered)\n\n\n# fill network columns and set proper type\nfor col in networks.columns:\n df[col] = df[col].fillna(-1).astype(networks[col].dtype)\n\n### Sanity check\nif df.groupby(level=0).size().max() > 1:\n raise ValueError(\n \"Error - there are duplicate barriers in the results for small_barriers. Check uniqueness of IDs and joins.\"\n )\n\n\n### Write out data for API\nprint(f\"Writing to output files...\")\n\n# Full results for tiles, etc\ndf.reset_index().to_feather(results_dir / \"small_barriers.feather\")\n\n# save for API\ndf[df.columns.intersection(SB_API_FIELDS)].reset_index().to_feather(\n api_dir / f\"small_barriers.feather\"\n)\n\n" ]
[ [ "numpy.array", "pandas.Series" ], [ "pandas.read_feather" ] ]
caisarl76/TADE-AgnosticLT
[ "8a23f6609622dd30feb22101067e644666810400" ]
[ "dataset/flowers.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torchvision\nimport tqdm\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass Flowers(Dataset):\n def __init__(self, root, train=True, download=False, transform=None, rand_number=0, imb_factor=1, imb_type='exp'):\n np.random.seed(rand_number)\n\n root = os.path.join(root, 'flowers')\n if train:\n excel_file = os.path.join(root, 'train.txt')\n else:\n excel_file = os.path.join(root, 'valid.txt')\n\n self.samples = pd.read_csv(excel_file, delimiter=' ')\n self.root_dir = root\n self.transform = transform\n self.targets = self.samples['TARGET'].array\n self.classes = np.unique(self.targets)\n self.cls_num = len(self.classes)\n\n self.samples = np.array(self.samples)\n self.targets = np.array(self.targets, dtype=np.int64)\n\n num_in_class = []\n for class_idx in np.unique(self.targets):\n num_in_class.append(len(np.where(self.targets == class_idx)[0]))\n self.num_in_class = num_in_class\n if train:\n img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)\n self.gen_imbalanced_data(img_num_list)\n\n def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):\n img_max = len(self.samples) / cls_num\n img_num_per_cls = []\n if imb_type == 'exp':\n for cls_idx in range(cls_num):\n num = img_max * (imb_factor ** (cls_idx / (cls_num - 1.0)))\n img_num_per_cls.append(int(num))\n elif imb_type == 'step':\n for cls_idx in range(cls_num // 2):\n img_num_per_cls.append(int(img_max))\n for cls_idx in range(cls_num // 2):\n img_num_per_cls.append(int(img_max * imb_factor))\n else:\n img_num_per_cls.extend([int(img_max)] * cls_num)\n return img_num_per_cls\n\n def gen_imbalanced_data(self, img_num_per_cls):\n new_data = []\n new_targets = []\n classes = np.unique(self.targets)\n # np.random.shuffle(classes)\n self.num_per_cls_dict = dict()\n for the_class, the_img_num in zip(classes, img_num_per_cls):\n self.num_per_cls_dict[the_class] = the_img_num\n idx = np.where(self.targets == the_class)[0]\n np.random.shuffle(idx)\n selec_idx = idx[:the_img_num]\n self.num_per_cls_dict[the_class] = len(selec_idx)\n new_data.append(self.samples[selec_idx])\n new_targets.extend([the_class, ] * the_img_num)\n new_data = np.vstack(new_data)\n self.samples = new_data\n self.targets = new_targets\n self.labels = new_targets\n\n def get_cls_num_list(self):\n cls_num_list = []\n for i in range(self.cls_num):\n cls_num_list.append(self.num_per_cls_dict[i])\n return cls_num_list\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.root_dir, self.samples[index, 0])\n y_label = torch.tensor(self.samples[index, 1]).long()\n image = Image.open(img_path)\n if self.transform:\n if isinstance(self.transform, list):\n sample1 = self.transform[0](image)\n sample2 = self.transform[1](image)\n image = [sample1, sample2]\n else:\n image = self.transform(image)\n return image, y_label\n\nif __name__ == '__main__':\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n # train_dataset = Flowers(root='/data', train=True, download=False, transform=train_transform, imb_factor=1)\n # train_loader = torch.utils.data.DataLoader(\n # train_dataset, batch_size=1, shuffle=False,\n # num_workers=0, persistent_workers=False, pin_memory=True)\n # for i in range(len(train_dataset.get_cls_num_list())):\n # images = torch.empty(train_dataset.get_cls_num_list()[0], 3, 224, 224)\n # idx = 
0\n # for image, y in train_loader:\n # if y == i:\n # images[idx] = image\n # idx += 1\n #\n # plt.figure()\n # plt.title(f'{i}')\n # plt.clf()\n # plt.imshow(torchvision.utils.make_grid(images, normalize=True).permute(1, 2, 0))\n # plt.savefig(f'Flowers_{i}.png')\n train_dataset = Flowers('/data', train=True, download=False, transform=train_transform, imb_factor=0.1)\n test_dataset = Flowers('/data', train=False, download=False, transform=train_transform)\n # train_loader = torch.utils.data.DataLoader(\n # train_dataset, batch_size=128, shuffle=False,\n # num_workers=0, persistent_workers=False, pin_memory=True)\n # for images, y in train_loader:\n # print(y)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False,\n num_workers=0, persistent_workers=False, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=1, shuffle=False,\n num_workers=0, persistent_workers=False, pin_memory=True)\n\n # classes_freq = np.zeros(102)\n # for x, y in tqdm.tqdm(train_loader):\n # classes_freq[np.array(y)] += 1\n # print(classes_freq)\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=1, shuffle=False,\n num_workers=0, persistent_workers=False, pin_memory=True)\n\n # classes_freq = np.zeros(102)\n # for x, y in tqdm.tqdm(test_loader):\n # classes_freq[np.array(y)] += 1\n # print(classes_freq)\n\n # print(train_dataset.get_cls_num_list())\n\n mean = 0.\n std = 0.\n classes_freq = np.zeros(102)\n for images, y in train_loader:\n batch_samples = images.size(0) # batch size (the last batch can have smaller size!)\n images = images.view(batch_samples, images.size(1), -1)\n mean += images.mean(2).sum(0)\n std += images.std(2).sum(0)\n classes_freq[np.array(y)] += 1\n mean /= len(train_loader.dataset)\n std /= len(train_loader.dataset)\n print(classes_freq)\n print(mean, std)\n\n\n # classes_freq = np.zeros(102)\n # for images, y in test_loader:\n # classes_freq[np.array(y)] += 1\n # print(classes_freq)" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.seed", "numpy.random.shuffle", "numpy.where", "numpy.vstack", "torch.utils.data.DataLoader", "torch.tensor", "pandas.read_csv", "numpy.unique" ] ]
rajasoun/app-analytics
[ "101ebb9795b3c80efee072dc79c37633cf0b241b" ]
[ "notebook/demand_forecast_by_ga.py" ]
[ "# flake8: noqa\n# coding: utf-8\n# # Setup\n# ## Jupyter Shell\n# In[1]:\n\n\nshell = 'ZMQInteractiveShell'\nIN_JUPYTER = 'get_ipython' in globals() and get_ipython().__class__.__name__ == shell\n\n# Allow modules and files to be loaded with relative paths\nfrom pkg_resources import resource_filename as fpath\nimport sys\nsys.path.append(fpath(__name__, ''))\n\n\n# ## Theme\n\n# In[2]:\n\n\nif IN_JUPYTER:\n get_ipython().system('jt -l')\n # toggle toolbar ON and notebook name ON\n get_ipython().system('jt -t grade3 -T -N')\n\n\n# # Load Packages & Track Versions\n\n# In[3]:\n\n\n# check the versions of key python librarise\n# Python\nimport sys\nimport platform\nprint('python: %s' % platform.python_version())\n\n\n# In[4]:\n\n\npkgs = [\n 'numpy', 'matplotlib', 'pandas', 'statsmodels', 'sklearn', 'fbprophet',\n 'numba',\n]\nfor pkg in pkgs:\n try:\n globals()['est_module'] = __import__(pkg)\n print(pkg, ': %s' % est_module.__version__)\n except ModuleNotFoundError:\n print(pkg, 'Not Found')\n\n\n# In[6]:\n\n\nimport os\nif IN_JUPYTER:\n workspace_dir = os.path.realpath('..')\nelse:\n workspace_dir = os.getcwd()\nprint('Workspace Dir ->', workspace_dir)\n\n\n# In[7]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\n\nimport datetime\nfrom numba import jit\nimport math\n\nif IN_JUPYTER:\n import matplotlib.pyplot as plt\n get_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # Functions\n\n# In[8]:\n\n\ndef group_by_col(df, col):\n group = df.groupby(df[str(col)])\n group_by = pd.DataFrame(group.size().reset_index(name='Count'))\n return group_by\n\n\n# In[9]:\n\n\ndef delete_columns(df, cols):\n df = df.drop(list(cols), axis=1)\n return df\n\n\n# In[10]:\n\n\ndef print_cols_type(df):\n # Print Column Type\n for col in df:\n print(str(col), '->', type(df[col][1]))\n\n\n# In[11]:\n\n\ndef coerce_columns_to_numeric(df, column_list):\n df[column_list] = df[column_list].apply(pd.to_numeric, errors='coerce')\n\n\n# In[12]:\n\n\nimport dateutil\n# Convert date from string to date times\n\n\ndef coerce_columns_to_date(df, col):\n df[str(col)] = df[str(col)].apply(dateutil.parser.parse, dayfirst=True)\n\n\n# In[13]:\n\n\n# function to create a DataFrame in the format required by Prophet\ndef create_df_for_prophet(ts):\n ts.columns = ['ds', 'y']\n ts = ts.dropna()\n ts.reset_index(drop=True, inplace=True)\n return ts\n\n\n# In[14]:\n\n\nfrom scipy import stats\nimport numpy as np\ndir_name = workspace_dir + '/data/output/'\n\n\ndef remove_outliers_by_col(df, col):\n file = dir_name + 'outliers_' + str(col).lower() + '.csv'\n z = np.abs(stats.zscore(df[str(col)]))\n threshold = 3\n df[(z > 3)].to_csv(file, index=False)\n print('Removed Outliers Stores In ->', file)\n return df[(z < 3)]\n\n\n# In[15]:\n\n\ndef visualize_outliers_by_col(df, col):\n if IN_JUPYTER:\n import seaborn as sns\n sns.boxplot(x=df[str(col)])\n\n\n# In[16]:\n\n\n# function to remove any negative forecasted values.\ndef remove_negtives(ts):\n ts['yhat'] = ts['yhat'].clip_lower(0)\n ts['yhat_lower'] = ts['yhat_lower'].clip_lower(0)\n ts['yhat_upper'] = ts['yhat_upper'].clip_lower(0)\n return ts\n\n\n# In[17]:\n\n\nimport math\n\n\ndef mse(y_actual, y_pred):\n # compute the mean square error\n mse = ((y_actual - y_pred)**2).mean()\n return mse\n\n\n# In[18]:\n\n\n# Symmetric Mean Absolute Percent Error (SMAPE)\n# function to calculate in sample SMAPE scores\ndef smape_fast(y_true, y_pred):\n out = 0\n for i in range(y_true.shape[0]):\n if (y_true[i] != None and np.isnan(y_true[i]) == False):\n a 
= y_true[i]\n b = y_pred[i]\n c = a + b\n if c == 0:\n continue\n out += math.fabs(a - b) / c\n out *= (200.0 / y_true.shape[0])\n return out\n\n\n# In[19]:\n\n\ndef visualize_user_access(df):\n if IN_JUPYTER:\n df.set_index('ds').plot(style=['+'])\n plt.xlabel('Date')\n plt.ylabel('Users')\n plt.title('User Access By Date')\n plt.show()\n\n\n# In[20]:\n\n\ndef visualize_forecast(df):\n if IN_JUPYTER:\n mdl.plot(df)\n plt.show()\n\n\n# In[21]:\n\n\ndef visualize_forecast_details(df):\n if IN_JUPYTER:\n # plot time series components\n mdl.plot_components(df)\n plt.show()\n\n\n# In[64]:\n\n\ndef convert_notebook_to_python():\n get_ipython().system('jupyter nbconvert --to=python notebook demand_forecast_by_ga.ipynb')\n get_ipython().system('ls')\n\n\n# # Sanity Check - Input Data\n\n# In[22]:\n\n\n# import required data\nfrom subprocess import check_output\ninput_dir = workspace_dir + '/data/input/'\nprint(check_output(['ls', input_dir]).decode('utf8'))\n\n\n# # Predict - From Google Analytics Data\n\n# ## Load & Clean Up Data\n\n# In[23]:\n\n\nmax_date_past_data = '2018-10-23' # str(clean_ga_data.ds.max().date())\ndata_file = workspace_dir + '/data/input/est_daily_access.csv'\n\nga_data = pd.read_csv(data_file)\nm = ga_data.shape[0]\nn = ga_data.shape[1]\n\nprint(' Data Set Details')\nprint('+++++++++++++++++++++++++++++++')\nprint('# Of Observations', str(m))\nprint('# Of Features', str(n))\n\n\n# In[24]:\n\n\nvisualize_outliers_by_col(ga_data, 'Users')\n\n\n# In[25]:\n\n\nga_data = remove_outliers_by_col(ga_data, 'Users')\nm = ga_data.shape[0]\nprint(' Data Set without Outliers')\nprint('+++++++++++++++++++++++++++++++')\nprint('# Of Observations', str(m))\nga_data.tail()\n\n\n# In[26]:\n\n\nclean_ga_data = create_df_for_prophet(ga_data)\ncoerce_columns_to_numeric(clean_ga_data, ['y'])\ncoerce_columns_to_date(clean_ga_data, 'ds')\nprint_cols_type(clean_ga_data)\nclean_ga_data.tail()\n\n\n# In[27]:\n\n\nvisualize_user_access(clean_ga_data)\n\n\n# In[28]:\n\n\n# log transform data\nga_data['y'] = np.log(ga_data['y'])\nga_data.tail()\n\n\n# In[29]:\n\n\nvisualize_user_access(ga_data)\n\n\n# ## Prediction\n\n# In[30]:\n\n\nholidays_csv = workspace_dir + '/data/input/us_holidays.csv'\nus_public_holidays = pd.read_csv(holidays_csv)\nmdl = Prophet(\n interval_width=0.95,\n daily_seasonality=True,\n weekly_seasonality=True,\n yearly_seasonality=True,\n holidays=us_public_holidays,\n)\nmdl.fit(ga_data)\n\nga_future = mdl.make_future_dataframe(\n periods=31 + 28, freq='D', include_history=True,\n)\nga_forecast = mdl.predict(ga_future)\n\n\n# In[31]:\n\n\nga_forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()\n\n\n# In[32]:\n\n\nnp.exp(ga_forecast[['yhat', 'yhat_lower', 'yhat_upper']].tail())\n\n\n# In[33]:\n\n\nga_forecast = remove_negtives(ga_forecast)\n\n\n# In[34]:\n\n\nvisualize_forecast(ga_forecast)\n\n\n# In[35]:\n\n\nvisualize_forecast_details(ga_forecast)\n\n\n# In[ ]:\n\n\nga_forecast['yhat'] = np.exp(ga_forecast[['yhat']])\nga_forecast['yhat_lower'] = np.exp(ga_forecast[['yhat_lower']])\nga_forecast['yhat_upper'] = np.exp(ga_forecast[['yhat_upper']])\n\nga_forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()\n\n\n# In[ ]:\n\n\nmodelling_csv = workspace_dir + '/data/output/prediction_based_ga_modelling.csv'\nga_forecast.to_csv(modelling_csv)\n\n\n# In[ ]:\n\n\n# retransform using e\ny_hat = ga_forecast['yhat'][:]\ny_true = clean_ga_data['y']\nmse = mse(y_hat, y_true)\nprint('Prediction quality: {:.2f} MSE ({:.2f} RMSE)'.format(\n mse, math.sqrt(mse),\n))\n\n\n# In[ 
]:\n\n\ny_prediction = ga_forecast['yhat'][:]\ny_actual = clean_ga_data['y']\nsmape = smape_fast(y_actual.values, y_prediction.values)\nprint('Prediction quality: SMAPE : {:.2f} '.format(smape))\n\n\n# In[ ]:\n\n\nprediction = ga_forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]\ncolumn_headers = [\n 'Date', 'PredictedUser', 'Lower(PredictedUser)', 'Upper(PredictedUser)',\n]\nprediction.columns = column_headers\nforecast_csv = workspace_dir + '/data/output/forecast_for_future.csv'\nprediction_future = prediction[prediction.Date > max_date_past_data]\nprediction_future.to_csv(forecast_csv, index=False)\nprediction_future.tail()\n\n\n# In[ ]:\n\nds = ga_forecast[['ds']]\nactual = clean_ga_data['y']\nforecast = ga_forecast[['yhat', 'yhat_lower', 'yhat_upper']]\nframes = [ds, actual, forecast]\ncolumn_headers = [\n 'Date', 'ActualUser', 'PredictedUser', 'Lower(PredictedUser)',\n 'Upper(PredictedUser)',\n]\nresult = pd.concat(frames, axis=1, join='inner')\nresult.columns = column_headers\nforecast_csv = workspace_dir + '/data/output/forecast_for_past.csv'\nresult.to_csv(forecast_csv, index=False)\nresult.tail()\nprint('Prediction Completed Successfully ')\n" ]
[ [ "numpy.isnan", "numpy.log", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.exp", "matplotlib.pyplot.ylabel", "pandas.concat", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
peaceofmind123/esrgan_modified
[ "33a0f2478185eff90a7233b968b7901f7cf3a04a" ]
[ "train.py" ]
[ "# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n# ============================================================================\n# File description: Realize the model training function.\n# ============================================================================\nfrom torch.utils.data import DataLoader\n\nfrom config import *\nfrom dataset import BaseDataset\n\n\ndef train_generator(train_dataloader, epoch) -> None:\n \"\"\"Training the generator network.\n\n Args:\n train_dataloader (torch.utils.data.DataLoader): The loader of the training dataset.\n epoch (int): number of training cycles.\n \"\"\"\n # Calculate how many iterations there are under epoch.\n batches = len(train_dataloader)\n # Set generator network in training mode.\n generator.train()\n\n for index, (lr, hr) in enumerate(train_dataloader):\n # Copy the data to the specified device.\n lr = lr.to(device)\n hr = hr.to(device)\n # Initialize the gradient of the generator model.\n generator.zero_grad()\n # Generate super-resolution images.\n sr = generator(lr)\n # Calculate the difference between the super-resolution image and the high-resolution image at the pixel level.\n pixel_loss = pixel_criterion(sr, hr)\n # Update the weights of the generator model.\n pixel_loss.backward()\n p_optimizer.step()\n # Write the loss during training into Tensorboard.\n iters = index + epoch * batches + 1\n writer.add_scalar(\"Train_Generator/Loss\", pixel_loss.item(), iters)\n # Print the loss function every ten iterations and the last iteration in this epoch.\n if (index + 1) % 10 == 0 or (index + 1) == batches:\n print(f\"Train Epoch[{epoch + 1:04d}/{p_epochs:04d}]({index + 1:05d}/{batches:05d}) \"\n f\"Loss: {pixel_loss.item():.6f}.\")\n\n\ndef train_adversarial(train_dataloader, epoch) -> None:\n \"\"\"Training the adversarial network.\n\n Args:\n train_dataloader (torch.utils.data.DataLoader): The loader of the training dataset.\n epoch (int): number of training cycles.\n \"\"\"\n # Calculate how many iterations there are under Epoch.\n batches = len(train_dataloader)\n # Set adversarial network in training mode.\n discriminator.train()\n generator.train()\n\n for index, (lr, hr) in enumerate(train_dataloader):\n # Copy the data to the specified device.\n lr = lr.to(device)\n hr = hr.to(device)\n label_size = lr.size(0)\n # Create label. 
Set the real sample label to 1, and the false sample label to 0.\n real_label = torch.full([label_size, 1], 1.0, dtype=lr.dtype, device=device)\n fake_label = torch.full([label_size, 1], 0.0, dtype=lr.dtype, device=device)\n\n # Initialize the gradient of the discriminator model.\n discriminator.zero_grad()\n # Generate super-resolution images.\n sr = generator(lr)\n # Calculate the loss of the discriminator model on the high-resolution image.\n hr_output = discriminator(hr)\n sr_output = discriminator(sr.detach())\n d_loss_hr = adversarial_criterion(hr_output - torch.mean(sr_output), real_label)\n d_loss_hr.backward()\n d_hr = hr_output.mean().item()\n # Calculate the loss of the discriminator model on the super-resolution image.\n hr_output = discriminator(hr)\n sr_output = discriminator(sr.detach())\n d_loss_sr = adversarial_criterion(sr_output - torch.mean(hr_output), fake_label)\n d_loss_sr.backward()\n d_sr1 = sr_output.mean().item()\n # Update the weights of the discriminator model.\n d_loss = d_loss_hr + d_loss_sr\n d_optimizer.step()\n\n # Initialize the gradient of the generator model.\n generator.zero_grad()\n # Generate super-resolution images.\n sr = generator(lr)\n # Calculate the loss of the discriminator model on the super-resolution image.\n hr_output = discriminator(hr.detach())\n sr_output = discriminator(sr)\n # Perceptual loss=0.01 * pixel loss + 1.0 * content loss + 0.005 * adversarial loss.\n pixel_loss = pixel_weight * pixel_criterion(sr, hr.detach())\n content_loss = content_weight * content_criterion(sr, hr.detach())\n adversarial_loss = adversarial_weight * adversarial_criterion(sr_output - torch.mean(hr_output), real_label)\n # Update the weights of the generator model.\n g_loss = pixel_loss + content_loss + adversarial_loss\n g_loss.backward()\n g_optimizer.step()\n d_sr2 = sr_output.mean().item()\n\n # Write the loss during training into Tensorboard.\n iters = index + epoch * batches + 1\n writer.add_scalar(\"Train_Adversarial/D_Loss\", d_loss.item(), iters)\n writer.add_scalar(\"Train_Adversarial/G_Loss\", g_loss.item(), iters)\n writer.add_scalar(\"Train_Adversarial/D_HR\", d_hr, iters)\n writer.add_scalar(\"Train_Adversarial/D_SR1\", d_sr1, iters)\n writer.add_scalar(\"Train_Adversarial/D_SR2\", d_sr2, iters)\n # Print the loss function every ten iterations and the last iteration in this epoch.\n if (index + 1) % 10 == 0 or (index + 1) == batches:\n print(f\"Train stage: adversarial \"\n f\"Epoch[{epoch + 1:04d}/{epochs:04d}]({index + 1:05d}/{batches:05d}) \"\n f\"D Loss: {d_loss.item():.6f} G Loss: {g_loss.item():.6f} \"\n f\"D(HR): {d_hr:.6f} D(SR1)/D(SR2): {d_sr1:.6f}/{d_sr2:.6f}.\")\n\n\ndef validate(valid_dataloader, epoch, stage) -> float:\n \"\"\"Verify the generator model.\n\n Args:\n valid_dataloader (torch.utils.data.DataLoader): loader for validating dataset.\n epoch (int): number of training cycles.\n stage (str): In which stage to verify, one is `generator`, the other is `adversarial`.\n\n Returns:\n PSNR value(float).\n \"\"\"\n # Calculate how many iterations there are under epoch.\n batches = len(valid_dataloader)\n # Set generator model in verification mode.\n generator.eval()\n # Initialize the evaluation index.\n total_psnr_value = 0.0\n\n with torch.no_grad():\n for index, (lr, hr) in enumerate(valid_dataloader):\n # Copy the data to the specified device.\n lr = lr.to(device)\n hr = hr.to(device)\n # Generate super-resolution images.\n sr = generator(lr)\n # Calculate the PSNR indicator.\n mse_loss = psnr_criterion(sr, hr)\n psnr_value = 
10 * torch.log10(1 / mse_loss).item()\n total_psnr_value += psnr_value\n\n avg_psnr_value = total_psnr_value / batches\n # Write the value of each round of verification indicators into Tensorboard.\n if stage == \"generator\":\n writer.add_scalar(\"Val_Generator/PSNR\", avg_psnr_value, epoch + 1)\n elif stage == \"adversarial\":\n writer.add_scalar(\"Val_Adversarial/PSNR\", avg_psnr_value, epoch + 1)\n # Print evaluation indicators.\n print(f\"Valid stage: {stage} Epoch[{epoch + 1:04d}] avg PSNR: {avg_psnr_value:.2f}.\\n\")\n\n return avg_psnr_value\n\n\ndef main() -> None:\n # Create a super-resolution experiment result folder.\n if not os.path.exists(exp_dir1):\n os.makedirs(exp_dir1)\n if not os.path.exists(exp_dir2):\n os.makedirs(exp_dir2)\n\n # Load the dataset.\n train_dataset = BaseDataset(train_dir, image_size, upscale_factor, \"train\")\n valid_dataset = BaseDataset(valid_dir, image_size, upscale_factor, \"valid\")\n train_dataloader = DataLoader(train_dataset, batch_size, True, pin_memory=True)\n valid_dataloader = DataLoader(valid_dataset, batch_size, False, pin_memory=True)\n # Check whether the training progress of the last abnormal end is restored, for example, the power is\n # cut off in the middle of the training.\n if resume:\n print(\"Resuming...\")\n if resume_p_weight != \"\":\n generator.load_state_dict(torch.load(resume_p_weight))\n else:\n discriminator.load_state_dict(torch.load(resume_d_weight))\n generator.load_state_dict(torch.load(resume_g_weight))\n\n # Initialize the evaluation indicators for the training stage of the generator model.\n best_psnr_value = 0.0\n # Train the generative network stage.\n for epoch in range(start_p_epoch, p_epochs):\n # Train each epoch for generator network.\n train_generator(train_dataloader, epoch)\n # Verify each epoch for generator network.\n psnr_value = validate(valid_dataloader, epoch, \"generator\")\n # Determine whether the performance of the generator network under epoch is the best.\n is_best = psnr_value > best_psnr_value\n best_psnr_value = max(psnr_value, best_psnr_value)\n # Save the weight of the generator network under epoch. If the performance of the generator network under epoch\n # is best, save a file ending with `-best.pth` in the `results` directory.\n torch.save(generator.state_dict(), os.path.join(exp_dir1, f\"p_epoch{epoch + 1}.pth\"))\n if is_best:\n torch.save(generator.state_dict(), os.path.join(exp_dir2, \"p-best.pth\"))\n # Adjust the learning rate of the generator model.\n p_scheduler.step()\n\n # Save the weight of the last generator network under epoch in this stage.\n torch.save(generator.state_dict(), os.path.join(exp_dir2, \"p-last.pth\"))\n\n # Initialize the evaluation index of the adversarial network training phase.\n best_psnr_value = 0.0\n # Load the model weights with the best indicators in the previous round of training.\n generator.load_state_dict(torch.load(os.path.join(exp_dir2, \"p-best.pth\")))\n # Training the adversarial network stage.\n for epoch in range(start_epoch, epochs):\n # Train each epoch for adversarial network.\n train_adversarial(train_dataloader, epoch)\n # Verify each epoch for adversarial network.\n psnr_value = validate(valid_dataloader, epoch, \"adversarial\")\n # Determine whether the performance of the adversarial network under epoch is the best.\n is_best = psnr_value > best_psnr_value\n best_psnr_value = max(psnr_value, best_psnr_value)\n # Save the weight of the adversarial network under epoch. 
If the performance of the adversarial network\n # under epoch is the best, it will save two additional files ending with `-best.pth` in the `results` directory.\n torch.save(discriminator.state_dict(), os.path.join(exp_dir1, f\"d_epoch{epoch + 1}.pth\"))\n torch.save(generator.state_dict(), os.path.join(exp_dir1, f\"g_epoch{epoch + 1}.pth\"))\n if is_best:\n torch.save(discriminator.state_dict(), os.path.join(exp_dir2, \"d-best.pth\"))\n torch.save(generator.state_dict(), os.path.join(exp_dir2, \"g-best.pth\"))\n # Adjust the learning rate of the adversarial model.\n d_scheduler.step()\n g_scheduler.step()\n\n # Save the weight of the adversarial model under the last Epoch in this stage.\n torch.save(discriminator.state_dict(), os.path.join(exp_dir2, \"d-last.pth\"))\n torch.save(generator.state_dict(), os.path.join(exp_dir2, \"g-last.pth\"))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
katiekeegan/StereoMatching_SVD_ANCC
[ "c0223d820ebfc21fbf8f39eae2812a0fac140d78" ]
[ "geometry.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 10 17:49:52 2022\n\n@author: katie\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef grayImage(img):\n maxVal = np.max(img)\n minVal = np.min(img)\n alpha = 255. / (maxVal - minVal)\n beta = -minVal * alpha\n dst = cv2.convertScaleAbs(src=img, dst=None, alpha=alpha, beta=beta)\n return dst\n\n\ndef disparity_map(img1, img2):\n window_size = 3\n min_disp = 16\n num_disp = 112 - min_disp\n stereo = cv2.StereoSGBM_create(minDisparity=min_disp,\n numDisparities=num_disp,\n blockSize=16,\n P1=8 * 3 * window_size ** 2,\n P2=32 * 3 * window_size ** 2,\n disp12MaxDiff=1,\n uniquenessRatio=10,\n speckleWindowSize=100,\n speckleRange=32\n )\n # stereo = cv2.StereoSGBM_create(-128, 128, 5, 600, 2400, -1, 4, 1, 150, 2, True)\n disparity = stereo.compute(img1, img2).astype(np.float32) / 16.0\n return disparity\n\n\ndef rectify(kpts1, kpts2, img1, img2):\n \"\"\"\n kpts1: numpy array of coordonnees of key points in image1, shape (nb_points, 2)\n kpts2: numpy array of coordonnees of key points in image2, shape (nb_points, 2)\n img1: left gray image of shape (h, w)\n img2: right gray image of shape (h, w)\n \"\"\"\n # change the reference in image1\n x_centroids1, y_centroids1 = np.mean(kpts1, axis=0)\n\n print(x_centroids1)\n print(\"...........\")\n print(y_centroids1)\n\n T1 = np.array([-x_centroids1, -y_centroids1])\n kpts1 = kpts1 + T1\n # print(kpts1)\n\n # change the reference in image2\n x_centroids2, y_centroids2 = np.mean(kpts2, axis=0)\n\n T2 = np.array([-x_centroids2, -y_centroids2])\n kpts2 = kpts2 + T2\n\n # measurement matrix\n M = np.concatenate([kpts1.T, kpts2.T], axis=0)\n # print(M)\n # Singular value decomposition of M\n U, sigma, Vh = np.linalg.svd(M)\n # print(sigma.shape)\n # print(Vh.shape)\n # Sigma = np.zeros((U.shape[0], Vh.shape[0]))\n # Sigma[:U.shape[0], :U.shape[0]] = np.diag(sigma)\n # print(np.linalg.norm(M - np.dot(U, np.dot(Sigma, Vh))))\n U_ = U[:, :3]\n U1 = U_[:2, :]\n U2 = U_[2:, :]\n\n # partition U_i\n A1 = U1[:2, :2]\n d1 = U1[:, 2]\n A2 = U2[:2, :2]\n d2 = U2[:, 2]\n\n # define B_i, U_1' and U_2'\n B1 = np.zeros(shape=(3, 3))\n B1[-1, -1] = 1\n B1[:2, :2] = np.linalg.inv(A1)\n B1[:2, 2] = -np.dot(np.linalg.inv(A1), d1)\n\n B2 = np.zeros(shape=(3, 3))\n B2[-1, -1] = 1\n B2[:2, :2] = np.linalg.inv(A2)\n B2[:2, 2] = -np.dot(np.linalg.inv(A2), d2)\n\n U1_prime = np.dot(U1, B2)\n U2_prime = np.dot(U2, B1)\n\n # calculate theta1, theta2\n x1 = U1_prime[0, -1]\n y1 = U1_prime[1, -1]\n theta1 = np.arctan(y1 / x1)\n\n x2 = U2_prime[0, -1]\n y2 = U2_prime[1, -1]\n theta2 = np.arctan(y2 / x2)\n\n # rotation matrix\n R1 = np.array([[np.cos(theta1), np.sin(theta1)],\n [-np.sin(theta1), np.cos(theta1)]])\n\n R2 = np.array([[np.cos(theta2), np.sin(theta2)],\n [-np.sin(theta2), np.cos(theta2)]])\n\n # calculate B and B_inv\n B = np.zeros(shape=(3, 3))\n B[:2, :] = np.dot(R1, U1_prime)\n B[2, :] = np.dot(R2, U2_prime)[0, :]\n\n try:\n B_inv = np.linalg.inv(B)\n except LinAlgError:\n B[2, :] = np.array([0, 0, 1])\n B_inv = np.linalg.inv(B)\n\n # calculate s and H_s\n tmp = np.dot(R2, np.dot(U2_prime, B_inv))\n s = tmp[1, 1]\n\n H_s = np.array([[1, 0],\n [0, 1. 
/ s]])\n\n # rectify I1 and I2\n # create firstly a map between original position and rectified position\n rows1, cols1 = img1.shape\n map1 = np.zeros((rows1, cols1, 2))\n for h in range(rows1):\n for w in range(cols1):\n map1[h, w] = np.dot(R1, np.array([w, h]) + T1)\n\n w_min1 = np.min(map1[:, :, 0])\n w_max1 = np.max(map1[:, :, 0])\n h_min1 = np.min(map1[:, :, 1])\n h_max1 = np.max(map1[:, :, 1])\n map1[:, :, 0] = map1[:, :, 0] - w_min1\n map1[:, :, 1] = map1[:, :, 1] - h_min1\n rectified_h1 = int(round(h_max1 - h_min1) + 1)\n rectified_w1 = int(round(w_max1 - w_min1) + 1)\n rectified1 = np.zeros((rectified_h1, rectified_w1))\n for h in range(rows1):\n for w in range(cols1):\n rectified1[int(round(map1[h, w, 1])), int(round(map1[h, w, 0]))] = img1[h, w]\n\n rows2, cols2 = img2.shape\n map2 = np.zeros((rows2, cols2, 2))\n for h in range(rows2):\n for w in range(cols2):\n map2[h, w] = np.dot(H_s, np.dot(R2, np.array([w, h]) + T2))\n\n # w_min2 = np.min(map2[:, :, 0])\n # w_max2 = np.max(map2[:, :, 0])\n # h_min2 = np.min(map2[:, :, 1])\n # h_max2 = np.max(map2[:, :, 1])\n map2[:, :, 0] = map2[:, :, 0] - w_min1\n map2[:, :, 1] = map2[:, :, 1] - h_min1\n # rectified_h2 = int(h_max2 - h_min2)+1\n # rectified_w2 = int(w_max2 - w_min2)+1\n rectified2 = np.zeros_like(rectified1)\n for h in range(rows2):\n for w in range(cols2):\n y = int(round(map2[h, w, 1]))\n x = int(round(map2[h, w, 0]))\n if 0 <= y < rectified_h1 and 0 <= x < rectified_w1:\n rectified2[y, x] = img2[h, w]\n\n # translation1 = np.array([[1, 0, T1[0]],\n # [0, 1, T1[1]]])\n #\n # translation2 = np.array([[1, 0, T2[0]],\n # [0, 1, T2[1]]])\n #\n # rows, cols = img1.shape\n #\n # dst1 = cv2.warpAffine(img1, translation1, (cols, rows))\n # dst2 = cv2.warpAffine(img2, translation2, (cols, rows))\n #\n # r1 = np.array([[np.cos(theta1), -np.sin(theta1), 0],\n # [np.sin(theta1), np.cos(theta1), 0]])\n # r2 = np.array([[np.cos(theta2), -np.sin(theta2), 0],\n # [np.sin(theta2), np.cos(theta2), 0]])\n #\n # dst1 = cv2.warpAffine(dst1, r1, (cols, rows))\n # dst2 = cv2.warpAffine(dst2, r2, (cols, rows))\n #\n # dst2 = cv2.resize(dst2, None, fx=1, fy=1. / s)\n\n return rectified1.astype(np.uint8), rectified2.astype(np.uint8), theta1, theta2, s, T1, T2\n\n\ndef interpolate(i, imgL, imgR, disparity):\n \"\"\"\n :param i:\n :param imgL:\n :param imgR:\n :param disparity:\n :return:\n \"\"\"\n ir = np.zeros_like(imgL)\n for y in range(imgL.shape[0]):\n for x1 in range(imgL.shape[1]):\n x2 = int(x1 + disparity[y, x1])\n x_i = int((2 - i) * x1 + (i - 1) * x2)\n if 0 <= x_i < ir.shape[1] and 0 <= x2 < imgR.shape[1]:\n ir[y, x_i] = (2 - i) * imgL[y, x1] + (i - 1) * imgR[y, x2]\n\n return ir\n\n\ndef deRectify(ir, theta1, theta2, T1, T2, s, i):\n \"\"\"\n :param ir: numpy array, interpolated image to be de-rectified\n :param theta1: float, rotation angle in left image\n :param theta2: float, rotation angle in right image\n :param T1: numpy array, translation vector in left image\n :param T2: numpy array, translation vector in right image\n :param s: float number, scale factor\n :param i: float number\n :return: numpy array, de-rectified image\n \"\"\"\n theta_i = (2 - i) * theta1 + (i - 1) * theta2\n s_i = (2 - i) * 1. 
+ (i - 1) * s\n T_i = (2 - i) * T1 + (i - 1) * T2\n H_s_i = np.array([[1, 0],\n [0, s_i]])\n R_i = np.array([[np.cos(theta_i), -np.sin(theta_i)],\n [np.sin(theta_i), np.cos(theta_i)]])\n # de-rectify\n rows, cols = ir.shape\n mapping = np.zeros((rows, cols, 2))\n for h in range(rows):\n for w in range(cols):\n mapping[h, w] = np.dot(R_i, np.dot(H_s_i, np.array([w, h]))) - T_i\n\n w_min = np.min(mapping[:, :, 0])\n w_max = np.max(mapping[:, :, 0])\n h_min = np.min(mapping[:, :, 1])\n h_max = np.max(mapping[:, :, 1])\n mapping[:, :, 0] = mapping[:, :, 0] - w_min\n mapping[:, :, 1] = mapping[:, :, 1] - h_min\n de_rectified_h = int(round(h_max - h_min) + 1)\n de_rectified_w = int(round(w_max - w_min) + 1)\n de_rectified = np.zeros((de_rectified_h, de_rectified_w))\n for h in range(rows):\n for w in range(cols):\n de_rectified[int(round(mapping[h, w, 1])), int(round(mapping[h, w, 0]))] = ir[h, w]\n\n return de_rectified\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.array", "numpy.dot", "numpy.zeros_like", "numpy.sin", "numpy.zeros", "numpy.min", "numpy.mean", "numpy.arctan", "numpy.linalg.svd", "numpy.cos", "numpy.linalg.inv" ] ]
kartik144/Language_Modelling-NTU
[ "19b388f9584ac9bea0685c9214f3548417a95a37" ]
[ "main.py" ]
[ "import argparse\nimport torch\nimport pickle\nfrom utils import data_test\nfrom utils import process\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='PyTorch Sentence Completion Model')\n\n# Model parameters.\nparser.add_argument('--data', type=str, default='./data/penn',\n help='location of the data corpus')\nparser.add_argument('--model_bi', type=str, default='./models/model.pt',\n help='model checkpoint to use')\nparser.add_argument('--model_left', type=str, default='./models/model_left.pt',\n help='model checkpoint to use')\nparser.add_argument('--model_right', type=str, default='./models/model_right.pt',\n help='model checkpoint to use')\nparser.add_argument('--model_attn', type=str, default='./models/model_attn.pt',\n help='model checkpoint to use')\nparser.add_argument('--dict', type=str, default='./Dictionary/dict.pt',\n help='path to pickled dictionary')\nparser.add_argument('--dict_attn', type=str, default='./Dictionary/dict_attn.pt',\n help='path to pickled dictionary')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--file', type=str, default='#stdin#',\n help='use when giving inputs through file instead of STDIN')\nparser.add_argument('--N', type=int, default=10,\n help='denotes number of words displayed (top N words predicted are displayed)')\nparser.add_argument('--sen_length', type=int,\n default=50,\n help='Threshold for limiting sentences of the data '\n '(to restrict unnecessary long sentences)')\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\nwith open(args.model_bi, 'rb') as f:\n model = torch.load(f, map_location = device)\nmodel.eval()\n\nwith open(args.model_attn, 'rb') as f:\n model_attn = torch.load(f, map_location=device)\nmodel_attn.eval()\n\nwith open(args.model_left, 'rb') as f:\n model_left = torch.load(f, map_location = device)\nmodel_left.eval()\n\nwith open(args.model_right, 'rb') as f:\n model_right = torch.load(f, map_location = device)\nmodel_right.eval()\n\ndictionary, threshold = pickle.load(open(args.dict, \"rb\"))\ndict_attn, threshold_attn = pickle.load(open(args.dict_attn, \"rb\"))\nntokens = len(dictionary)\n\n\ndef complete_sentence(sentence, index):\n left_ids, right_ids = data_test.tokenize_input(sentence, dictionary)\n hidden_left = model_left.init_hidden(1)\n hidden_right = model_right.init_hidden(1)\n\n input_left = torch.LongTensor(left_ids).view(-1, 1).to(device)\n input_right = torch.LongTensor(right_ids).view(-1, 1).flip(0).to(device)\n\n outputs_left, hidden_left = model_left(input_left, hidden_left)\n outputs_right, hidden_right = model_right(input_right, hidden_right)\n\n output_flat_left = outputs_left.view(-1, ntokens)[-1]\n output_flat_right = outputs_right.view(-1, ntokens)[-1]\n output_flat = output_flat_left + output_flat_right\n\n missing_word = process.get_missing_word(output_flat, dictionary, args.N)\n missing_word_left = process.get_missing_word(output_flat_left, dictionary, args.N)\n missing_word_right = process.get_missing_word(output_flat_right, dictionary, args.N)\n\n # print(\"Candidate words (bidirectional):\\t\\t\", end=\" \")\n # process.print_predictions(dictionary, 
missing_word)\n\n print(\"Candidate words (unidirectional-left):\\t\", end=\" \")\n process.print_predictions(dictionary, missing_word_left)\n\n print(\"Candidate words (unidirectional-right):\\t\", end=\" \")\n process.print_predictions(dictionary, missing_word_right)\n\n hidden_left = model.init_hidden(1)\n hidden_right = model.init_hidden(1)\n input_left = torch.LongTensor(left_ids).view(-1, 1).to(device)\n input_right = torch.LongTensor(right_ids).view(-1, 1).to(device)\n\n outputs = model.text_imputation(input_left, input_right, hidden_left, hidden_right)\n output_flat = outputs.view(-1, ntokens)[-1] # check this\n\n missing_word = process.get_missing_word(output_flat, dictionary, args.N)\n\n print(\"Candidate words (joint-model): \\t\\t\", end=\"\")\n process.print_predictions(dictionary, missing_word)\n\n ntokens_attn = len(dict_attn)\n l, r = data_test.tokenize_input(sentence, dict_attn, args.sen_length)\n hidden_left = model_attn.init_hidden(1)\n hidden_right = model_attn.init_hidden(1)\n input_left = torch.LongTensor(l).view(-1, 1)\n input_right = torch.LongTensor(r).view(-1, 1)\n output, attn_weights = model_attn.text_imputation(input_left, input_right, hidden_left, hidden_right)\n output_flat = output.view(-1, ntokens_attn)[-1]\n missing_word = process.get_missing_word(output_flat, dict_attn, args.N)\n print(\"Candidate words (attn): \\t\\t\", end=\"\")\n process.print_predictions(dict_attn, missing_word)\n\n fig, ax = plt.subplots()\n sentence = sentence.replace(\"___\", \"\")\n im = ax.matshow(attn_weights.view(attn_weights.size(0), -1)[:len(sentence.split()) + 2].t().detach().numpy())\n\n ax.set_xticks(np.arange(len(sentence.split()) + 2))\n ax.set_xticklabels([x for x in [\"<sos>\"] + sentence.split() + [\"eos\"]])\n\n fig.colorbar(im)\n plt.xticks(rotation=\"45\")\n\n if index != 0:\n plt.savefig('Attention_images/{0}.png'.format(index))\n plt.close()\n else:\n plt.show()\n\n print()\n\n\nif args.file == '#stdin#':\n\n sentence = input(\"Enter sentence (Enter $TOP to stop)\\n\")\n while sentence != \"$TOP\":\n try:\n complete_sentence(sentence, 0)\n except Exception as e:\n print(e)\n\n sentence = input(\"Enter sentence (Enter $TOP to stop)\\n\")\n\nelse:\n\n with open(args.file, \"r\") as f:\n index = 0\n for line in f:\n index += 1\n print(str(index)+\". \"+line, end=\"\")\n try:\n complete_sentence(line, index)\n except Exception as e:\n print(e)" ]
[ [ "torch.device", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "torch.manual_seed", "torch.cuda.is_available", "torch.LongTensor", "torch.load", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks" ] ]
wukailu/EDSR-PyTorch
[ "5625cf83ce88050b68e649beb4155b32c38018fa", "5625cf83ce88050b68e649beb4155b32c38018fa" ]
[ "code/model/super_resolution_model/ddbpn_model.py", "code/model/repdistiller_models/vgg.py" ]
[ "# Deep Back-Projection Networks For Super-Resolution\n# https://arxiv.org/abs/1803.02735\n\nfrom model.super_resolution_model import common\nfrom .utils import register_model, unpack_feature, pack_feature\n\nimport torch\nimport torch.nn as nn\n\n@register_model\ndef DDBPN(**hparams):\n return DDBPN_Model(**hparams)\n\ndef projection_conv(in_channels, out_channels, scale, up=True):\n kernel_size, stride, padding = {\n 2: (6, 2, 2),\n 4: (8, 4, 2),\n 8: (12, 8, 2)\n }[scale]\n if up:\n conv_f = nn.ConvTranspose2d\n else:\n conv_f = nn.Conv2d\n\n return conv_f(\n in_channels, out_channels, kernel_size,\n stride=stride, padding=padding\n )\n\nclass DenseProjection(nn.Module):\n def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):\n super(DenseProjection, self).__init__()\n if bottleneck:\n self.bottleneck = nn.Sequential(*[\n nn.Conv2d(in_channels, nr, 1),\n nn.PReLU(nr)\n ])\n inter_channels = nr\n else:\n self.bottleneck = None\n inter_channels = in_channels\n\n self.conv_1 = nn.Sequential(*[\n projection_conv(inter_channels, nr, scale, up),\n nn.PReLU(nr)\n ])\n self.conv_2 = nn.Sequential(*[\n projection_conv(nr, inter_channels, scale, not up),\n nn.PReLU(inter_channels)\n ])\n self.conv_3 = nn.Sequential(*[\n projection_conv(inter_channels, nr, scale, up),\n nn.PReLU(nr)\n ])\n\n def forward(self, x):\n if self.bottleneck is not None:\n x = self.bottleneck(x)\n\n a_0 = self.conv_1(x)\n b_0 = self.conv_2(a_0)\n e = b_0.sub(x)\n a_1 = self.conv_3(e)\n\n out = a_0.add(a_1)\n\n return out\n\n\nclass DDBPN_Model(nn.Module):\n def __init__(self, scale=4, rgb_range=255, n_feats=128, n_colors=3, **kwargs):\n super(DDBPN_Model, self).__init__()\n\n n0 = n_feats\n nr = 32\n self.depth = 6\n\n rgb_mean = (0.4488, 0.4371, 0.4040)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std)\n initial = [\n nn.Conv2d(n_colors, n0, 3, padding=1),\n nn.PReLU(n0),\n nn.Conv2d(n0, nr, 1),\n nn.PReLU(nr)\n ]\n self.initial = nn.Sequential(*initial)\n\n self.upmodules = nn.ModuleList()\n self.downmodules = nn.ModuleList()\n channels = nr\n for i in range(self.depth):\n self.upmodules.append(\n DenseProjection(channels, nr, scale, True, i > 1)\n )\n if i != 0:\n channels += nr\n \n channels = nr\n for i in range(self.depth - 1):\n self.downmodules.append(\n DenseProjection(channels, nr, scale, False, i != 0)\n )\n channels += nr\n\n reconstruction = [\n nn.Conv2d(self.depth * nr, n_colors, 3, padding=1)\n ]\n self.reconstruction = nn.Sequential(*reconstruction)\n\n self.add_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std, 1)\n\n def forward(self, x):\n x = self.sub_mean(x)\n x = self.initial(x)\n\n h_list = []\n l_list = []\n for i in range(self.depth - 1):\n if i == 0:\n l = x\n else:\n l = torch.cat(l_list, dim=1)\n h_list.append(self.upmodules[i](l))\n l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))\n \n h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1)))\n out = self.reconstruction(torch.cat(h_list, dim=1))\n out = self.add_mean(out)\n\n return out\n\n", "'''VGG for CIFAR10. 
FC layers are removed.\n(c) YANG, Wei\n'''\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, cfg, batch_norm=False, num_classes=1000):\n super(VGG, self).__init__()\n self.block0 = self._make_layers(cfg[0], batch_norm, 3)\n self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])\n self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])\n self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])\n self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])\n\n self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.AdaptiveAvgPool2d((1, 1))\n # self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.classifier = nn.Linear(512, num_classes)\n self._initialize_weights()\n\n def get_feat_modules(self):\n feat_m = nn.ModuleList([])\n feat_m.append(self.block0)\n feat_m.append(self.pool0)\n feat_m.append(self.block1)\n feat_m.append(self.pool1)\n feat_m.append(self.block2)\n feat_m.append(self.pool2)\n feat_m.append(self.block3)\n feat_m.append(self.pool3)\n feat_m.append(self.block4)\n feat_m.append(self.pool4)\n return feat_m\n\n def get_bn_before_relu(self):\n bn1 = self.block1[-1]\n bn2 = self.block2[-1]\n bn3 = self.block3[-1]\n bn4 = self.block4[-1]\n return [bn1, bn2, bn3, bn4]\n\n def forward(self, x, with_feature=False, pre_act=False):\n h = x.shape[2]\n x = F.relu(self.block0(x))\n f0 = x\n x = self.pool0(x)\n x = self.block1(x)\n f1_pre = x\n x = F.relu(x)\n f1 = x\n x = self.pool1(x)\n x = self.block2(x)\n f2_pre = x\n x = F.relu(x)\n f2 = x\n x = self.pool2(x)\n x = self.block3(x)\n f3_pre = x\n x = F.relu(x)\n f3 = x\n if h == 64:\n x = self.pool3(x)\n x = self.block4(x)\n f4_pre = x\n x = F.relu(x)\n f4 = x\n x = self.pool4(x)\n x = x.view(x.size(0), -1)\n f5 = x\n x = self.classifier(x)\n\n if with_feature:\n if pre_act:\n return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x\n else:\n return [f0, f1, f2, f3, f4, f5], x\n else:\n return x\n\n @staticmethod\n def _make_layers(cfg, batch_norm=False, in_channels=3):\n layers = []\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n layers = layers[:-1]\n return nn.Sequential(*layers)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ncfg = {\n 'A': [[64], [128], [256, 256], [512, 512], [512, 512]],\n 'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],\n 'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],\n 'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],\n 'S': [[64], [128], [256], [512], [512]],\n}\n\n\ndef vgg8(**kwargs):\n \"\"\"VGG 8-layer model (configuration \"S\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['S'], **kwargs)\n return model\n\n\ndef vgg8_bn(**kwargs):\n \"\"\"VGG 8-layer model (configuration \"S\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['S'], batch_norm=True, **kwargs)\n return model\n\n\ndef vgg11(**kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['A'], **kwargs)\n return model\n\n\ndef vgg11_bn(**kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\"\"\"\n model = VGG(cfg['A'], batch_norm=True, **kwargs)\n return model\n\n\ndef vgg13(**kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['B'], **kwargs)\n return model\n\n\ndef vgg13_bn(**kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\"\"\"\n model = VGG(cfg['B'], batch_norm=True, **kwargs)\n return model\n\n\ndef vgg16(**kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['D'], **kwargs)\n return model\n\n\ndef vgg16_bn(**kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\"\"\"\n model = VGG(cfg['D'], batch_norm=True, **kwargs)\n return model\n\n\ndef vgg19(**kwargs):\n \"\"\"VGG 19-layer model (configuration \"E\")\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(cfg['E'], **kwargs)\n return model\n\n\ndef vgg19_bn(**kwargs):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\"\"\"\n model = VGG(cfg['E'], batch_norm=True, **kwargs)\n return model\n\n\nif __name__ == '__main__':\n import torch\n\n x = torch.randn(2, 3, 32, 32)\n net = vgg19_bn(num_classes=100)\n feats, logit = net(x, with_feature=True, pre_act=True)\n\n for f in feats:\n print(f.shape, f.min().item())\n print(logit.shape)\n\n for m in net.get_bn_before_relu():\n if isinstance(m, nn.BatchNorm2d):\n print('pass')\n else:\n print('warning')\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.PReLU" ], [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.functional.relu", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d", "torch.randn" ] ]
sridhar551/ONNX
[ "69894f207dfcd72d1e70497d387201cec327efbc", "69894f207dfcd72d1e70497d387201cec327efbc" ]
[ "onnx/backend/test/case/node/sub.py", "onnx/backend/test/case/node/clip.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Sub(Base):\n\n @staticmethod\n def export():\n node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n )\n\n x = np.array([1, 2, 3]).astype(np.float32)\n y = np.array([3, 2, 1]).astype(np.float32)\n z = x - y # expected output [-2., 0., 2.]\n expect(node, inputs=[x, y], outputs=[z],\n name='test_sub_example')\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n y = np.random.randn(3, 4, 5).astype(np.float32)\n z = x - y\n expect(node, inputs=[x, y], outputs=[z],\n name='test_sub')\n\n @staticmethod\n def export_sub_broadcast():\n node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n broadcast=1,\n )\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n y = np.random.randn(5).astype(np.float32)\n z = x - y\n expect(node, inputs=[x, y], outputs=[z],\n name='test_sub_bcast')\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Clip(Base):\n\n @staticmethod\n def export():\n node = onnx.helper.make_node(\n 'Clip',\n inputs=['x'],\n outputs=['y'],\n min=-1.0,\n max=1.0\n )\n\n x = np.array([-2, 0, 2]).astype(np.float32)\n y = np.clip(x, -1, 1) # expected output [-1., 0., 1.]\n expect(node, inputs=[x], outputs=[y],\n name='test_clip_example')\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n y = np.clip(x, -1.0, 1.0)\n expect(node, inputs=[x], outputs=[y],\n name='test_clip')\n\n @staticmethod\n def export_clip_default():\n node = onnx.helper.make_node(\n 'Clip',\n inputs=['x'],\n outputs=['y'],\n min=0.0\n )\n x = np.random.randn(3, 4, 5).astype(np.float32)\n y = np.clip(x, 0.0, np.inf)\n expect(node, inputs=[x], outputs=[y],\n name='test_clip_default_min')\n\n node = onnx.helper.make_node(\n 'Clip',\n inputs=['x'],\n outputs=['y'],\n max=0.0\n )\n x = np.random.randn(3, 4, 5).astype(np.float32)\n y = np.clip(x, -np.inf, 0.0)\n expect(node, inputs=[x], outputs=[y],\n name='test_clip_default_max')\n" ]
[ [ "numpy.array", "numpy.random.randn" ], [ "numpy.array", "numpy.random.randn", "numpy.clip" ] ]
mjawadak/COVID360
[ "34587f0f0eb78886eb4590e4728eed0b869090c5" ]
[ "RecoveryModel.py" ]
[ "import pandas as pd\n\n##### death prediction model\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import preprocessing\n\nclass RecoveryModel():\n\n\tdef __init__(self,MAX_DAYS_OF_INFECTION,NUMBER_ITERATIONS_PER_DAY):\n\n\t\t# Loads the recovery model data and builds a decision tree model with fixed parameters.\n\t\t# We can test other models as well, but to speed up the code, we rely on a simple model.\n\t\t# The model takes as input the age and gender and predicts whether the individual recovers or not.\n\t\t# The training data (recovery_model_data.csv) is obtained from https://c3.ai/covid-19-api-documentation/#tag/LineListRecord (nCoV2019 Data Working Group and MOBS Lab)\n\n\t\tdf = pd.read_csv(\"data/recovery_model_data.csv\")\n\t\tdf[\"gender\"]= df[\"gender\"].str.lower()\n\t\tdf[\"status\"]= df[\"status\"].str.lower()\n\t\tself.le_gender = preprocessing.LabelEncoder()\n\t\tself.le_gender.fit(df[\"gender\"].unique())\n\t\tself.le_death = preprocessing.LabelEncoder()\n\t\tself.le_death.fit(df[\"status\"].unique())\n\t\tself.MAX_DAYS_OF_INFECTION = MAX_DAYS_OF_INFECTION\n\t\tself.NUMBER_ITERATIONS_PER_DAY = NUMBER_ITERATIONS_PER_DAY\n\t\tdf[\"gender_int\"]= self.le_gender.transform(df[\"gender\"])\n\t\tdf[\"status_int\"]= self.le_death.transform(df[\"status\"])\n\n\t\t# Train the ML model\n\t\tself.clf = DecisionTreeClassifier(min_samples_leaf=25, max_depth=3)\n\t\tself.clf.fit(df[[\"age\",\"gender_int\"]].values,df[\"status_int\"].values)\n\n\tdef predictDeathProbs(self,df):\n\t\tinputs = df\n\t\tdeath_probabilities = self.clf.predict_proba(inputs)[:,0] # a list of death probabilites for each infected individual\n\n\t\t# Below, dividing each prob with the average total number of infected days (15+30)/2 and then by NUMBER_ITERATIONS_PER_DAY.\n\t\t# This is because this function is called in every hourly interval, so we equally divide the probability by the average duration of the infection.\n\t\treturn death_probabilities/((12+self.MAX_DAYS_OF_INFECTION)/2.0)/self.NUMBER_ITERATIONS_PER_DAY\n\n################## TESTING CODE ##################\n\nif __name__ == \"__main__\":\n\trecovery_model = RecoveryModel(30,24)\n\tprint(recovery_model.predictDeathProbs([[0,1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[10, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[20, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[30, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[40, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[50, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[60, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[70, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[80, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t[90, 1],\n\t\t\t\t\t\t\t\t\t\t\t\t\t]))" ]
[ [ "sklearn.preprocessing.LabelEncoder", "pandas.read_csv", "sklearn.tree.DecisionTreeClassifier" ] ]
SultanOrazbayev/dask
[ "fc1cea9cdb2ea31348204aa51e4f6f7327a2af33", "fc1cea9cdb2ea31348204aa51e4f6f7327a2af33", "fc1cea9cdb2ea31348204aa51e4f6f7327a2af33" ]
[ "dask/dataframe/io/parquet/utils.py", "dask/dataframe/reshape.py", "dask/dataframe/io/csv.py" ]
[ "import re\n\nimport pandas as pd\n\nfrom dask import config\nfrom dask.dataframe.io.utils import _is_local_fs\nfrom dask.utils import natural_sort_key\n\n\nclass Engine:\n \"\"\"The API necessary to provide a new Parquet reader/writer\"\"\"\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs,\n ):\n \"\"\"Gather metadata about a Parquet Dataset to prepare for a read\n\n This function is called once in the user's Python session to gather\n important metadata about the parquet dataset.\n\n Parameters\n ----------\n fs: FileSystem\n paths: List[str]\n A list of paths to files (or their equivalents)\n categories: list, dict or None\n Column(s) containing categorical data.\n index: str, List[str], or False\n The column name(s) to be used as the index.\n If set to ``None``, pandas metadata (if available) can be used\n to reset the value in this function\n gather_statistics: bool\n Whether or not to gather statistics to calculate divisions\n for the output DataFrame collection.\n filters: list\n List of filters to apply, like ``[('x', '>', 0), ...]``.\n **kwargs: dict (of dicts)\n User-specified arguments to pass on to backend.\n Top level key can be used by engine to select appropriate dict.\n\n Returns\n -------\n meta: pandas.DataFrame\n An empty DataFrame object to use for metadata.\n Should have appropriate column names and dtypes but need not have\n any actual data\n statistics: Optional[List[Dict]]\n Either None, if no statistics were found, or a list of dictionaries\n of statistics data, one dict for every partition (see the next\n return value). The statistics should look like the following:\n\n [\n {'num-rows': 1000, 'columns': [\n {'name': 'id', 'min': 0, 'max': 100},\n {'name': 'x', 'min': 0.0, 'max': 1.0},\n ]},\n ...\n ]\n parts: List[object]\n A list of objects to be passed to ``Engine.read_partition``.\n Each object should represent a piece of data (usually a row-group).\n The type of each object can be anything, as long as the\n engine's read_partition function knows how to interpret it.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def read_partition(cls, fs, piece, columns, index, **kwargs):\n \"\"\"Read a single piece of a Parquet dataset into a Pandas DataFrame\n\n This function is called many times in individual tasks\n\n Parameters\n ----------\n fs: FileSystem\n piece: object\n This is some token that is returned by Engine.read_metadata.\n Typically it represents a row group in a Parquet dataset\n columns: List[str]\n List of column names to pull out of that row group\n index: str, List[str], or False\n The index name(s).\n **kwargs:\n Includes `\"kwargs\"` values stored within the `parts` output\n of `engine.read_metadata`. May also include arguments to be\n passed to the backend (if stored under a top-level `\"read\"` key).\n\n Returns\n -------\n A Pandas DataFrame\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n **kwargs,\n ):\n \"\"\"Perform engine-specific initialization steps for this dataset\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n fs: FileSystem\n path: str\n Destination directory for data. 
Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n append: bool\n If True, may use existing metadata (if any) and perform checks\n against the new data being stored.\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n ignore_divisions: bool\n Whether or not to ignore old divisions when appending. Otherwise,\n overlapping divisions will lead to an error being raised.\n division_info: dict\n Dictionary containing the divisions and corresponding column name.\n **kwargs: dict\n Other keyword arguments (including `index_cols`)\n\n Returns\n -------\n tuple:\n engine-specific instance\n list of filenames, one per partition\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_partition(\n cls, df, path, fs, filename, partition_on, return_metadata, **kwargs\n ):\n \"\"\"\n Output a partition of a dask.DataFrame. This will correspond to\n one output file, unless partition_on is set, in which case, it will\n correspond to up to one file in each sub-directory.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n fs: FileSystem\n filename: str\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n return_metadata : bool\n Whether to return list of instances from this write, one for each\n output file. These will be passed to write_metadata if an output\n metadata file is requested.\n **kwargs: dict\n Other keyword arguments (including `fmd` and `index_cols`)\n\n Returns\n -------\n List of metadata-containing instances (if `return_metadata` is `True`)\n or empty list\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n \"\"\"\n Write the shared metadata file for a parquet dataset.\n\n Parameters\n ----------\n parts: List\n Contains metadata objects to write, of the type undrestood by the\n specific implementation\n meta: non-chunk metadata\n Details that do not depend on the specifics of each chunk write,\n typically the schema and pandas metadata, in a format the writer\n can use.\n fs: FileSystem\n path: str\n Output file to write to, usually ``\"_metadata\"`` in the root of\n the output dataset\n append: boolean\n Whether or not to consolidate new metadata with existing (True)\n or start from scratch (False)\n **kwargs: dict\n Other keyword arguments (including `compression`)\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def collect_file_metadata(cls, path, fs, file_path):\n \"\"\"\n Collect parquet metadata from a file and set the file_path.\n\n Parameters\n ----------\n path: str\n Parquet-file path to extract metadata from.\n fs: FileSystem\n file_path: str\n Relative path to set as `file_path` in the metadata.\n\n Returns\n -------\n A metadata object. The specific type should be recognized\n by the aggregate_metadata method.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def aggregate_metadata(cls, meta_list, fs, out_path):\n \"\"\"\n Aggregate a list of metadata objects and optionally\n write out the final result as a _metadata file.\n\n Parameters\n ----------\n meta_list: list\n List of metadata objects to be aggregated into a single\n metadata object, and optionally written to disk. The\n specific element type can be engine specific.\n fs: FileSystem\n out_path: str or None\n Directory to write the final _metadata file. 
If None\n is specified, the aggregated metadata will be returned,\n and nothing will be written to disk.\n\n Returns\n -------\n If out_path is None, an aggregate metadata object is returned.\n Otherwise, None is returned.\n \"\"\"\n raise NotImplementedError()\n\n\ndef _parse_pandas_metadata(pandas_metadata):\n \"\"\"Get the set of names from the pandas metadata section\n\n Parameters\n ----------\n pandas_metadata : dict\n Should conform to the pandas parquet metadata spec\n\n Returns\n -------\n index_names : list\n List of strings indicating the actual index names\n column_names : list\n List of strings indicating the actual column names\n storage_name_mapping : dict\n Pairs of storage names (e.g. the field names for\n PyArrow) and actual names. The storage and field names will\n differ for index names for certain writers (pyarrow > 0.8).\n column_indexes_names : list\n The names for ``df.columns.name`` or ``df.columns.names`` for\n a MultiIndex in the columns\n\n Notes\n -----\n This should support metadata written by at least\n\n * fastparquet>=0.1.3\n * pyarrow>=0.7.0\n \"\"\"\n index_storage_names = [\n n[\"name\"] if isinstance(n, dict) else n\n for n in pandas_metadata[\"index_columns\"]\n ]\n index_name_xpr = re.compile(r\"__index_level_\\d+__\")\n\n # older metadatas will not have a 'field_name' field so we fall back\n # to the 'name' field\n pairs = [\n (x.get(\"field_name\", x[\"name\"]), x[\"name\"]) for x in pandas_metadata[\"columns\"]\n ]\n\n # Need to reconcile storage and real names. These will differ for\n # pyarrow, which uses __index_leveL_d__ for the storage name of indexes.\n # The real name may be None (e.g. `df.index.name` is None).\n pairs2 = []\n for storage_name, real_name in pairs:\n if real_name and index_name_xpr.match(real_name):\n real_name = None\n pairs2.append((storage_name, real_name))\n index_names = [name for (storage_name, name) in pairs2 if name != storage_name]\n\n # column_indexes represents df.columns.name\n # It was added to the spec after pandas 0.21.0+, and implemented\n # in PyArrow 0.8. It was added to fastparquet in 0.3.1.\n column_index_names = pandas_metadata.get(\"column_indexes\", [{\"name\": None}])\n column_index_names = [x[\"name\"] for x in column_index_names]\n\n # Now we need to disambiguate between columns and index names. PyArrow\n # 0.8.0+ allows for duplicates between df.index.names and df.columns\n if not index_names:\n # For PyArrow < 0.8, Any fastparquet. This relies on the facts that\n # 1. Those versions used the real index name as the index storage name\n # 2. Those versions did not allow for duplicate index / column names\n # So we know that if a name is in index_storage_names, it must be an\n # index name\n if index_storage_names and isinstance(index_storage_names[0], dict):\n # Cannot handle dictionary case\n index_storage_names = []\n index_names = list(index_storage_names) # make a copy\n index_storage_names2 = set(index_storage_names)\n column_names = [\n name for (storage_name, name) in pairs if name not in index_storage_names2\n ]\n else:\n # For newer PyArrows the storage names differ from the index names\n # iff it's an index level. 
Though this is a fragile assumption for\n # other systems...\n column_names = [name for (storage_name, name) in pairs2 if name == storage_name]\n\n storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully\n\n return index_names, column_names, storage_name_mapping, column_index_names\n\n\ndef _normalize_index_columns(user_columns, data_columns, user_index, data_index):\n \"\"\"Normalize user and file-provided column and index names\n\n Parameters\n ----------\n user_columns : None, str or list of str\n data_columns : list of str\n user_index : None, str, or list of str\n data_index : list of str\n\n Returns\n -------\n column_names : list of str\n index_names : list of str\n \"\"\"\n specified_columns = user_columns is not None\n specified_index = user_index is not None\n\n if user_columns is None:\n user_columns = list(data_columns)\n elif isinstance(user_columns, str):\n user_columns = [user_columns]\n else:\n user_columns = list(user_columns)\n\n if user_index is None:\n user_index = data_index\n elif user_index is False:\n # When index is False, use no index and all fields should be treated as\n # columns (unless `columns` provided).\n user_index = []\n data_columns = data_index + data_columns\n elif isinstance(user_index, str):\n user_index = [user_index]\n else:\n user_index = list(user_index)\n\n if specified_index and not specified_columns:\n # Only `index` provided. Use specified index, and all column fields\n # that weren't specified as indices\n index_names = user_index\n column_names = [x for x in data_columns if x not in index_names]\n elif specified_columns and not specified_index:\n # Only `columns` provided. Use specified columns, and all index fields\n # that weren't specified as columns\n column_names = user_columns\n index_names = [x for x in data_index if x not in column_names]\n elif specified_index and specified_columns:\n # Both `index` and `columns` provided. 
Use as specified, but error if\n # they intersect.\n column_names = user_columns\n index_names = user_index\n if set(column_names).intersection(index_names):\n raise ValueError(\"Specified index and column names must not intersect\")\n else:\n # Use default columns and index from the metadata\n column_names = data_columns\n index_names = data_index\n\n return column_names, index_names\n\n\ndef _sort_and_analyze_paths(file_list, fs, root=False):\n file_list = sorted(file_list, key=natural_sort_key)\n base, fns = _analyze_paths(file_list, fs, root=root)\n return file_list, base, fns\n\n\ndef _analyze_paths(file_list, fs, root=False):\n \"\"\"Consolidate list of file-paths into parquet relative paths\n\n Note: This function was mostly copied from dask/fastparquet to\n use in both `FastParquetEngine` and `ArrowEngine`.\"\"\"\n\n def _join_path(*path):\n def _scrub(i, p):\n # Convert path to standard form\n # this means windows path separators are converted to linux\n p = p.replace(fs.sep, \"/\")\n if p == \"\": # empty path is assumed to be a relative path\n return \".\"\n if p[-1] == \"/\": # trailing slashes are not allowed\n p = p[:-1]\n if i > 0 and p[0] == \"/\": # only the first path can start with /\n p = p[1:]\n return p\n\n abs_prefix = \"\"\n if path and path[0]:\n if path[0][0] == \"/\":\n abs_prefix = \"/\"\n path = list(path)\n path[0] = path[0][1:]\n elif fs.sep == \"\\\\\" and path[0][1:].startswith(\":/\"):\n # If windows, then look for the \"c:/\" prefix\n abs_prefix = path[0][0:3]\n path = list(path)\n path[0] = path[0][3:]\n\n _scrubbed = []\n for i, p in enumerate(path):\n _scrubbed.extend(_scrub(i, p).split(\"/\"))\n simpler = []\n for s in _scrubbed:\n if s == \".\":\n pass\n elif s == \"..\":\n if simpler:\n if simpler[-1] == \"..\":\n simpler.append(s)\n else:\n simpler.pop()\n elif abs_prefix:\n raise Exception(\"can not get parent of root\")\n else:\n simpler.append(s)\n else:\n simpler.append(s)\n\n if not simpler:\n if abs_prefix:\n joined = abs_prefix\n else:\n joined = \".\"\n else:\n joined = abs_prefix + (\"/\".join(simpler))\n return joined\n\n path_parts_list = [_join_path(fn).split(\"/\") for fn in file_list]\n if root is False:\n basepath = path_parts_list[0][:-1]\n for i, path_parts in enumerate(path_parts_list):\n j = len(path_parts) - 1\n for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):\n if base_part != path_part:\n j = k\n break\n basepath = basepath[:j]\n l = len(basepath)\n else:\n basepath = _join_path(root).split(\"/\")\n l = len(basepath)\n assert all(\n p[:l] == basepath for p in path_parts_list\n ), \"All paths must begin with the given root\"\n out_list = []\n for path_parts in path_parts_list:\n out_list.append(\n \"/\".join(path_parts[l:])\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n return (\n \"/\".join(basepath),\n out_list,\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n\ndef _aggregate_stats(\n file_path,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n\n Used by `Engine._construct_parts`\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n elif len(file_row_group_column_stats) == 0:\n assert len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. 
It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"num-row-groups\": df_rgs[\"num-rows\"].count(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"num-row-groups\": 1,\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 2\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n }\n )\n return s\n\n\ndef _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n make_part_func,\n make_part_kwargs,\n):\n\n # Construct `parts` and `stats`\n parts = []\n stats = []\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n residual = 0\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n if residual:\n _rgs = [0] + list(range(residual, row_group_count, split_row_groups))\n else:\n _rgs = list(range(residual, row_group_count, split_row_groups))\n\n for i in _rgs:\n\n i_end = i + split_row_groups\n if aggregation_depth is True:\n if residual and i == 0:\n i_end = residual\n residual = 0\n _residual = i_end - row_group_count\n if _residual > 0:\n residual = _residual\n\n rg_list = row_groups[i:i_end]\n\n part = make_part_func(\n filename,\n rg_list,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n\n part = make_part_func(\n filename,\n row_groups,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats\n\n\ndef _get_aggregation_depth(aggregate_files, partition_names):\n # Use `aggregate_files` to set `aggregation_depth`\n #\n # Note that `partition_names` must be ordered. `True` means that we allow\n # aggregation of any two files. `False` means that we will never aggregate\n # files. If a string is specified, it must be the name of a partition\n # column, and the \"partition depth\" of that column will be used for\n # aggregation. 
Note that we always convert the string into the partition\n # \"depth\" to simplify the aggregation logic.\n\n # Summary of output `aggregation_depth` settings:\n #\n # True : Free-for-all aggregation (any two files may be aggregated)\n # False : No file aggregation allowed\n # <int> : Allow aggregation within this partition-hierarchy depth\n\n aggregation_depth = aggregate_files\n if isinstance(aggregate_files, str):\n if aggregate_files in partition_names:\n # aggregate_files corresponds to a partition column. Reset the\n # value of this variable to reflect the partition \"depth\" (in the\n # range of 1 to the total number of partition levels)\n aggregation_depth = len(partition_names) - partition_names.index(\n aggregate_files\n )\n else:\n raise ValueError(\n f\"{aggregate_files} is not a recognized directory partition.\"\n )\n\n return aggregation_depth\n\n\ndef _set_metadata_task_size(metadata_task_size, fs):\n # Set metadata_task_size using the config file\n # if the kwarg value was not specified\n if metadata_task_size is None:\n # If a default value is not specified in the config file,\n # otherwise we use \"0\"\n config_str = \"dataframe.parquet.metadata-task-size-\" + (\n \"local\" if _is_local_fs(fs) else \"remote\"\n )\n return config.get(config_str, 0)\n\n return metadata_task_size\n\n\ndef _process_open_file_options(\n open_file_options,\n metadata=None,\n columns=None,\n row_groups=None,\n default_engine=None,\n default_cache=\"readahead\",\n allow_precache=True,\n):\n # Process `open_file_options`.\n # Set default values and extract `precache_options`\n open_file_options = (open_file_options or {}).copy()\n precache_options = open_file_options.pop(\"precache_options\", {}).copy()\n if not allow_precache:\n # Precaching not allowed\n # (probably because the file system is local)\n precache_options = {}\n if \"open_file_func\" not in open_file_options:\n if precache_options.get(\"method\", None) == \"parquet\":\n open_file_options[\"cache_type\"] = open_file_options.get(\n \"cache_type\", \"parts\"\n )\n precache_options.update(\n {\n \"metadata\": metadata,\n \"columns\": columns,\n \"row_groups\": row_groups,\n \"engine\": precache_options.get(\"engine\", default_engine),\n }\n )\n else:\n open_file_options[\"cache_type\"] = open_file_options.get(\n \"cache_type\", default_cache\n )\n open_file_options[\"mode\"] = open_file_options.get(\"mode\", \"rb\")\n return precache_options, open_file_options\n\n\ndef _split_user_options(**kwargs):\n # Check user-defined options.\n # Split into \"file\" and \"dataset\"-specific kwargs\n user_kwargs = kwargs.copy()\n dataset_options = {\n **user_kwargs.pop(\"file\", {}).copy(),\n **user_kwargs.pop(\"dataset\", {}).copy(),\n }\n read_options = user_kwargs.pop(\"read\", {}).copy()\n read_options[\"open_file_options\"] = user_kwargs.pop(\"open_file_options\", {}).copy()\n return (\n dataset_options,\n read_options,\n user_kwargs,\n )\n\n\ndef _set_gather_statistics(\n gather_statistics,\n chunksize,\n split_row_groups,\n aggregation_depth,\n filter_columns,\n stat_columns,\n):\n # Use available information about the current read options\n # and target dataset to decide if we need to gather metadata\n # statistics to construct the graph for a `read_parquet` op.\n\n # If the user has specified `calculate_divisions=True`, then\n # we will be starting with `gather_statistics=True` here.\n if (\n chunksize\n or (int(split_row_groups) > 1 and aggregation_depth)\n or filter_columns.intersection(stat_columns)\n ):\n # Need to gather statistics if 
we are aggregating files\n # or filtering\n # NOTE: Should avoid gathering statistics when the agg\n # does not depend on a row-group statistic\n gather_statistics = True\n elif not stat_columns:\n # Not aggregating files/row-groups.\n # We only need to gather statistics if `stat_columns`\n # is populated\n gather_statistics = False\n\n return bool(gather_statistics)\n", "import sys\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like, is_scalar\n\nfrom dask.dataframe import methods\nfrom dask.dataframe.core import DataFrame, Series, apply_concat_apply, map_partitions\nfrom dask.dataframe.utils import has_known_categories\nfrom dask.utils import M\n\n###############################################################\n# Dummies\n###############################################################\n\n\ndef get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na=False,\n columns=None,\n sparse=False,\n drop_first=False,\n dtype=np.uint8,\n **kwargs,\n):\n \"\"\"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n data : Series, or DataFrame\n For Series, the dtype must be categorical.\n For DataFrame, at least one column must be categorical.\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy columns should be sparse or not. Returns\n SparseDataFrame if `data` is a Series or if all columns are included.\n Otherwise returns a DataFrame with some SparseBlocks.\n\n .. versionadded:: 0.18.2\n\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n .. versionadded:: 0.18.2\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n Dask's version only works with Categorical data, as this is the only way to\n know the output shape without computing all the data.\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)\n >>> dd.get_dummies(s)\n Traceback (most recent call last):\n ...\n NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...\n\n With categorical data:\n\n >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)\n >>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE\n Dask DataFrame Structure:\n a b c\n npartitions=2\n 0 uint8 uint8 uint8\n 2 ... ... ...\n 3 ... ... 
...\n Dask Name: get_dummies, 4 tasks\n >>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n See Also\n --------\n pandas.get_dummies\n \"\"\"\n if isinstance(data, (pd.Series, pd.DataFrame)):\n return pd.get_dummies(\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs,\n )\n\n not_cat_msg = (\n \"`get_dummies` with non-categorical dtypes is not \"\n \"supported. Please use `df.categorize()` beforehand to \"\n \"convert to categorical dtype.\"\n )\n\n unknown_cat_msg = (\n \"`get_dummies` with unknown categories is not \"\n \"supported. Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known \"\n \"categories\"\n )\n\n if isinstance(data, Series):\n if not methods.is_categorical_dtype(data):\n raise NotImplementedError(not_cat_msg)\n if not has_known_categories(data):\n raise NotImplementedError(unknown_cat_msg)\n elif isinstance(data, DataFrame):\n if columns is None:\n if (data.dtypes == \"object\").any():\n raise NotImplementedError(not_cat_msg)\n columns = data._meta.select_dtypes(include=[\"category\"]).columns\n else:\n if not all(methods.is_categorical_dtype(data[c]) for c in columns):\n raise NotImplementedError(not_cat_msg)\n\n if not all(has_known_categories(data[c]) for c in columns):\n raise NotImplementedError(unknown_cat_msg)\n\n package_name = data._meta.__class__.__module__.split(\".\")[0]\n dummies = sys.modules[package_name].get_dummies\n\n return map_partitions(\n dummies,\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs,\n )\n\n\n###############################################################\n# Pivot table\n###############################################################\n\n\ndef pivot_table(df, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or list-like.\n\n Parameters\n ----------\n df : DataFrame\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n values : scalar or list(scalar)\n column(s) to aggregate\n aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n\n See Also\n --------\n pandas.DataFrame.pivot_table\n \"\"\"\n\n if not is_scalar(index) or index is None:\n raise ValueError(\"'index' must be the name of an existing column\")\n if not is_scalar(columns) or columns is None:\n raise ValueError(\"'columns' must be the name of an existing column\")\n if not methods.is_categorical_dtype(df[columns]):\n raise ValueError(\"'columns' must be category dtype\")\n if not has_known_categories(df[columns]):\n raise ValueError(\n \"'columns' must have known categories. 
Please use \"\n \"`df[columns].cat.as_known()` beforehand to ensure \"\n \"known categories\"\n )\n if not (\n is_list_like(values)\n and all([is_scalar(v) for v in values])\n or is_scalar(values)\n ):\n raise ValueError(\"'values' must refer to an existing column or columns\")\n\n available_aggfuncs = [\"mean\", \"sum\", \"count\", \"first\", \"last\"]\n\n if not is_scalar(aggfunc) or aggfunc not in available_aggfuncs:\n raise ValueError(\n \"aggfunc must be either \" + \", \".join(f\"'{x}'\" for x in available_aggfuncs)\n )\n\n # _emulate can't work for empty data\n # the result must have CategoricalIndex columns\n\n columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)\n if is_scalar(values):\n new_columns = columns_contents\n else:\n new_columns = pd.MultiIndex.from_product(\n (sorted(values), columns_contents), names=[None, columns]\n )\n\n if aggfunc in [\"first\", \"last\"]:\n # Infer datatype as non-numeric values are allowed\n if is_scalar(values):\n meta = pd.DataFrame(\n columns=new_columns,\n dtype=df[values].dtype,\n index=pd.Index(df._meta[index]),\n )\n else:\n meta = pd.DataFrame(\n columns=new_columns,\n index=pd.Index(df._meta[index]),\n )\n for value_col in values:\n meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])\n else:\n # Use float64 as other aggregate functions require numerical data\n meta = pd.DataFrame(\n columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index])\n )\n\n kwargs = {\"index\": index, \"columns\": columns, \"values\": values}\n\n if aggfunc in [\"sum\", \"mean\"]:\n pv_sum = apply_concat_apply(\n [df],\n chunk=methods.pivot_sum,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_sum\",\n chunk_kwargs=kwargs,\n )\n\n if aggfunc in [\"count\", \"mean\"]:\n pv_count = apply_concat_apply(\n [df],\n chunk=methods.pivot_count,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_count\",\n chunk_kwargs=kwargs,\n )\n\n if aggfunc == \"sum\":\n return pv_sum\n elif aggfunc == \"count\":\n return pv_count\n elif aggfunc == \"mean\":\n return pv_sum / pv_count\n elif aggfunc == \"first\":\n return apply_concat_apply(\n [df],\n chunk=methods.pivot_first,\n aggregate=methods.pivot_agg_first,\n meta=meta,\n token=\"pivot_table_first\",\n chunk_kwargs=kwargs,\n )\n elif aggfunc == \"last\":\n return apply_concat_apply(\n [df],\n chunk=methods.pivot_last,\n aggregate=methods.pivot_agg_last,\n meta=meta,\n token=\"pivot_table_last\",\n chunk_kwargs=kwargs,\n )\n else:\n raise ValueError\n\n\n###############################################################\n# Melt\n###############################################################\n\n\ndef melt(\n frame,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n):\n \"\"\"\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier variables\n (``id_vars``), while all other columns, considered measured variables (``value_vars``), are \"unpivoted\" to the row\n axis, leaving just two non-identifier columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. 
If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n\n from dask.dataframe.core import no_default\n\n return frame.map_partitions(\n M.melt,\n meta=no_default,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n token=\"melt\",\n )\n", "import os\nfrom collections.abc import Mapping\nfrom io import BytesIO\nfrom warnings import catch_warnings, simplefilter, warn\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nimport fsspec.implementations.local\nimport numpy as np\nimport pandas as pd\nfrom fsspec.compression import compr\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.core import open as open_file\nfrom fsspec.core import open_files\nfrom fsspec.utils import infer_compression\nfrom pandas.api.types import (\n CategoricalDtype,\n is_datetime64_any_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n)\n\nfrom dask.base import tokenize\nfrom dask.bytes import read_bytes\nfrom dask.core import flatten\nfrom dask.dataframe.io.io import from_map\nfrom dask.dataframe.io.utils import DataFrameIOFunction\nfrom dask.dataframe.utils import clear_known_categories\nfrom dask.delayed import delayed\nfrom dask.utils import asciitable, parse_bytes\n\n\nclass CSVFunctionWrapper(DataFrameIOFunction):\n \"\"\"\n CSV Function-Wrapper Class\n Reads CSV data from disk to produce a partition (given a key).\n \"\"\"\n\n def __init__(\n self,\n full_columns,\n columns,\n colname,\n head,\n header,\n reader,\n dtypes,\n enforce,\n kwargs,\n ):\n self.full_columns = full_columns\n self._columns = columns\n self.colname = colname\n self.head = head\n self.header = header\n self.reader = reader\n self.dtypes = dtypes\n self.enforce = enforce\n self.kwargs = kwargs\n\n @property\n def columns(self):\n return self.full_columns if self._columns is None else self._columns\n\n def project_columns(self, columns):\n \"\"\"Return a new CSVFunctionWrapper object with\n a sub-column projection.\n \"\"\"\n # Make sure columns is ordered correctly\n columns = [c for c in self.head.columns if c in columns]\n if columns == self.columns:\n return self\n return CSVFunctionWrapper(\n self.full_columns,\n columns,\n self.colname,\n self.head[columns],\n self.header,\n self.reader,\n {c: self.dtypes[c] for c in columns},\n self.enforce,\n self.kwargs,\n )\n\n def __call__(self, part):\n\n # Part will be a 3-element tuple\n block, path, is_first, is_last = part\n\n # Construct `path_info`\n if path is not None:\n path_info = (\n self.colname,\n path,\n sorted(list(self.head[self.colname].cat.categories)),\n )\n else:\n path_info = None\n\n # Deal with arguments that are special\n # for the first block of each file\n write_header = False\n rest_kwargs = self.kwargs.copy()\n if not is_first:\n write_header = True\n rest_kwargs.pop(\"skiprows\", None)\n if rest_kwargs.get(\"header\", 0) is not None:\n rest_kwargs.pop(\"header\", None)\n if not is_last:\n rest_kwargs.pop(\"skipfooter\", None)\n\n # Deal with column projection\n columns = self.full_columns\n project_after_read = False\n if self._columns is not None:\n if 
self.kwargs:\n # To be safe, if any kwargs are defined, avoid\n # changing `usecols` here. Instead, we can just\n # select columns after the read\n project_after_read = True\n else:\n columns = self._columns\n rest_kwargs[\"usecols\"] = columns\n\n # Call `pandas_read_text`\n df = pandas_read_text(\n self.reader,\n block,\n self.header,\n rest_kwargs,\n self.dtypes,\n columns,\n write_header,\n self.enforce,\n path_info,\n )\n if project_after_read:\n return df[self.columns]\n return df\n\n\ndef pandas_read_text(\n reader,\n b,\n header,\n kwargs,\n dtypes=None,\n columns=None,\n write_header=True,\n enforce=False,\n path=None,\n):\n \"\"\"Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n b : bytestring\n The content to be parsed with ``reader``\n header : bytestring\n An optional header to prepend to ``b``\n kwargs : dict\n A dictionary of keyword arguments to be passed to ``reader``\n dtypes : dict\n dtypes to assign to columns\n path : tuple\n A tuple containing path column name, path to file, and an ordered list of paths.\n\n See Also\n --------\n dask.dataframe.csv.read_pandas_from_bytes\n \"\"\"\n bio = BytesIO()\n if write_header and not b.startswith(header.rstrip()):\n bio.write(header)\n bio.write(b)\n bio.seek(0)\n df = reader(bio, **kwargs)\n if dtypes:\n coerce_dtypes(df, dtypes)\n\n if enforce and columns and (list(df.columns) != list(columns)):\n raise ValueError(\"Columns do not match\", df.columns, columns)\n if path:\n colname, path, paths = path\n code = paths.index(path)\n df = df.assign(\n **{colname: pd.Categorical.from_codes(np.full(len(df), code), paths)}\n )\n return df\n\n\ndef coerce_dtypes(df, dtypes):\n \"\"\"Coerce dataframe to dtypes safely\n\n Operates in place\n\n Parameters\n ----------\n df: Pandas DataFrame\n dtypes: dict like {'x': float}\n \"\"\"\n bad_dtypes = []\n bad_dates = []\n errors = []\n for c in df.columns:\n if c in dtypes and df.dtypes[c] != dtypes[c]:\n actual = df.dtypes[c]\n desired = dtypes[c]\n if is_float_dtype(actual) and is_integer_dtype(desired):\n bad_dtypes.append((c, actual, desired))\n elif is_object_dtype(actual) and is_datetime64_any_dtype(desired):\n # This can only occur when parse_dates is specified, but an\n # invalid date is encountered. Pandas then silently falls back\n # to object dtype. 
Since `object_array.astype(datetime)` will\n # silently overflow, error here and report.\n bad_dates.append(c)\n else:\n try:\n df[c] = df[c].astype(dtypes[c])\n except Exception as e:\n bad_dtypes.append((c, actual, desired))\n errors.append((c, e))\n\n if bad_dtypes:\n if errors:\n ex = \"\\n\".join(\n f\"- {c}\\n {e!r}\" for c, e in sorted(errors, key=lambda x: str(x[0]))\n )\n exceptions = (\n \"The following columns also raised exceptions on \"\n \"conversion:\\n\\n%s\\n\\n\"\n ) % ex\n extra = \"\"\n else:\n exceptions = \"\"\n # All mismatches are int->float, also suggest `assume_missing=True`\n extra = (\n \"\\n\\nAlternatively, provide `assume_missing=True` \"\n \"to interpret\\n\"\n \"all unspecified integer columns as floats.\"\n )\n\n bad_dtypes = sorted(bad_dtypes, key=lambda x: str(x[0]))\n table = asciitable([\"Column\", \"Found\", \"Expected\"], bad_dtypes)\n dtype_kw = \"dtype={%s}\" % \",\\n \".join(\n f\"{k!r}: '{v}'\" for (k, v, _) in bad_dtypes\n )\n\n dtype_msg = (\n \"{table}\\n\\n\"\n \"{exceptions}\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\\n\"\n \"{dtype_kw}\\n\\n\"\n \"to the call to `read_csv`/`read_table`.\"\n \"{extra}\"\n ).format(table=table, exceptions=exceptions, dtype_kw=dtype_kw, extra=extra)\n else:\n dtype_msg = None\n\n if bad_dates:\n also = \" also \" if bad_dtypes else \" \"\n cols = \"\\n\".join(\"- %s\" % c for c in bad_dates)\n date_msg = (\n \"The following columns{also}failed to properly parse as dates:\\n\\n\"\n \"{cols}\\n\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n ).format(also=also, cols=cols)\n else:\n date_msg = None\n\n if bad_dtypes or bad_dates:\n rule = \"\\n\\n%s\\n\\n\" % (\"-\" * 61)\n msg = \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\\n%s\" % (\n rule.join(filter(None, [dtype_msg, date_msg]))\n )\n raise ValueError(msg)\n\n\ndef text_blocks_to_pandas(\n reader,\n block_lists,\n header,\n head,\n kwargs,\n enforce=False,\n specified_dtypes=None,\n path=None,\n blocksize=None,\n urlpath=None,\n):\n \"\"\"Convert blocks of bytes to a dask.dataframe\n\n This accepts a list of lists of values of bytes where each list corresponds\n to one file, and the value of bytes concatenate to comprise the entire\n file, in order.\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n block_lists : list of lists of delayed values of bytes\n The lists of bytestrings where each list corresponds to one logical file\n header : bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head : pd.DataFrame\n An example Pandas DataFrame to be used for metadata.\n kwargs : dict\n Keyword arguments to pass down to ``reader``\n path : tuple, optional\n A tuple containing column name for path and the path_converter if provided\n\n Returns\n -------\n A dask.dataframe\n \"\"\"\n dtypes = head.dtypes.to_dict()\n # dtypes contains only instances of CategoricalDtype, which causes issues\n # in coerce_dtypes for non-uniform categories across partitions.\n # We will modify `dtype` (which is inferred) to\n # 1. contain instances of CategoricalDtypes for user-provided types\n # 2. 
contain 'category' for data inferred types\n categoricals = head.select_dtypes(include=[\"category\"]).columns\n\n if isinstance(specified_dtypes, Mapping):\n known_categoricals = [\n k\n for k in categoricals\n if isinstance(specified_dtypes.get(k), CategoricalDtype)\n and specified_dtypes.get(k).categories is not None\n ]\n unknown_categoricals = categoricals.difference(known_categoricals)\n else:\n unknown_categoricals = categoricals\n\n # Fixup the dtypes\n for k in unknown_categoricals:\n dtypes[k] = \"category\"\n\n columns = list(head.columns)\n\n blocks = tuple(flatten(block_lists))\n # Create mask of first blocks from nested block_lists\n is_first = tuple(block_mask(block_lists))\n is_last = tuple(block_mask_last(block_lists))\n\n if path:\n colname, path_converter = path\n paths = [b[1].path for b in blocks]\n if path_converter:\n paths = [path_converter(p) for p in paths]\n head = head.assign(\n **{\n colname: pd.Categorical.from_codes(\n np.zeros(len(head), dtype=int), set(paths)\n )\n }\n )\n path = (colname, paths)\n\n if len(unknown_categoricals):\n head = clear_known_categories(head, cols=unknown_categoricals)\n\n # Define parts\n parts = []\n colname, paths = path or (None, None)\n for i in range(len(blocks)):\n parts.append([blocks[i], paths[i] if paths else None, is_first[i], is_last[i]])\n\n # Construct the output collection with from_map\n return from_map(\n CSVFunctionWrapper(\n columns,\n None,\n colname,\n head,\n header,\n reader,\n dtypes,\n enforce,\n kwargs,\n ),\n parts,\n meta=head,\n label=\"read-csv\",\n token=tokenize(reader, urlpath, columns, enforce, head, blocksize),\n enforce_metadata=False,\n produces_tasks=True,\n )\n\n\ndef block_mask(block_lists):\n \"\"\"\n Yields a flat iterable of booleans to mark the zeroth elements of the\n nested input ``block_lists`` in a flattened output.\n\n >>> list(block_mask([[1, 2], [3, 4], [5]]))\n [True, False, True, False, True]\n \"\"\"\n for block in block_lists:\n if not block:\n continue\n yield True\n yield from (False for _ in block[1:])\n\n\ndef block_mask_last(block_lists):\n \"\"\"\n Yields a flat iterable of booleans to mark the last element of the\n nested input ``block_lists`` in a flattened output.\n\n >>> list(block_mask_last([[1, 2], [3, 4], [5]]))\n [False, True, False, True, True]\n \"\"\"\n for block in block_lists:\n if not block:\n continue\n yield from (False for _ in block[:-1])\n yield True\n\n\ndef auto_blocksize(total_memory, cpu_count):\n memory_factor = 10\n blocksize = int(total_memory // cpu_count / memory_factor)\n return min(blocksize, int(64e6))\n\n\ndef _infer_block_size():\n default = 2**25\n if psutil is not None:\n with catch_warnings():\n simplefilter(\"ignore\", RuntimeWarning)\n mem = psutil.virtual_memory().total\n cpu = psutil.cpu_count()\n\n if mem and cpu:\n return auto_blocksize(mem, cpu)\n\n return default\n\n\n# guess blocksize if psutil is installed or use acceptable default one if not\nAUTO_BLOCKSIZE = _infer_block_size()\n\n\ndef read_pandas(\n reader,\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n):\n reader_name = reader.__name__\n if lineterminator is not None and len(lineterminator) == 1:\n kwargs[\"lineterminator\"] = lineterminator\n else:\n lineterminator = \"\\n\"\n if include_path_column and isinstance(include_path_column, bool):\n include_path_column = \"path\"\n if \"index\" in 
kwargs or \"index_col\" in kwargs:\n raise ValueError(\n \"Keywords 'index' and 'index_col' not supported. \"\n f\"Use dd.{reader_name}(...).set_index('my-index') instead\"\n )\n for kw in [\"iterator\", \"chunksize\"]:\n if kw in kwargs:\n raise ValueError(f\"{kw} not supported for dd.{reader_name}\")\n if kwargs.get(\"nrows\", None):\n raise ValueError(\n \"The 'nrows' keyword is not supported by \"\n \"`dd.{0}`. To achieve the same behavior, it's \"\n \"recommended to use `dd.{0}(...).\"\n \"head(n=nrows)`\".format(reader_name)\n )\n if isinstance(kwargs.get(\"skiprows\"), int):\n skiprows = lastskiprow = firstrow = kwargs.get(\"skiprows\")\n elif kwargs.get(\"skiprows\") is None:\n skiprows = lastskiprow = firstrow = 0\n else:\n # When skiprows is a list, we expect more than max(skiprows) to\n # be included in the sample. This means that [0,2] will work well,\n # but [0, 440] might not work.\n skiprows = set(kwargs.get(\"skiprows\"))\n lastskiprow = max(skiprows)\n # find the firstrow that is not skipped, for use as header\n firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))\n if isinstance(kwargs.get(\"header\"), list):\n raise TypeError(f\"List of header rows not supported for dd.{reader_name}\")\n if isinstance(kwargs.get(\"converters\"), dict) and include_path_column:\n path_converter = kwargs.get(\"converters\").get(include_path_column, None)\n else:\n path_converter = None\n\n # If compression is \"infer\", inspect the (first) path suffix and\n # set the proper compression option if the suffix is recongnized.\n if compression == \"infer\":\n # Translate the input urlpath to a simple path list\n paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=storage_options)[\n 2\n ]\n\n # Check for at least one valid path\n if len(paths) == 0:\n raise OSError(f\"{urlpath} resolved to no files\")\n\n # Infer compression from first path\n compression = infer_compression(paths[0])\n\n if blocksize == \"default\":\n blocksize = AUTO_BLOCKSIZE\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if blocksize and compression:\n # NONE of the compressions should use chunking\n warn(\n \"Warning %s compression does not support breaking apart files\\n\"\n \"Please ensure that each individual file can fit in memory and\\n\"\n \"use the keyword ``blocksize=None to remove this message``\\n\"\n \"Setting ``blocksize=None``\" % compression\n )\n blocksize = None\n if compression not in compr:\n raise NotImplementedError(\"Compression format %s not installed\" % compression)\n if blocksize and sample and blocksize < sample and lastskiprow != 0:\n warn(\n \"Unexpected behavior can result from passing skiprows when\\n\"\n \"blocksize is smaller than sample size.\\n\"\n \"Setting ``sample=blocksize``\"\n )\n sample = blocksize\n b_lineterminator = lineterminator.encode()\n b_out = read_bytes(\n urlpath,\n delimiter=b_lineterminator,\n blocksize=blocksize,\n sample=sample,\n compression=compression,\n include_path=include_path_column,\n **(storage_options or {}),\n )\n\n if include_path_column:\n b_sample, values, paths = b_out\n path = (include_path_column, path_converter)\n else:\n b_sample, values = b_out\n path = None\n\n if not isinstance(values[0], (tuple, list)):\n values = [values]\n # If we have not sampled, then use the first row of the first values\n # as a representative sample.\n if b_sample is False and len(values[0]):\n b_sample = values[0][0].compute()\n\n # Get header row, and check that sample is long enough. 
If the file\n # contains a header row, we need at least 2 nonempty rows + the number of\n # rows to skip.\n names = kwargs.get(\"names\", None)\n header = kwargs.get(\"header\", \"infer\" if names is None else None)\n need = 1 if header is None else 2\n\n if kwargs.get(\"comment\"):\n # if comment is provided, step through lines of b_sample and strip out comments\n parts = []\n for part in b_sample.split(b_lineterminator):\n split_comment = part.decode().split(kwargs.get(\"comment\"))\n if len(split_comment) > 1:\n # if line starts with comment, don't include that line in parts.\n if len(split_comment[0]) > 0:\n parts.append(split_comment[0].strip().encode())\n else:\n parts.append(part)\n if len(parts) > need:\n break\n else:\n parts = b_sample.split(b_lineterminator, lastskiprow + need)\n\n # If the last partition is empty, don't count it\n nparts = 0 if not parts else len(parts) - int(not parts[-1])\n\n if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:\n raise ValueError(\n \"Sample is not large enough to include at least one \"\n \"row of data. Please increase the number of bytes \"\n \"in `sample` in the call to `read_csv`/`read_table`\"\n )\n\n if isinstance(header, int):\n firstrow += header\n header = b\"\" if header is None else parts[firstrow] + b_lineterminator\n\n # Use sample to infer dtypes and check for presence of include_path_column\n head_kwargs = kwargs.copy()\n head_kwargs.pop(\"skipfooter\", None)\n try:\n head = reader(BytesIO(b_sample), nrows=sample_rows, **head_kwargs)\n except pd.errors.ParserError as e:\n if \"EOF\" in str(e):\n raise ValueError(\n \"EOF encountered while reading header. \\n\"\n \"Pass argument `sample_rows` and make sure the value of `sample` \"\n \"is large enough to accommodate that many rows of data\"\n ) from e\n raise\n if include_path_column and (include_path_column in head.columns):\n raise ValueError(\n \"Files already contain the column name: %s, so the \"\n \"path column cannot use this name. Please set \"\n \"`include_path_column` to a unique name.\" % include_path_column\n )\n\n specified_dtypes = kwargs.get(\"dtype\", {})\n if specified_dtypes is None:\n specified_dtypes = {}\n # If specified_dtypes is a single type, then all columns were specified\n if assume_missing and isinstance(specified_dtypes, dict):\n # Convert all non-specified integer columns to floats\n for c in head.columns:\n if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:\n head[c] = head[c].astype(float)\n\n values = [[list(dsk.dask.values()) for dsk in block] for block in values]\n\n return text_blocks_to_pandas(\n reader,\n values,\n header,\n head,\n kwargs,\n enforce=enforce,\n specified_dtypes=specified_dtypes,\n path=path,\n blocksize=blocksize,\n urlpath=urlpath,\n )\n\n\nREAD_DOC_TEMPLATE = \"\"\"\nRead {file_type} files into a Dask.DataFrame\n\nThis parallelizes the :func:`pandas.{reader}` function in the following ways:\n\n- It supports loading many files at once using globstrings:\n\n >>> df = dd.{reader}('myfiles.*.csv') # doctest: +SKIP\n\n- In some cases it can break up large files:\n\n >>> df = dd.{reader}('largefile.csv', blocksize=25e6) # 25MB chunks # doctest: +SKIP\n\n- It can read CSV files from external resources (e.g. 
S3, HDFS) by\n providing a URL:\n\n >>> df = dd.{reader}('s3://bucket/myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs:///myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs://namenode.example.com/myfiles.*.csv') # doctest: +SKIP\n\nInternally ``dd.{reader}`` uses :func:`pandas.{reader}` and supports many of the\nsame keyword arguments with the same performance guarantees. See the docstring\nfor :func:`pandas.{reader}` for more information on available keyword arguments.\n\nParameters\n----------\nurlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\nblocksize : str, int or None, optional\n Number of bytes by which to cut up larger files. Default value is computed\n based on available physical memory and the number of cores, up to a maximum\n of 64MB. Can be a number like ``64000000`` or a string like ``\"64MB\"``. If\n ``None``, a single block is used for each file.\nsample : int, optional\n Number of bytes to use when determining dtypes\nassume_missing : bool, optional\n If True, all integer columns that aren't specified in ``dtype`` are assumed\n to contain missing values, and are converted to floats. Default is False.\nstorage_options : dict, optional\n Extra options that make sense for a particular storage connection, e.g.\n host, port, username, password, etc.\ninclude_path_column : bool or str, optional\n Whether or not to include the path to each particular file. If True a new\n column is added to the dataframe called ``path``. If str, sets new column\n name. Default is False.\n**kwargs\n Extra keyword arguments to forward to :func:`pandas.{reader}`.\n\nNotes\n-----\nDask dataframe tries to infer the ``dtype`` of each column by reading a sample\nfrom the start of the file (or of the first file if it's a glob). Usually this\nworks fine, but if the ``dtype`` is different later in the file (or in other\nfiles) this can cause issues. For example, if all the rows in the sample had\ninteger dtypes, but later on there was a ``NaN``, then this would error at\ncompute time. To fix this, you have a few options:\n\n- Provide explicit dtypes for the offending columns using the ``dtype``\n keyword. This is the recommended solution.\n\n- Use the ``assume_missing`` keyword to assume that all columns inferred as\n integers contain missing values, and convert them to floats.\n\n- Increase the size of the sample using the ``sample`` keyword.\n\nIt should also be noted that this function may fail if a {file_type} file\nincludes quoted strings that contain the line terminator. 
To get around this\nyou can specify ``blocksize=None`` to not split files into multiple partitions,\nat the cost of reduced parallelism.\n\"\"\"\n\n\ndef make_reader(reader, reader_name, file_type):\n def read(\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n ):\n return read_pandas(\n reader,\n urlpath,\n blocksize=blocksize,\n lineterminator=lineterminator,\n compression=compression,\n sample=sample,\n sample_rows=sample_rows,\n enforce=enforce,\n assume_missing=assume_missing,\n storage_options=storage_options,\n include_path_column=include_path_column,\n **kwargs,\n )\n\n read.__doc__ = READ_DOC_TEMPLATE.format(reader=reader_name, file_type=file_type)\n read.__name__ = reader_name\n return read\n\n\nread_csv = make_reader(pd.read_csv, \"read_csv\", \"CSV\")\nread_table = make_reader(pd.read_table, \"read_table\", \"delimited\")\nread_fwf = make_reader(pd.read_fwf, \"read_fwf\", \"fixed-width\")\n\n\ndef _write_csv(df, fil, *, depend_on=None, **kwargs):\n with fil as f:\n df.to_csv(f, **kwargs)\n return os.path.normpath(fil.path)\n\n\ndef to_csv(\n df,\n filename,\n single_file=False,\n encoding=\"utf-8\",\n mode=\"wt\",\n name_function=None,\n compression=None,\n compute=True,\n scheduler=None,\n storage_options=None,\n header_first_partition_only=None,\n compute_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Store Dask DataFrame to CSV files\n\n One filename per partition will be created. You can specify the\n filenames in a variety of ways.\n\n Use a globstring::\n\n >>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP\n\n The * will be replaced by the increasing sequence 0, 1, 2, ...\n\n ::\n\n /path/to/data/export-0.csv\n /path/to/data/export-1.csv\n\n Use a globstring and a ``name_function=`` keyword argument. The\n name_function function should expect an integer and produce a string.\n Strings produced by name_function must preserve the order of their\n respective partition indices.\n\n >>> from datetime import date, timedelta\n >>> def name(i):\n ... return str(date(2015, 1, 1) + i * timedelta(days=1))\n\n >>> name(0)\n '2015-01-01'\n >>> name(15)\n '2015-01-16'\n\n >>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP\n\n ::\n\n /path/to/data/export-2015-01-01.csv\n /path/to/data/export-2015-01-02.csv\n ...\n\n You can also provide an explicit list of paths::\n\n >>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP\n >>> df.to_csv(paths) # doctest: +SKIP\n\n Parameters\n ----------\n df : dask.DataFrame\n Data to save\n filename : string\n Path glob indicating the naming scheme for the output files\n single_file : bool, default False\n Whether to save everything into a single CSV file. Under the\n single file mode, each partition is appended at the end of the\n specified CSV file. Note that not all filesystems support the\n append mode and thus the single file mode, especially on cloud\n storage systems such as S3 or GCS. 
A warning will be issued when\n writing to a file that is not backed by a local filesystem.\n encoding : string, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n mode : str\n Python write mode, default 'w'\n name_function : callable, default None\n Function accepting an integer (partition index) and producing a\n string to replace the asterisk in the given filename globstring.\n Should preserve the lexicographic order of partitions. Not\n supported when `single_file` is `True`.\n compression : string, optional\n a string representing the compression to use in the output file,\n allowed values are 'gzip', 'bz2', 'xz',\n only used when the first argument is a filename\n compute : bool\n If true, immediately executes. If False, returns a set of delayed\n objects, which can be computed at a later time.\n storage_options : dict\n Parameters passed on to the backend filesystem class.\n header_first_partition_only : boolean, default None\n If set to `True`, only write the header row in the first output\n file. By default, headers are written to all partitions under\n the multiple file mode (`single_file` is `False`) and written\n only once under the single file mode (`single_file` is `True`).\n It must not be `False` under the single file mode.\n compute_kwargs : dict, optional\n Options to be passed in to the compute method\n kwargs : dict, optional\n Additional parameters to pass to `pd.DataFrame.to_csv()`\n\n Returns\n -------\n The names of the file written if they were computed right away\n If not, the delayed tasks associated to the writing of the files\n\n Raises\n ------\n ValueError\n If `header_first_partition_only` is set to `False` or\n `name_function` is specified when `single_file` is `True`.\n \"\"\"\n if single_file and name_function is not None:\n raise ValueError(\"name_function is not supported under the single file mode\")\n if header_first_partition_only is None:\n header_first_partition_only = single_file\n elif not header_first_partition_only and single_file:\n raise ValueError(\n \"header_first_partition_only cannot be False in the single file mode.\"\n )\n file_options = dict(\n compression=compression,\n encoding=encoding,\n newline=\"\",\n **(storage_options or {}),\n )\n to_csv_chunk = delayed(_write_csv, pure=False)\n dfs = df.to_delayed()\n if single_file:\n first_file = open_file(filename, mode=mode, **file_options)\n if not isinstance(first_file.fs, fsspec.implementations.local.LocalFileSystem):\n warn(\"Appending data to a network storage system may not work.\")\n value = to_csv_chunk(dfs[0], first_file, **kwargs)\n append_mode = mode.replace(\"w\", \"\") + \"a\"\n append_file = open_file(filename, mode=append_mode, **file_options)\n kwargs[\"header\"] = False\n for d in dfs[1:]:\n value = to_csv_chunk(d, append_file, depend_on=value, **kwargs)\n values = [value]\n files = [first_file]\n else:\n files = open_files(\n filename,\n mode=mode,\n name_function=name_function,\n num=df.npartitions,\n **file_options,\n )\n values = [to_csv_chunk(dfs[0], files[0], **kwargs)]\n if header_first_partition_only:\n kwargs[\"header\"] = False\n values.extend(\n [to_csv_chunk(d, f, **kwargs) for d, f in zip(dfs[1:], files[1:])]\n )\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n\n if scheduler is not None:\n warn(\n \"The 'scheduler' keyword argument for `to_csv()` is deprecated and\"\n \"will be removed in a future version. 
\"\n \"Please use the `compute_kwargs` argument instead. \"\n f\"For example, df.to_csv(..., compute_kwargs={{scheduler: {scheduler}}})\",\n FutureWarning,\n )\n\n if (\n scheduler is not None\n and compute_kwargs.get(\"scheduler\") is not None\n and compute_kwargs.get(\"scheduler\") != scheduler\n ):\n raise ValueError(\n f\"Differing values for 'scheduler' have been passed in.\\n\"\n f\"scheduler argument: {scheduler}\\n\"\n f\"via compute_kwargs: {compute_kwargs.get('scheduler')}\"\n )\n\n if scheduler is not None and compute_kwargs.get(\"scheduler\") is None:\n compute_kwargs[\"scheduler\"] = scheduler\n\n import dask\n\n return list(dask.compute(*values, **compute_kwargs))\n else:\n return values\n\n\nfrom dask.dataframe.core import _Frame\n\n_Frame.to_csv.__doc__ = to_csv.__doc__\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.Index", "pandas.api.types.is_list_like", "pandas.api.types.is_scalar", "pandas.CategoricalIndex", "pandas.get_dummies" ], [ "pandas.api.types.is_datetime64_any_dtype", "pandas.api.types.is_object_dtype", "pandas.api.types.is_float_dtype", "pandas.api.types.is_integer_dtype" ] ]
DaleBlomgren/DaleSC2
[ "f6c82f68ed4359fb91e5c37eba3d93968359069b" ]
[ "attack_agent.py" ]
[ "import math\nimport random \n\nimport numpy as np \nimport pandas as pd \n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id\n_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id\n_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id\n_TRAIN_SCV = actions.FUNCTIONS.Train_SCV_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_SELECT_IDLE_SCV = actions.FUNCTIONS.select_idle_worker.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index\n_PLAYER_ID = features.SCREEN_FEATURES.player_id.index\n\n_PLAYER_SELF = 1\n\n_TERRAN_COMMANDCENTER = 18\n_TERRAN_SCV = 45 \n_TERRAN_SUPPLY_DEPOT = 19\n_TERRAN_BARRACKS = 21\n\n_NOT_QUEUED = [0]\n_QUEUED = [1]\n\n_PLAYER_HOSTILE = 4\n\n# Define actions\n\nACTION_DO_NOTHING = 'donothing'\nACTION_SELECT_SCV = 'selectscv'\nACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'\nACTION_BUILD_BARRACKS = 'buildbarracks'\nACTION_SELECT_BARRACKS = 'selectbarracks'\nACTION_BUILD_MARINE = 'buildmarine'\nACTION_SELECT_ARMY = 'selectarmy'\nACTION_ATTACK = 'attack'\nACTION_SELECT_IDLE_SCV = 'selectidlescv'\nACTION_BUILD_SCV = 'buildscv'\n\n\nsmart_actions = [\n\tACTION_DO_NOTHING,\n\tACTION_SELECT_SCV,\n\tACTION_BUILD_SUPPLY_DEPOT,\n\tACTION_BUILD_BARRACKS,\n\tACTION_SELECT_BARRACKS,\n\tACTION_BUILD_MARINE,\n\tACTION_SELECT_ARMY,\n\tACTION_SELECT_IDLE_SCV,\n\tACTION_BUILD_SCV \n]\n\nfor mm_x in range(0, 64):\n\tfor mm_y in range(0,64):\n\t\tif (mm_x + 1) % 16 == 0 and (mm_y + 1) % 16 == 0:\n\t\t\tsmart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 8) + '_' + str(mm_y - 8))\n\n\n\nKILL_UNIT_REWARD = 0.2\nKILL_BUILDING_REWARD = 0.5\nBUILDING_DEATH_REWARD = -0.5\n\n\n\n\n\nclass AttackAgent(base_agent.BaseAgent):\n\tdef __init__(self):\n\t\tsuper(AttackAgent, self).__init__()\n\n\t\tself.qlearn = QLearningTable(actions=list(range(len(smart_actions))))\n\n\t\tself.previous_killed_unit_score = 0\n\t\tself.previous_killed_building_score = 0\n\n\t\tself.previous_action = None\n\t\tself.previous_state = None\n\t\tself.barracks_built = 0\n\n\t# def fillActionArray(self):\n\t\t### REMEMBER TO EMPTY LIST EVERY STEP WITH THIS METHOD ###\n\t\t# If we are about to be supply blocked, add 'build supply depot' to list\n\t\t# If we have idle scv's, build scv's\n\t\t# If buildings are idle, build units\n\t\t# If we dont have gas, build refinery\n\t\t# If we have lots of minerals, build command center\n\t\t# If we have a command center, build a second barracks\n\t\t# If we have 2 barracks, build a factory\n\n\t#\tsmart_actions.append(ACTION_DO_NOTHING)\n\t\t#smart_actions.append(AC)\n\n\t\t# if food supply is within range of food cap\n\n\t#\tif (obs.observation['player'][3] - 3) > (obs.observation['player'][4]):\n\t#\t\tsmart_actions.append(ACTION_BUILD_SUPPLY_DEPOT)\n\n\t#\tif self.barracks_built < 1:\n\t#\t\tsmart_actions.append(ACTION_BUILD_BARRACKS)\n\n\tdef transformDistance(self, x, x_distance, y, y_distance):\n\t\tif not self.base_top_left:\n\t\t\treturn [x - x_distance, y - y_distance]\n\t\t\n\t\treturn [x + x_distance, y + y_distance]\n\t\n\tdef transformLocation(self, x, y):\n\t\tif not self.base_top_left:\n\t\t\treturn [64 - x, 64 - y]\n\t\t\n\t\treturn [x, y]\n\n\tdef step(self, obs):\n\t\tsuper(AttackAgent, 
self).step(obs)\n\n\t\tplayer_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()\n\t\tself.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0\n\n\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\n\t\tdepot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()\n\t\tsupply_depot_count = 1 if depot_y.any() else 0\n\n\t\tbarracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()\n\t\tbarracks_count = 1 if barracks_y.any() else 0\n\n\t\tsupply_limit = obs.observation['player'][4]\n\t\tarmy_supply = obs.observation['player'][5]\n\n\t\tkilled_unit_score = obs.observation['score_cumulative'][5]\n\t\tkilled_building_score = obs.observation['score_cumulative'][6]\n##This area will append the wrong amount of actions and attack routes, account for this!\n\t\tcurrent_state = np.zeros(20)\n\t\tcurrent_state[0] = supply_depot_count\n\t\tcurrent_state[1] = barracks_count\n\t\tcurrent_state[2] = supply_limit\n\t\tcurrent_state[3] = army_supply\n\n\t\thot_squares = np.zeros(16)\n\t\tenemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()\n\t\tfor i in range(0, len(enemy_y)):\n\t\t\ty = int(math.ceil((enemy_y[i] + 1) / 16))\n\t\t\tx = int(math.ceil((enemy_x[i] + 1) / 16))\n\n\t\t\thot_squares[((y - 1) * 4) + (x - 1)] = 1\n\n\t\tif not self.base_top_left:\n\t\t\thot_squares = hot_squares[::-1]\n\n\t\tfor i in range(0, 16):\n\t\t\tcurrent_state[i + 4] = hot_squares[i]\n\n\t\tif self.previous_action is not None:\n\t\t\treward = 0\n\n\t\t\tif killed_unit_score > self.previous_killed_unit_score:\n\t\t\t\treward += KILL_UNIT_REWARD\n\n\t\t\tif killed_building_score > self.previous_killed_building_score:\n\t\t\t\treward += KILL_BUILDING_REWARD\n\n\t\t\tself.qlearn.learn(str(self.previous_state), self.previous_action, reward, str(current_state))\n\n#\t\tself.fillActionArray()\n\n# Use Q table to choose action\n\t\trl_action = self.qlearn.choose_action(str(current_state)) #Not sure current state gives proper information\n\t\tsmart_action = smart_actions[rl_action]\n\n\t\tself.previous_killed_unit_score = killed_unit_score\n\t\tself.previous_killed_building_score = killed_building_score\n\t\tself.previous_state = current_state\n\t\tself.previous_action = rl_action\n\n\t\tx = 0\n\t\ty = 0\n\t\tif '_' in smart_action:\n\t\t\tsmart_action, x, y = smart_action.split('_')\n\n\t\tif smart_action == ACTION_DO_NOTHING:\n\t\t\treturn actions.FunctionCall(_NO_OP, [])\n\n\t\telif smart_action == ACTION_SELECT_SCV:\n\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\tunit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()\n\n\t\t\tif unit_y.any():\n\t\t\t\ti = random.randint(0, len(unit_y) - 1)\n\t\t\t\ttarget = [unit_x[i], unit_y[i]]\n\n\t\t\t\treturn actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n#\tNeeds supply depot to be built in a randomish position\n\t\telif smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n\t\t\tif _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:\n\t\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\t\tunit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n\n\t\t\t\tif unit_y.any():\n\t\t\t\t\t#target = self.transformDistance(int(unit_x.mean()), 0, int(unit_y.mean()), 20)\n\t\t\t\t\ttarget = self.transformDistance(int(unit_x.mean()), np.random.choice(30), int(unit_y.mean()), np.random.choice(30))\n\n\t\t\t\t\treturn actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])\n\n\t\telif smart_action == 
ACTION_BUILD_SCV:\n\t\t\tprint(\"Action select: Build SCV\\n\")\n\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\tunit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n\n\t\t\tif unit_y.any():\n\t\t\t\ttarget = [int(unit_x.mean()), int(unit_y.mean())]\n\t\t\t\tactions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n\n\t\t\tif _TRAIN_SCV in obs.observation['available_actions']:\n\t\t\t\tprint(\"FunctionCall: Train SCV\")\n\t\t\t\treturn actions.FunctionCall(_TRAIN_SCV, [_QUEUED])\n\n#\t\t\tif unit_y.any():\n#\t\t\t\ttarget = self.transformDistance(int(unit_x.mean()), 0, int(unit_y.mean()), 20)\n\n\t\telif smart_action == ACTION_BUILD_BARRACKS:\n\t\t\tif _BUILD_BARRACKS in obs.observation['available_actions']:\n\t\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\t\tunit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n\t\t\t\t\n\t\t\t\tif unit_y.any():\n\t\t\t\t\ttarget = self.transformDistance(int(unit_x.mean()), np.random.choice(30), int(unit_y.mean()), np.random.choice(30))\n\t\t\t\t\tself.barracks_built = True\n\t\t\t\t\treturn actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])\n\t\n\t\telif smart_action == ACTION_SELECT_BARRACKS:\n\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\tunit_y, unit_x = (unit_type == _TERRAN_BARRACKS).nonzero()\n\t\t\t\t\n\t\t\tif unit_y.any():\n\t\t\t\ttarget = [int(unit_x.mean()), int(unit_y.mean())]\n\t\t\n\t\t\t\treturn actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n\t\t\n\n\t\telif smart_action == ACTION_BUILD_MARINE:\n\t\t\tunit_type = obs.observation['feature_screen'][_UNIT_TYPE]\n\t\t\tunit_y, unit_x = (unit_type == _TERRAN_BARRACKS).nonzero()\n\t\t\t\t\n\t\t\tif unit_y.any():\n\t\t\t\ttarget = [int(unit_x.mean()), int(unit_y.mean())]\n\t\t\n\t\t\t\tactions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])\n\n\t\t\tif _TRAIN_MARINE in obs.observation['available_actions']:\n\t\t\t\treturn actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])\n\t\t\n\t\telif smart_action == ACTION_SELECT_ARMY:\n\t\t\tif _SELECT_ARMY in obs.observation['available_actions']:\n\t\t\t\treturn actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])\n\t\t\n\t\telif smart_action == ACTION_ATTACK:\n\t\t\tif obs.observation['single_select'][0][0] != _TERRAN_SCV and _ATTACK_MINIMAP in obs.observation['available_actions']:\n\t\t\t\treturn actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, self.transformLocation(int(x), int(y))])\n\n\t\telif smart_action == ACTION_SELECT_IDLE_SCV:\n\t\t\tif _SELECT_IDLE_SCV in obs.observation['available_actions']:\n\t\t\t\tactions.FunctionCall(_SELECT_IDLE_SCV, [_NOT_QUEUED])\n\n\n\t\treturn actions.FunctionCall(_NO_OP, [])\n\nclass QLearningTable:\n\tdef __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n\t\tself.actions = actions\n\t\tself.lr = learning_rate\n\t\tself.gamma = reward_decay\n\t\tself.epsilon = e_greedy\n\t\tself.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n\tdef choose_action(self, observation):\n\t\tself.check_state_exist(observation)\n\t\t\n\t\tif np.random.uniform() < self.epsilon:\n\t\t\t# choose best action\n\t\t\tstate_action = self.q_table.ix[observation, :]\n\t\t\t\n\t\t\t# some actions have the same value\n\t\t\tstate_action = state_action.reindex(np.random.permutation(state_action.index))\n\t\t\t\n\t\t\taction = state_action.idxmax()\n\t\telse:\n\t\t\t# choose random action\n\t\t\taction = np.random.choice(self.actions)\n\t\t\t\n\t\treturn action\n\n\tdef learn(self, s, a, r, 
s_):\n\t\tself.check_state_exist(s_)\n\t\tself.check_state_exist(s)\n\t\t\n\t\tq_predict = self.q_table.ix[s, a]\n\t\tq_target = r + self.gamma * self.q_table.ix[s_, :].max()\n\t\t\n\t\t# update\n\t\tself.q_table.ix[s, a] += self.lr * (q_target - q_predict)\n\n\tdef check_state_exist(self, state):\n\t\tif state not in self.q_table.index:\n\t\t\t# append new state to q table\n\t\t\tself.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))\n\n\n\n" ]
[ [ "numpy.random.choice", "numpy.zeros", "pandas.DataFrame", "numpy.random.permutation", "numpy.random.uniform" ] ]
HongyeGuo/DIRL-bidding_preference
[ "1451863af0c8195daa73051f82f33cbefa0024e2" ]
[ "deep_maxent.py" ]
[ "\"\"\"\nImplements deep maximum entropy inverse reinforcement learning based on\nZiebart et al., 2008 and Wulfmeier et al., 2015, using symbolic methods with\nTheano.\n\nMatthew Alger, 2015\nmatthew.alger@anu.edu.au\n\"\"\"\n\nfrom itertools import product\n\nimport numpy as np\nimport numpy.random as rn\nimport theano as th\nimport theano.tensor as T\nimport time\nimport maxent\nimport pickle\n\nFLOAT = th.config.floatX\n\ndef find_svf(n_states, trajectories):\n \"\"\"\n Find the state vistiation frequency from trajectories.\n\n n_states: Number of states. int.\n trajectories: 3D array of state/action pairs. States are ints, actions\n are ints. NumPy array with shape (T, L, 2) where T is the number of\n trajectories and L is the trajectory length.\n -> State visitation frequencies vector with shape (N,).\n \"\"\"\n\n svf = np.zeros(n_states)\n\n for trajectory in trajectories:\n for state, _, _ in trajectory:\n svf[state] += 1\n\n svf /= trajectories.shape[0]\n\n return th.shared(svf, \"svf\", borrow=True)\n\ndef optimal_value(n_states, n_actions, transition_probabilities, reward,\n discount, threshold=1e-2):\n \"\"\"\n Find the optimal value function.\n\n n_states: Number of states. int.\n n_actions: Number of actions. int.\n transition_probabilities: Function taking (state, action, state) to\n transition probabilities.\n reward: Vector of rewards for each state.\n discount: MDP discount factor. float.\n threshold: Convergence threshold, default 1e-2. float.\n -> Array of values for each state\n \"\"\"\n\n v = T.zeros(n_states, dtype=FLOAT)\n\n def update(s, prev_diff, v, reward, tps):\n max_v = float(\"-inf\")\n v_template = T.zeros_like(v)\n for a in range(n_actions):\n tp = tps[s, a, :]\n max_v = T.largest(max_v, T.dot(tp, reward + discount*v))\n new_diff = abs(v[s] - max_v)\n if T.lt(prev_diff, new_diff):\n diff = new_diff\n else:\n diff = prev_diff\n return (diff, T.set_subtensor(v_template[s], max_v)), {}\n\n def until_converged(diff, v):\n (diff, vs), _ = th.scan(\n fn=update,\n outputs_info=[{\"initial\": diff, \"taps\": [-1]},\n None],\n sequences=[T.arange(n_states)],\n non_sequences=[v, reward, transition_probabilities])\n return ((diff[-1], vs.sum(axis=0)), {},\n th.scan_module.until(diff[-1] < threshold))\n\n (_, vs), _ = th.scan(fn = until_converged,\n outputs_info=[\n # Need to force an inf into the right Theano\n # data type and this seems to be the only way that\n # works.\n {\"initial\": getattr(np, FLOAT)(float(\"inf\")),\n \"taps\": [-1]},\n {\"initial\": v,\n \"taps\": [-1]}],\n n_steps=1000)\n\n return vs[-1]\n\ndef find_policy(n_states, n_actions, transition_probabilities, reward, discount,\n threshold=1e-2, v=None):\n \"\"\"\n Find the optimal policy.\n\n n_states: Number of states. int.\n n_actions: Number of actions. int.\n transition_probabilities: Function taking (state, action, state) to\n transition probabilities.\n reward: Vector of rewards for each state.\n discount: MDP discount factor. float.\n threshold: Convergence threshold, default 1e-2. float.\n v: Optimal value array (if known). 
Default None.\n -> Action probabilities for each state.\n \"\"\"\n\n if v is None:\n v = optimal_value(n_states, n_actions, transition_probabilities, reward,\n discount, threshold)\n\n # Get Q using equation 9.2 from Ziebart's thesis.\n Q = T.zeros((n_states, n_actions))\n def make_Q(i, j, tps, Q, reward, v):\n Q_template = T.zeros_like(Q)\n tp = transition_probabilities[i, j, :]\n return T.set_subtensor(Q_template[i, j], tp.dot(reward + discount*v)),{}\n\n prod = np.array(list(product(range(n_states), range(n_actions))))\n state_range = th.shared(prod[:, 0])\n action_range = th.shared(prod[:, 1])\n Qs, _ = th.scan(fn=make_Q,\n outputs_info=None,\n sequences=[state_range, action_range],\n non_sequences=[transition_probabilities, Q, reward, v])\n Q = Qs.sum(axis=0)\n Q -= Q.max(axis=1).reshape((n_states, 1)) # For numerical stability.\n Q = T.exp(Q)/T.exp(Q).sum(axis=1).reshape((n_states, 1))\n return Q\n\ndef find_expected_svf(n_states, r, n_actions, discount,\n transition_probability, trajectories):\n \"\"\"\n Find the expected state visitation frequencies using algorithm 1 from\n Ziebart et al. 2008.\n\n n_states: Number of states N. int.\n alpha: Reward. NumPy array with shape (N,).\n n_actions: Number of actions A. int.\n discount: Discount factor of the MDP. float.\n transition_probability: NumPy array mapping (state_i, action, state_k) to\n the probability of transitioning from state_i to state_k under action.\n Shape (N, A, N).\n trajectories: 3D array of state/action pairs. States are ints, actions\n are ints. NumPy array with shape (T, L, 2) where T is the number of\n trajectories and L is the trajectory length.\n -> Expected state visitation frequencies vector with shape (N,).\n \"\"\"\n\n n_trajectories = trajectories.shape[0]\n trajectory_length = trajectories.shape[1]\n\n policy = find_policy(n_states, n_actions,\n transition_probability, r, discount)\n\n start_state_count = T.extra_ops.bincount(trajectories[:, 0, 0],\n minlength=n_states)\n p_start_state = start_state_count.astype(FLOAT)/n_trajectories\n\n def state_visitation_step(i, j, prev_svf, policy, tps):\n \"\"\"\n The sum of the outputs of a scan over this will be a row of the svf.\n \"\"\"\n\n svf = prev_svf[i] * policy[i, j] * tps[i, j, :]\n return svf, {}\n\n prod = np.array(list(product(range(n_states), range(n_actions))))\n state_range = th.shared(prod[:, 0])\n action_range = th.shared(prod[:, 1])\n def state_visitation_row(prev_svf, policy, tps, state_range, action_range):\n svf_t, _ = th.scan(fn=state_visitation_step,\n sequences=[state_range, action_range],\n non_sequences=[prev_svf, policy, tps])\n svf_t = svf_t.sum(axis=0)\n return svf_t, {}\n\n svf, _ = th.scan(fn=state_visitation_row,\n outputs_info=[{\"initial\": p_start_state, \"taps\": [-1]}],\n n_steps=trajectories.shape[1]-1,\n non_sequences=[policy, transition_probability, state_range,\n action_range])\n\n return svf.sum(axis=0) + p_start_state\n\ndef saveNN(structure, file_name, weights, biases, α, e):\n N_layers = len(structure)\n for l in range(N_layers-1):\n # save weights\n fo_NNweights = file_name+'_NNpara_l'+str(l+1)+'W.csv'\n weights_temp = weights[l].get_value() \n np.savetxt(fo_NNweights, weights_temp, delimiter = ',') \n # save biases\n fo_NNbiases = file_name+'_NNpara_l'+str(l+1)+'B.csv'\n biases_temp = biases[l].get_value() \n np.savetxt(fo_NNbiases, biases_temp, delimiter = ',') \n # save alpha\n fo_alpha = file_name+'_NNpara_alpha.csv'\n alpha_temp = α.get_value() \n np.savetxt(fo_alpha, alpha_temp, delimiter = ',') \n \n 
print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' NN parameters of round '+str(e)+' have been saved.')\n \n\n\ndef irl(structure, feature_matrix, n_actions, discount, transition_probability,\n trajectories, epochs, learning_rate, file_name, initialisation=\"normal\", l1=0.1,\n l2=0.1):\n #\n \"\"\"\n Find the reward function for the given trajectories.\n\n structure: Neural network structure tuple, e.g. (10, 3, 3) would be a\n 3-layer neural network with 10 inputs.\n feature_matrix: Matrix with the nth row representing the nth state. NumPy\n array with shape (N, D) where N is the number of states and D is the\n dimensionality of the state.\n n_actions: Number of actions A. int.\n discount: Discount factor of the MDP. float.\n transition_probability: NumPy array mapping (state_i, action, state_k) to\n the probability of transitioning from state_i to state_k under action.\n Shape (N, A, N).\n trajectories: 3D array of state/action pairs. States are ints, actions\n are ints. NumPy array with shape (T, L, 2) where T is the number of\n trajectories and L is the trajectory length.\n epochs: Number of gradient descent steps. int.\n learning_rate: Gradient descent learning rate. float.\n initialisation: What distribution to use. str in {normal, uniform}. Default\n normal.\n l1: L1 regularisation. Default 0.1. float.\n l2: L2 regularisation. Default 0.1. float.\n -> Reward vector with shape (N,).\n \"\"\"\n\n n_states, d_states = feature_matrix.shape\n transition_probability = th.shared(transition_probability, borrow=True)\n trajectories = th.shared(trajectories, borrow=True)\n\n # Initialise W matrices; b biases.\n n_layers = len(structure)-1\n weights = []\n hist_w_grads = [] # For AdaGrad.\n biases = []\n hist_b_grads = [] # For AdaGrad.\n for i in range(n_layers):\n # W\n shape = (structure[i+1], structure[i])\n if initialisation == \"normal\":\n matrix = th.shared(rn.normal(size=shape), name=\"W\", borrow=True)\n else:\n matrix = th.shared(rn.uniform(size=shape), name=\"W\", borrow=True)\n weights.append(matrix)\n hist_w_grads.append(th.shared(np.zeros(shape), name=\"hdW\", borrow=True))\n\n # b\n shape = (structure[i+1], 1)\n if initialisation == \"normal\":\n matrix = th.shared(rn.normal(size=shape), name=\"b\", borrow=True)\n else:\n matrix = th.shared(rn.uniform(size=shape), name=\"b\", borrow=True)\n biases.append(matrix)\n hist_b_grads.append(th.shared(np.zeros(shape), name=\"hdb\", borrow=True))\n\n # Initialise α weight, β bias.\n if initialisation == \"normal\":\n α = th.shared(rn.normal(size=(1, structure[-1])), name=\"alpha\",\n borrow=True)\n else:\n α = th.shared(rn.uniform(size=(1, structure[-1])), name=\"alpha\",\n borrow=True)\n hist_α_grad = T.zeros(α.shape) # For AdaGrad.\n\n adagrad_epsilon = 1e-6 # AdaGrad numerical stability.\n\n #### Theano symbolic setup. 
####\n\n # Symbolic input.\n s_feature_matrix = T.matrix(\"x\")\n # Feature matrices.\n # All dimensions of the form (d_layer, n_states).\n φs = [s_feature_matrix.T]\n # Forward propagation.\n for W, b in zip(weights, biases):\n φ = T.nnet.sigmoid(th.compile.ops.Rebroadcast((0, False), (1, True))(b)+ W.dot(φs[-1]))\n φs.append(φ)\n # φs[1] = φ1 etc.\n # Reward.\n r = α.dot(φs[-1]).reshape((n_states,))\n # Engineering hack: z-score the reward.\n r = (r - r.mean())/r.std()\n # Associated feature expectations.\n expected_svf = find_expected_svf(n_states, r,\n n_actions, discount,\n transition_probability,\n trajectories)\n svf = maxent.find_svf(n_states, trajectories.get_value())\n # Derivatives (backward propagation).\n updates = []\n α_grad = φs[-1].dot(svf - expected_svf).T\n hist_α_grad += α_grad**2\n adj_α_grad = α_grad/(adagrad_epsilon + T.sqrt(hist_α_grad))\n updates.append((α, α + adj_α_grad*learning_rate))\n\n def grad_for_state(s, theta, svf_diff, r):\n \"\"\"\n Calculate the gradient with respect to theta for one state.\n \"\"\"\n\n regularisation = abs(theta).sum()*l1 + (theta**2).sum()*l2\n return svf_diff[s] * T.grad(r[s], theta) - regularisation, {}\n \n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' enumerate(weights) starts.')\n for i, W in enumerate(weights):\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' enumerate(weights) '+str(i)+'.')\n w_grads, _ = th.scan(fn=grad_for_state,\n sequences=[T.arange(n_states)],\n non_sequences=[W, svf - expected_svf, r])\n w_grad = w_grads.sum(axis=0)\n hist_w_grads[i] += w_grad**2\n adj_w_grad = w_grad/(adagrad_epsilon + T.sqrt(hist_w_grads[i]))\n updates.append((W, W + adj_w_grad*learning_rate))\n \n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' enumerate(biases) starts.')\n for i, b in enumerate(biases):\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' enumerate(biases) '+str(i)+'.')\n b_grads, _ = th.scan(fn=grad_for_state,\n sequences=[T.arange(n_states)],\n non_sequences=[b, svf - expected_svf, r])\n b_grad = b_grads.sum(axis=0)\n hist_b_grads[i] += b_grad**2\n adj_b_grad = b_grad/(adagrad_epsilon + T.sqrt(hist_b_grads[i]))\n updates.append((b, b + adj_b_grad*learning_rate))\n\n train = th.function([s_feature_matrix], updates=updates, outputs=r)\n run = th.function([s_feature_matrix], outputs=r)\n \n # build result file\n fo_reward = open(file_name+'_reward_record.csv',\"w\")\n for n in range(n_states):\n fo_reward.write('S'+str(n)+', ')\n fo_reward.write('\\n')\n fo_reward.close()\n \n \n for e in range(epochs):\n if (e+1)%5 == 0:\n print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' reward NN training round '+str(e+1)+ ' starts.')\n reward = train(feature_matrix) \n # reward write\n fo_reward = open(file_name+'_reward_record.csv',\"a\")\n for n in range(n_states):\n fo_reward.write(str(reward[n])+', ')\n fo_reward.write('\\n')\n fo_reward.close()\n if (e+1)%5== 0:\n print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' reward: '+str(reward[[0,5,10,15,20,25,30]])+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' reward: '+str(r.mean()+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' weights1: '+str(weights[0].get_value() )+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' weights2: '+str(weights[1].get_value() )+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' biases1: '+str(biases[0].get_value() )+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' biases2: 
'+str(biases[1].get_value() )+ '.')\n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' alpha: '+str(α.get_value() )+ '.')\n #rewardtest = run(feature_matrix) \n #print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' rewardtest: '+str(rewardtest[[0,5,10,15,20,25,30]])+ '.')\n \n if (e+1)%20 == 0:\n saveNN(structure, file_name, weights, biases, α, e) \n \n \n print(str(time.strftime(\"%Y%m%d %X\", time.localtime()) )+' NN training is done.')\n \n return reward.reshape((n_states,)), weights, biases\n" ]
[ [ "numpy.random.normal", "numpy.random.uniform", "numpy.savetxt", "numpy.zeros" ] ]
geronimocharlie/cognitive_modeling_playground
[ "8b69764d6167efafa89f99594117ec27a3825448" ]
[ "context_aware_image_captioning/train_justify.py" ]
[ "import torch\nfrom datasets import CubDataset, get_coco_loader\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nfrom models import *\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom tqdm import tqdm\nfrom hyperparameters import *\nimport numpy as np\n\nif data_mode == 'cub':\n # old specifications taken from the github of the paper\n train_loader = torch.utils.data.DataLoader(\n CubDataset(transform=transforms.Compose([normalize])),\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n num_classes = 200\n vocab_size = 5725 + 1 # not sure were the plus 1 is coming from\n\n\nelif 'coco' in data_mode:\n # mode val for less images\n train_loader = get_coco_loader(mode=\"val\", transform=transforms.Compose([transforms.ToTensor(), normalize]), batch_size=batch_size, num_workers=workers)\n num_classes = train_loader.dataset.num_classes + 1\n vocab_size = train_loader.dataset.vocab_size + 1 # the one is somehow crucial unsure why\n print(\"num of data\")\n print(len(train_loader.dataset))\n\nelse:\n print(\"please specify data_mode as 'coco' or 'cub'\")\n raise NotImplemented\n\n# Note that the resize is already done in the encoder, so no need to do it here again\nif load:\n # Load the model from checkpoints\n checkpoints = torch.load(\"checkpoint_j\")\n encoder = checkpoints[\"encoder\"]\n decoder = checkpoints[\"decoder\"]\n decoder_optimizer = checkpoints[\"decoder_optimizer\"]\n epoch = checkpoints[\"epoch\"]\n decoder_lr = decoder_lr * pow(0.8, epoch // 5)\n for param_group in decoder_optimizer.param_groups:\n param_group[\"lr\"] = decoder_lr\nelse:\n epoch = 0\n encoder = Encoder()\n decoder = DecoderWithAttention_justify(\n attention_dim=attention_dim,\n embed_dim=emb_dim,\n decoder_dim=decoder_dim,\n vocab_size=vocab_size,\n num_classes=num_classes,\n )\n decoder_optimizer = torch.optim.Adam(\n params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=decoder_lr\n )\n\ncriterion = nn.CrossEntropyLoss()\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\ncriterion = criterion.to(device)\n\ndecoder.train()\nencoder.train()\n\nfor epoch in range(epoch, numepochs):\n if epoch % 5 == 0 and epoch > 0:\n # For every 5 epochs, the lr is annealed by 0.8\n decoder_lr *= 0.8\n for param_group in decoder_optimizer.param_groups:\n param_group[\"lr\"] = decoder_lr\n\n for i, (img, caption, caplen, class_k) in tqdm(\n enumerate(train_loader), desc=\"Batch\"\n ):\n\n img = img.to(device)\n caption = caption.to(device)\n caplen = caplen.to(device)\n class_k = class_k.to(device)\n img = encoder(img)\n\n scores, caps_sorted, decode_lengths, sort_ind = decoder(\n img, caption, caplen, class_k\n )\n targets = caps_sorted[:, 1:]\n # Suitable format, so that loss can be applied. The scores had unwated padding, that is removed. Similarly for target\n # resulting size (sum over lenghts for each bathc, vocab_size) -> eg. 
bacth size 2, lenghts 13 and 21, vocab 5000 -> (34,5000)\n scores = pack_padded_sequence(\n scores, decode_lengths, batch_first=True\n ).data\n\n targets = pack_padded_sequence(\n targets, decode_lengths, batch_first=True\n ).data # [bacth sieze, max lenght] to [sum over lenghts]\n\n # A gradient descend step\n loss = criterion(scores, targets) # add gating scalar, to this loss\n decoder_optimizer.zero_grad()\n loss.backward()\n decoder_optimizer.step()\n tqdm.write(f\"Loss {loss.detach().cpu().numpy()}\")\n\n if (epoch%save_after) == 0:\n print(\"saving\")\n save(epoch, encoder, decoder, decoder_optimizer, data_mode)\n\n" ]
[ [ "torch.nn.utils.rnn.pack_padded_sequence", "torch.load" ] ]
jfzhuang/ST_Memory
[ "f253c05b7ecb37a1cbe9f312a628ba30b4555230" ]
[ "mmsegmentation/mmseg/datasets/custom.py" ]
[ "import os\nimport os.path as osp\nfrom collections import OrderedDict\nfrom functools import reduce\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom prettytable import PrettyTable\nfrom torch.utils.data import Dataset\n\nfrom mmseg.core import eval_metrics\nfrom mmseg.utils import get_root_logger\nfrom .builder import DATASETS\nfrom .pipelines import Compose\n\n\n@DATASETS.register_module()\nclass CustomDataset(Dataset):\n \"\"\"Custom dataset for semantic segmentation. An example of file structure\n is as followed.\n\n .. code-block:: none\n\n ├── data\n │ ├── my_dataset\n │ │ ├── img_dir\n │ │ │ ├── train\n │ │ │ │ ├── xxx{img_suffix}\n │ │ │ │ ├── yyy{img_suffix}\n │ │ │ │ ├── zzz{img_suffix}\n │ │ │ ├── val\n │ │ ├── ann_dir\n │ │ │ ├── train\n │ │ │ │ ├── xxx{seg_map_suffix}\n │ │ │ │ ├── yyy{seg_map_suffix}\n │ │ │ │ ├── zzz{seg_map_suffix}\n │ │ │ ├── val\n\n The img/gt_semantic_seg pair of CustomDataset should be of the same\n except suffix. A valid img/gt_semantic_seg filename pair should be like\n ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included\n in the suffix). If split is given, then ``xxx`` is specified in txt file.\n Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.\n Please refer to ``docs/tutorials/new_dataset.md`` for more details.\n\n\n Args:\n pipeline (list[dict]): Processing pipeline\n img_dir (str): Path to image directory\n img_suffix (str): Suffix of images. Default: '.jpg'\n ann_dir (str, optional): Path to annotation directory. Default: None\n seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'\n split (str, optional): Split txt file. If split is specified, only\n file with suffix in the splits will be loaded. Otherwise, all\n images in img_dir/ann_dir will be loaded. Default: None\n data_root (str, optional): Data root for img_dir/ann_dir. Default:\n None.\n test_mode (bool): If test_mode=True, gt wouldn't be loaded.\n ignore_index (int): The label index to be ignored. Default: 255\n reduce_zero_label (bool): Whether to mark label zero as ignored.\n Default: False\n classes (str | Sequence[str], optional): Specify classes to load.\n If is None, ``cls.CLASSES`` will be used. Default: None.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. 
If None is given, and\n self.PALETTE is None, random palette will be generated.\n Default: None\n \"\"\"\n\n CLASSES = None\n\n PALETTE = None\n\n def __init__(\n self,\n pipeline,\n img_dir,\n img_suffix='.jpg',\n ann_dir=None,\n seg_map_suffix='.png',\n split=None,\n data_root=None,\n test_mode=False,\n ignore_index=255,\n reduce_zero_label=False,\n classes=None,\n palette=None,\n ):\n self.pipeline = Compose(pipeline)\n self.img_dir = img_dir\n self.img_suffix = img_suffix\n self.ann_dir = ann_dir\n self.seg_map_suffix = seg_map_suffix\n self.split = split\n self.data_root = data_root\n self.test_mode = test_mode\n self.ignore_index = ignore_index\n self.reduce_zero_label = reduce_zero_label\n self.label_map = None\n self.CLASSES, self.PALETTE = self.get_classes_and_palette(classes, palette)\n\n # join paths if data_root is specified\n if self.data_root is not None:\n if not osp.isabs(self.img_dir):\n self.img_dir = osp.join(self.data_root, self.img_dir)\n if not (self.ann_dir is None or osp.isabs(self.ann_dir)):\n self.ann_dir = osp.join(self.data_root, self.ann_dir)\n if not (self.split is None or osp.isabs(self.split)):\n self.split = osp.join(self.data_root, self.split)\n\n # load annotations\n self.img_infos = self.load_annotations(\n self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split\n )\n\n def __len__(self):\n \"\"\"Total number of samples of data.\"\"\"\n return len(self.img_infos)\n\n def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, split):\n \"\"\"Load annotation from directory.\n\n Args:\n img_dir (str): Path to image directory\n img_suffix (str): Suffix of images.\n ann_dir (str|None): Path to annotation directory.\n seg_map_suffix (str|None): Suffix of segmentation maps.\n split (str|None): Split txt file. If split is specified, only file\n with suffix in the splits will be loaded. Otherwise, all images\n in img_dir/ann_dir will be loaded. 
Default: None\n\n Returns:\n list[dict]: All image info of dataset.\n \"\"\"\n\n img_infos = []\n if split is not None:\n with open(split) as f:\n for line in f:\n img_name = line.strip()\n img_info = dict(filename=img_name + img_suffix)\n if ann_dir is not None:\n seg_map = img_name + seg_map_suffix\n img_info['ann'] = dict(seg_map=seg_map)\n img_infos.append(img_info)\n else:\n for img in mmcv.scandir(img_dir, img_suffix, recursive=True):\n img_info = dict(filename=img)\n if ann_dir is not None:\n seg_map = img.replace(img_suffix, seg_map_suffix)\n img_info['ann'] = dict(seg_map=seg_map)\n img_infos.append(img_info)\n\n print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())\n return img_infos\n\n def get_ann_info(self, idx):\n \"\"\"Get annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n return self.img_infos[idx]['ann']\n\n def pre_pipeline(self, results):\n \"\"\"Prepare results dict for pipeline.\"\"\"\n results['seg_fields'] = []\n results['img_prefix'] = self.img_dir\n results['seg_prefix'] = self.ann_dir\n if self.custom_classes:\n results['label_map'] = self.label_map\n\n def __getitem__(self, idx):\n \"\"\"Get training/test data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Training/test data (with annotation if `test_mode` is set\n False).\n \"\"\"\n\n if self.test_mode:\n return self.prepare_test_img(idx)\n else:\n return self.prepare_train_img(idx)\n\n def prepare_train_img(self, idx):\n \"\"\"Get training data and annotations after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Training data and annotation after pipeline with new keys\n introduced by pipeline.\n \"\"\"\n\n img_info = self.img_infos[idx]\n ann_info = self.get_ann_info(idx)\n results = dict(img_info=img_info, ann_info=ann_info)\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n def prepare_test_img(self, idx):\n \"\"\"Get testing data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Testing data after pipeline with new keys introduced by\n pipeline.\n \"\"\"\n\n img_info = self.img_infos[idx]\n results = dict(img_info=img_info)\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n def format_results(self, results, **kwargs):\n \"\"\"Place holder to format result to dataset specific output.\"\"\"\n\n def get_gt_seg_maps(self, efficient_test=False):\n \"\"\"Get ground truth segmentation maps for evaluation.\"\"\"\n gt_seg_maps = []\n for img_info in self.img_infos:\n seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])\n if efficient_test:\n gt_seg_map = seg_map\n else:\n gt_seg_map = mmcv.imread(seg_map, flag='unchanged', backend='pillow')\n gt_seg_maps.append(gt_seg_map)\n return gt_seg_maps\n\n def get_classes_and_palette(self, classes=None, palette=None):\n \"\"\"Get class names of current dataset.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. If None is given, random\n palette will be generated. 
Default: None\n \"\"\"\n if classes is None:\n self.custom_classes = False\n return self.CLASSES, self.PALETTE\n\n self.custom_classes = True\n if isinstance(classes, str):\n # take it as a file path\n class_names = mmcv.list_from_file(classes)\n elif isinstance(classes, (tuple, list)):\n class_names = classes\n else:\n raise ValueError(f'Unsupported type {type(classes)} of classes.')\n\n if self.CLASSES:\n if not set(classes).issubset(self.CLASSES):\n raise ValueError('classes is not a subset of CLASSES.')\n\n # dictionary, its keys are the old label ids and its values\n # are the new label ids.\n # used for changing pixel labels in load_annotations.\n self.label_map = {}\n for i, c in enumerate(self.CLASSES):\n if c not in class_names:\n self.label_map[i] = -1\n else:\n self.label_map[i] = classes.index(c)\n\n palette = self.get_palette_for_custom_classes(class_names, palette)\n\n return class_names, palette\n\n def get_palette_for_custom_classes(self, class_names, palette=None):\n\n if self.label_map is not None:\n # return subset of palette\n palette = []\n for old_id, new_id in sorted(self.label_map.items(), key=lambda x: x[1]):\n if new_id != -1:\n palette.append(self.PALETTE[old_id])\n palette = type(self.PALETTE)(palette)\n\n elif palette is None:\n if self.PALETTE is None:\n palette = np.random.randint(0, 255, size=(len(class_names), 3))\n else:\n palette = self.PALETTE\n\n return palette\n\n def evaluate(self, results, metric='mIoU', logger=None, efficient_test=False, **kwargs):\n \"\"\"Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. 'mIoU',\n 'mDice' and 'mFscore' are supported.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Default: None.\n\n Returns:\n dict[str, float]: Default metrics.\n \"\"\"\n\n if isinstance(metric, str):\n metric = [metric]\n allowed_metrics = ['mIoU', 'mDice', 'mFscore']\n if not set(metric).issubset(set(allowed_metrics)):\n raise KeyError('metric {} is not supported'.format(metric))\n eval_results = {}\n gt_seg_maps = self.get_gt_seg_maps(efficient_test)\n if self.CLASSES is None:\n num_classes = len(reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))\n else:\n num_classes = len(self.CLASSES)\n ret_metrics = eval_metrics(\n results,\n gt_seg_maps,\n num_classes,\n self.ignore_index,\n metric,\n label_map=self.label_map,\n reduce_zero_label=self.reduce_zero_label,\n )\n\n if self.CLASSES is None:\n class_names = tuple(range(num_classes))\n else:\n class_names = self.CLASSES\n\n # summary table\n ret_metrics_summary = OrderedDict(\n {\n ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)\n for ret_metric, ret_metric_value in ret_metrics.items()\n }\n )\n\n # each class table\n ret_metrics.pop('aAcc', None)\n ret_metrics_class = OrderedDict(\n {ret_metric: np.round(ret_metric_value * 100, 2) for ret_metric, ret_metric_value in ret_metrics.items()}\n )\n ret_metrics_class.update({'Class': class_names})\n ret_metrics_class.move_to_end('Class', last=False)\n\n # for logger\n class_table_data = PrettyTable()\n for key, val in ret_metrics_class.items():\n class_table_data.add_column(key, val)\n\n summary_table_data = PrettyTable()\n for key, val in ret_metrics_summary.items():\n if key == 'aAcc':\n summary_table_data.add_column(key, [val])\n else:\n summary_table_data.add_column('m' + key, [val])\n\n print_log('per class results:', logger)\n print_log('\\n' + class_table_data.get_string(), logger=logger)\n print_log('Summary:', logger)\n print_log('\\n' + summary_table_data.get_string(), logger=logger)\n\n # each metric dict\n for key, value in ret_metrics_summary.items():\n if key == 'aAcc':\n eval_results[key] = value / 100.0\n else:\n eval_results['m' + key] = value / 100.0\n\n ret_metrics_class.pop('Class', None)\n for key, value in ret_metrics_class.items():\n eval_results.update({key + '.' 
+ str(name): value[idx] / 100.0 for idx, name in enumerate(class_names)})\n\n if mmcv.is_list_of(results, str):\n for file_name in results:\n os.remove(file_name)\n return eval_results\n\n\n@DATASETS.register_module()\nclass CustomTemporalDataset(Dataset):\n CLASSES = None\n\n PALETTE = None\n\n def __init__(\n self,\n pipeline,\n img_dir,\n img_suffix='.jpg',\n ann_dir=None,\n seg_map_suffix='.png',\n split=None,\n data_root=None,\n test_mode=False,\n ignore_index=255,\n reduce_zero_label=False,\n classes=None,\n palette=None,\n ):\n self.pipeline = Compose(pipeline)\n self.img_dir = img_dir\n self.img_suffix = img_suffix\n self.ann_dir = ann_dir\n self.seg_map_suffix = seg_map_suffix\n self.split = split\n self.data_root = data_root\n self.test_mode = test_mode\n self.ignore_index = ignore_index\n self.reduce_zero_label = reduce_zero_label\n self.label_map = None\n self.CLASSES, self.PALETTE = self.get_classes_and_palette(classes, palette)\n\n # join paths if data_root is specified\n if self.data_root is not None:\n if not osp.isabs(self.img_dir):\n self.img_dir = osp.join(self.data_root, self.img_dir)\n if not (self.ann_dir is None or osp.isabs(self.ann_dir)):\n self.ann_dir = osp.join(self.data_root, self.ann_dir)\n if not (self.split is None or osp.isabs(self.split)):\n self.split = osp.join(self.data_root, self.split)\n\n # load annotations\n self.img_infos = self.load_annotations(\n self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split\n )\n\n def __len__(self):\n \"\"\"Total number of samples of data.\"\"\"\n return len(self.img_infos)\n\n def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, split):\n img_infos = []\n with open(split) as f:\n for line in f:\n img_name = line.strip()\n filenames = img_name.split()\n for filename in filenames:\n filename = filename + img_suffix\n img_info = dict(filename=filenames)\n if ann_dir is not None:\n seg_map = filenames[len(filenames) // 2].replace(img_suffix, seg_map_suffix)\n img_info['ann'] = dict(seg_map=seg_map)\n img_infos.append(img_info)\n\n print_log(f'Loaded {len(img_infos)} clips', logger=get_root_logger())\n return img_infos\n\n def get_ann_info(self, idx):\n \"\"\"Get annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n return self.img_infos[idx]['ann']\n\n def pre_pipeline(self, results):\n \"\"\"Prepare results dict for pipeline.\"\"\"\n results['seg_fields'] = []\n results['img_prefix'] = self.img_dir\n results['seg_prefix'] = self.ann_dir\n if self.custom_classes:\n results['label_map'] = self.label_map\n\n def __getitem__(self, idx):\n \"\"\"Get training/test data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Training/test data (with annotation if `test_mode` is set\n False).\n \"\"\"\n\n if self.test_mode:\n return self.prepare_test_img(idx)\n else:\n return self.prepare_train_img(idx)\n\n def prepare_train_img(self, idx):\n \"\"\"Get training data and annotations after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Training data and annotation after pipeline with new keys\n introduced by pipeline.\n \"\"\"\n\n img_info = self.img_infos[idx]\n ann_info = self.get_ann_info(idx)\n results = dict(img_info=img_info, ann_info=ann_info)\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n def prepare_test_img(self, idx):\n \"\"\"Get testing data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Testing data after pipeline 
with new keys introduced by\n pipeline.\n \"\"\"\n\n img_info = self.img_infos[idx]\n results = dict(img_info=img_info)\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n def format_results(self, results, **kwargs):\n \"\"\"Place holder to format result to dataset specific output.\"\"\"\n\n def get_gt_seg_maps(self, efficient_test=False):\n \"\"\"Get ground truth segmentation maps for evaluation.\"\"\"\n gt_seg_maps = []\n for img_info in self.img_infos:\n seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])\n if efficient_test:\n gt_seg_map = seg_map\n else:\n gt_seg_map = mmcv.imread(seg_map, flag='unchanged', backend='pillow')\n gt_seg_maps.append(gt_seg_map)\n return gt_seg_maps\n\n def get_classes_and_palette(self, classes=None, palette=None):\n \"\"\"Get class names of current dataset.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. If None is given, random\n palette will be generated. Default: None\n \"\"\"\n if classes is None:\n self.custom_classes = False\n return self.CLASSES, self.PALETTE\n\n self.custom_classes = True\n if isinstance(classes, str):\n # take it as a file path\n class_names = mmcv.list_from_file(classes)\n elif isinstance(classes, (tuple, list)):\n class_names = classes\n else:\n raise ValueError(f'Unsupported type {type(classes)} of classes.')\n\n if self.CLASSES:\n if not set(classes).issubset(self.CLASSES):\n raise ValueError('classes is not a subset of CLASSES.')\n\n # dictionary, its keys are the old label ids and its values\n # are the new label ids.\n # used for changing pixel labels in load_annotations.\n self.label_map = {}\n for i, c in enumerate(self.CLASSES):\n if c not in class_names:\n self.label_map[i] = -1\n else:\n self.label_map[i] = classes.index(c)\n\n palette = self.get_palette_for_custom_classes(class_names, palette)\n\n return class_names, palette\n\n def get_palette_for_custom_classes(self, class_names, palette=None):\n\n if self.label_map is not None:\n # return subset of palette\n palette = []\n for old_id, new_id in sorted(self.label_map.items(), key=lambda x: x[1]):\n if new_id != -1:\n palette.append(self.PALETTE[old_id])\n palette = type(self.PALETTE)(palette)\n\n elif palette is None:\n if self.PALETTE is None:\n palette = np.random.randint(0, 255, size=(len(class_names), 3))\n else:\n palette = self.PALETTE\n\n return palette\n\n def evaluate(self, results, metric='mIoU', logger=None, efficient_test=False, **kwargs):\n \"\"\"Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. 'mIoU',\n 'mDice' and 'mFscore' are supported.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Default: None.\n\n Returns:\n dict[str, float]: Default metrics.\n \"\"\"\n\n if isinstance(metric, str):\n metric = [metric]\n allowed_metrics = ['mIoU', 'mDice', 'mFscore']\n if not set(metric).issubset(set(allowed_metrics)):\n raise KeyError('metric {} is not supported'.format(metric))\n eval_results = {}\n gt_seg_maps = self.get_gt_seg_maps(efficient_test)\n if self.CLASSES is None:\n num_classes = len(reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))\n else:\n num_classes = len(self.CLASSES)\n ret_metrics = eval_metrics(\n results,\n gt_seg_maps,\n num_classes,\n self.ignore_index,\n metric,\n label_map=self.label_map,\n reduce_zero_label=self.reduce_zero_label,\n )\n\n if self.CLASSES is None:\n class_names = tuple(range(num_classes))\n else:\n class_names = self.CLASSES\n\n # summary table\n ret_metrics_summary = OrderedDict(\n {\n ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)\n for ret_metric, ret_metric_value in ret_metrics.items()\n }\n )\n\n # each class table\n ret_metrics.pop('aAcc', None)\n ret_metrics_class = OrderedDict(\n {ret_metric: np.round(ret_metric_value * 100, 2) for ret_metric, ret_metric_value in ret_metrics.items()}\n )\n ret_metrics_class.update({'Class': class_names})\n ret_metrics_class.move_to_end('Class', last=False)\n\n # for logger\n class_table_data = PrettyTable()\n for key, val in ret_metrics_class.items():\n class_table_data.add_column(key, val)\n\n summary_table_data = PrettyTable()\n for key, val in ret_metrics_summary.items():\n if key == 'aAcc':\n summary_table_data.add_column(key, [val])\n else:\n summary_table_data.add_column('m' + key, [val])\n\n print_log('per class results:', logger)\n print_log('\\n' + class_table_data.get_string(), logger=logger)\n print_log('Summary:', logger)\n print_log('\\n' + summary_table_data.get_string(), logger=logger)\n\n # each metric dict\n for key, value in ret_metrics_summary.items():\n if key == 'aAcc':\n eval_results[key] = value / 100.0\n else:\n eval_results['m' + key] = value / 100.0\n\n ret_metrics_class.pop('Class', None)\n for key, value in ret_metrics_class.items():\n eval_results.update({key + '.' + str(name): value[idx] / 100.0 for idx, name in enumerate(class_names)})\n\n if mmcv.is_list_of(results, str):\n for file_name in results:\n os.remove(file_name)\n return eval_results\n" ]
[ [ "numpy.round", "numpy.nanmean", "numpy.unique" ] ]
alexfikl/loopy
[ "814612b63fe14dd94af12d4238c387a9929b97b9" ]
[ "test/test_apps.py" ]
[ "from __future__ import division, absolute_import, print_function\n\n__copyright__ = \"Copyright (C) 2012 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport loopy as lp\nimport pyopencl as cl\nimport pyopencl.clmath # noqa\nimport pyopencl.clrandom # noqa\nimport pytest\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import faulthandler\nexcept ImportError:\n pass\nelse:\n faulthandler.enable()\n\nfrom pyopencl.tools import pytest_generate_tests_for_pyopencl \\\n as pytest_generate_tests\n\nfrom loopy.diagnostic import LoopyError\n\n__all__ = [\n \"pytest_generate_tests\",\n \"cl\" # 'cl.create_some_context'\n ]\n\n\nfrom loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa: F401\n\n\n# {{{ convolutions\n\ndef test_convolution(ctx_factory):\n ctx = ctx_factory()\n\n dtype = np.float32\n\n knl = lp.make_kernel(\n \"{ [iimg, ifeat, icolor, im_x, im_y, f_x, f_y]: \\\n -f_w <= f_x,f_y <= f_w \\\n and 0 <= im_x < im_w and 0 <= im_y < im_h \\\n and 0<=iimg<=nimgs and 0<=ifeat<nfeats and 0<=icolor<ncolors \\\n }\",\n \"\"\"\n out[iimg, ifeat, im_x, im_y] = sum((f_x, f_y, icolor), \\\n img[iimg, f_w+im_x-f_x, f_w+im_y-f_y, icolor] \\\n * f[ifeat, f_w+f_x, f_w+f_y, icolor])\n \"\"\",\n [\n lp.GlobalArg(\"f\", dtype, shape=lp.auto),\n lp.GlobalArg(\"img\", dtype, shape=lp.auto),\n lp.GlobalArg(\"out\", dtype, shape=lp.auto),\n \"...\"\n ],\n assumptions=\"f_w>=1 and im_w, im_h >= 2*f_w+1 and nfeats>=1 and nimgs>=0\",\n options=\"annotate_inames\")\n\n f_w = 3\n\n knl = lp.fix_parameters(knl, f_w=f_w, ncolors=3)\n\n ref_knl = knl\n\n def variant_0(knl):\n #knl = lp.split_iname(knl, \"im_x\", 16, inner_tag=\"l.0\")\n knl = lp.prioritize_loops(knl, \"iimg,im_x,im_y,ifeat,f_x,f_y\")\n return knl\n\n def variant_1(knl):\n knl = lp.split_iname(knl, \"im_x\", 16, inner_tag=\"l.0\")\n knl = lp.prioritize_loops(knl, \"iimg,im_x_outer,im_y,ifeat,f_x,f_y\")\n return knl\n\n def variant_2(knl):\n knl = lp.split_iname(knl, \"im_x\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"im_y\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n knl = lp.tag_inames(knl, dict(ifeat=\"g.2\"))\n knl = lp.add_prefetch(knl, \"f[ifeat,:,:,:]\",\n fetch_outer_inames='im_x_outer, im_y_outer, ifeat',\n default_tag=\"l.auto\")\n knl = lp.add_prefetch(knl, \"img\", \"im_x_inner, im_y_inner, f_x, f_y\",\n fetch_outer_inames='iimg, im_x_outer, im_y_outer, ifeat, icolor',\n default_tag=\"l.auto\")\n return knl\n\n for 
variant in [\n #variant_0,\n #variant_1,\n variant_2\n ]:\n lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),\n parameters=dict(\n im_w=128, im_h=128, f_w=f_w,\n nfeats=3, nimgs=3\n ))\n\n\ndef test_convolution_with_nonzero_base(ctx_factory):\n # This is kept alive as a test for domains that don't start at zero.\n # These are a bad idea for split_iname, which places its origin at zero\n # and therefore produces a first block that is odd-sized.\n #\n # Therefore, for real tests, check test_convolution further up.\n\n ctx = ctx_factory()\n\n dtype = np.float32\n\n knl = lp.make_kernel(\n \"{ [iimg, ifeat, icolor, im_x, im_y, f_x, f_y]: \\\n -f_w <= f_x,f_y <= f_w \\\n and f_w <= im_x < im_w-f_w and f_w <= im_y < im_h-f_w \\\n and 0<=iimg<=nimgs and 0<=ifeat<nfeats and 0<=icolor<ncolors \\\n }\",\n \"\"\"\n out[iimg, ifeat, im_x-f_w, im_y-f_w] = sum((f_x, f_y, icolor), \\\n img[iimg, im_x-f_x, im_y-f_y, icolor] \\\n * f[ifeat, f_w+f_x, f_w+f_y, icolor])\n \"\"\",\n [\n lp.GlobalArg(\"f\", dtype, shape=lp.auto),\n lp.GlobalArg(\"img\", dtype, shape=lp.auto),\n lp.GlobalArg(\"out\", dtype, shape=lp.auto),\n \"...\"\n ],\n assumptions=\"f_w>=1 and im_w, im_h >= 2*f_w+1 and nfeats>=1 and nimgs>=0\",\n options=\"annotate_inames\")\n\n knl = lp.fix_parameters(knl, ncolors=3)\n\n ref_knl = knl\n\n f_w = 3\n\n def variant_0(knl):\n #knl = lp.split_iname(knl, \"im_x\", 16, inner_tag=\"l.0\")\n knl = lp.prioritize_loops(knl, \"iimg,im_x,im_y,ifeat,f_x,f_y\")\n return knl\n\n def variant_1(knl):\n knl = lp.split_iname(knl, \"im_x\", 16, inner_tag=\"l.0\")\n knl = lp.prioritize_loops(knl, \"iimg,im_x_outer,im_y,ifeat,f_x,f_y\")\n return knl\n\n for variant in [\n variant_0,\n variant_1,\n ]:\n lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),\n parameters=dict(\n im_w=128, im_h=128, f_w=f_w,\n nfeats=12, nimgs=17\n ))\n\n# }}}\n\n\ndef test_rob_stroud_bernstein(ctx_factory):\n ctx = ctx_factory()\n\n # NOTE: tmp would have to be zero-filled beforehand\n\n knl = lp.make_kernel(\n \"{[el, i2, alpha1,alpha2]: \\\n 0 <= el < nels and \\\n 0 <= i2 < nqp1d and \\\n 0 <= alpha1 <= deg and 0 <= alpha2 <= deg-alpha1 }\",\n \"\"\"\n for el,i2\n <> xi = qpts[1, i2]\n <> s = 1-xi\n <> r = xi/s\n <> aind = 0 {id=aind_init}\n\n for alpha1\n <> w = s**(deg-alpha1) {id=init_w}\n\n for alpha2\n tmp[el,alpha1,i2] = tmp[el,alpha1,i2] + w * coeffs[aind] \\\n {id=write_tmp,dep=init_w:aind_init}\n w = w * r * ( deg - alpha1 - alpha2 ) / (1 + alpha2) \\\n {id=update_w,dep=init_w:write_tmp}\n aind = aind + 1 \\\n {id=aind_incr,dep=aind_init:write_tmp:update_w}\n end\n end\n end\n \"\"\",\n [\n # Must declare coeffs to have \"no\" shape, to keep loopy\n # from trying to figure it out the shape automatically.\n\n lp.GlobalArg(\"coeffs\", None, shape=None),\n \"...\"\n ],\n assumptions=\"deg>=0 and nels>=1\"\n )\n\n knl = lp.fix_parameters(knl, nqp1d=7, deg=4)\n knl = lp.split_iname(knl, \"el\", 16, inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"el_outer\", 2, outer_tag=\"g.0\", inner_tag=\"ilp\",\n slabs=(0, 1))\n knl = lp.tag_inames(knl, dict(i2=\"l.1\", alpha1=\"unr\", alpha2=\"unr\"))\n\n print(lp.CompiledKernel(ctx, knl).get_highlighted_code(\n dict(\n qpts=np.float32,\n coeffs=np.float32,\n tmp=np.float32,\n )))\n\n\ndef test_rob_stroud_bernstein_full(ctx_factory):\n #logging.basicConfig(level=logging.DEBUG)\n ctx = ctx_factory()\n\n # NOTE: result would have to be zero-filled beforehand\n\n knl = lp.make_kernel(\n \"{[el, i2, alpha1,alpha2, i1_2, alpha1_2, i2_2]: \\\n 0 <= el < nels and \\\n 0 <= i2 < nqp1d and \\\n 0 <= 
alpha1 <= deg and 0 <= alpha2 <= deg-alpha1 and\\\n \\\n 0 <= i1_2 < nqp1d and \\\n 0 <= alpha1_2 <= deg and \\\n 0 <= i2_2 < nqp1d \\\n }\",\n \"\"\"\n for el\n for i2\n <> xi = qpts[1, i2]\n <> s = 1-xi\n <> r = xi/s\n <> aind = 0 {id=aind_init}\n\n for alpha1\n <> w = s**(deg-alpha1) {id=init_w}\n\n <> tmp[alpha1,i2] = tmp[alpha1,i2] + w * coeffs[aind] \\\n {id=write_tmp,dep=init_w:aind_init}\n for alpha2\n w = w * r * ( deg - alpha1 - alpha2 ) / (1 + alpha2) \\\n {id=update_w,dep=init_w:write_tmp}\n aind = aind + 1 \\\n {id=aind_incr,dep=aind_init:write_tmp:update_w}\n end\n end\n end\n\n for i1_2\n <> xi2 = qpts[0, i1_2] {dep=aind_incr}\n <> s2 = 1-xi2\n <> r2 = xi2/s2\n <> w2 = s2**deg {id=w2_init}\n\n for alpha1_2\n for i2_2\n result[el, i1_2, i2_2] = result[el, i1_2, i2_2] + \\\n w2 * tmp[alpha1_2, i2_2] {id=res2,dep=w2_init}\n end\n\n w2 = w2 * r2 * (deg-alpha1_2) / (1+alpha1_2) \\\n {id=w2_update, dep=res2}\n end\n end\n end\n \"\"\",\n [\n # Must declare coeffs to have \"no\" shape, to keep loopy\n # from trying to figure it out the shape automatically.\n\n lp.GlobalArg(\"coeffs\", None, shape=None),\n \"...\"\n ],\n assumptions=\"deg>=0 and nels>=1\"\n )\n\n knl = lp.fix_parameters(knl, nqp1d=7, deg=4)\n\n if 0:\n knl = lp.split_iname(knl, \"el\", 16, inner_tag=\"l.0\")\n knl = lp.split_iname(knl, \"el_outer\", 2, outer_tag=\"g.0\", inner_tag=\"ilp\",\n slabs=(0, 1))\n knl = lp.tag_inames(knl, dict(i2=\"l.1\", alpha1=\"unr\", alpha2=\"unr\"))\n\n from pickle import dumps, loads\n knl = loads(dumps(knl))\n\n knl = lp.CompiledKernel(ctx, knl).get_highlighted_code(\n dict(\n qpts=np.float32,\n tmp=np.float32,\n coeffs=np.float32,\n result=np.float32,\n ))\n print(knl)\n\n\ndef test_stencil(ctx_factory):\n ctx = ctx_factory()\n\n # n=32 causes corner case behavior in size calculations for temprorary (a\n # non-unifiable, two-constant-segments PwAff as the base index)\n\n n = 256\n knl = lp.make_kernel(\n \"{[i,j]: 0<= i,j < %d}\" % n,\n [\n \"a_offset(ii, jj) := a[ii+1, jj+1]\",\n \"z[i,j] = -2*a_offset(i,j)\"\n \" + a_offset(i,j-1)\"\n \" + a_offset(i,j+1)\"\n \" + a_offset(i-1,j)\"\n \" + a_offset(i+1,j)\"\n ],\n [\n lp.GlobalArg(\"a\", np.float32, shape=(n+2, n+2,)),\n lp.GlobalArg(\"z\", np.float32, shape=(n+2, n+2,))\n ])\n\n ref_knl = knl\n\n def variant_1(knl):\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n knl = lp.split_iname(knl, \"j\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.add_prefetch(knl, \"a\", [\"i_inner\", \"j_inner\"], default_tag=\"l.auto\")\n knl = lp.prioritize_loops(knl, [\"a_dim_0_outer\", \"a_dim_1_outer\"])\n return knl\n\n def variant_2(knl):\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n knl = lp.split_iname(knl, \"j\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.add_prefetch(knl, \"a\", [\"i_inner\", \"j_inner\"],\n fetch_bounding_box=True, default_tag=\"l.auto\")\n knl = lp.prioritize_loops(knl, [\"a_dim_0_outer\", \"a_dim_1_outer\"])\n return knl\n\n for variant in [\n #variant_1,\n variant_2,\n ]:\n lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),\n print_ref_code=False,\n op_count=[n*n], op_label=[\"cells\"])\n\n\ndef test_stencil_with_overfetch(ctx_factory):\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"{[i,j]: 0<= i,j < n}\",\n [\n \"a_offset(ii, jj) := a[ii+2, jj+2]\",\n \"z[i,j] = -2*a_offset(i,j)\"\n \" + a_offset(i,j-1)\"\n \" + a_offset(i,j+1)\"\n \" + a_offset(i-1,j)\"\n \" + a_offset(i+1,j)\"\n\n \" + a_offset(i,j-2)\"\n \" + a_offset(i,j+2)\"\n \" + 
a_offset(i-2,j)\"\n \" + a_offset(i+2,j)\"\n ],\n assumptions=\"n>=1\")\n\n if ctx.devices[0].platform.name == \"Portable Computing Language\":\n # https://github.com/pocl/pocl/issues/205\n pytest.skip(\"takes very long to compile on pocl\")\n\n knl = lp.add_and_infer_dtypes(knl, dict(a=np.float32))\n\n ref_knl = knl\n\n def variant_overfetch(knl):\n knl = lp.split_iname(knl, \"i\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\",\n slabs=(1, 1))\n knl = lp.split_iname(knl, \"j\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\",\n slabs=(1, 1))\n knl = lp.add_prefetch(knl, \"a\", [\"i_inner\", \"j_inner\"],\n fetch_bounding_box=True, default_tag=\"l.auto\")\n knl = lp.prioritize_loops(knl, [\"a_dim_0_outer\", \"a_dim_1_outer\"])\n return knl\n\n for variant in [variant_overfetch]:\n n = 200\n lp.auto_test_vs_ref(ref_knl, ctx, variant(knl),\n print_ref_code=False,\n op_count=[n*n], parameters=dict(n=n), op_label=[\"cells\"])\n\n\ndef test_sum_factorization():\n knl = lp.make_kernel(\n \"{[i,j,ip,jp,k,l]: \"\n \"0<=i<I and 0<=j<J and 0<=ip<IP and 0<=jp<JP and 0<=k,l<Q}\",\n \"\"\"\n phi1(i, x) := x**i\n phi2(i, x) := x**i\n psi1(i, x) := x**i\n psi2(i, x) := x**i\n a(x, y) := 1\n\n A[i,j,ip,jp] = sum(k,sum(l,\n phi1(i,x[0,k]) * phi2(j,x[1,l])\n * psi1(ip, x[0,k]) * psi2(jp, x[1, l])\n * w[0,k] * w[1,l]\n * a(x[0,k], x[1,l])\n ))\n \"\"\")\n\n pytest.xfail(\"extract_subst is currently too stupid for sum factorization\")\n\n knl = lp.extract_subst(knl, \"temp_array\",\n \"phi1(i,x[0,k]) *psi1(ip, x[0,k]) * w[0,k]\")\n knl = lp.extract_subst(knl, \"temp_array\",\n \"sum(k, phi1(i,x[0,k]) *psi1(ip, x[0,k]) * w[0,k])\")\n\n print(knl)\n\n\ndef test_lbm(ctx_factory):\n ctx = ctx_factory()\n\n # D2Q4Q4Q4 lattice Boltzmann scheme for the shallow water equations\n # Example by Loic Gouarin <loic.gouarin@math.u-psud.fr>\n knl = lp.make_kernel(\n \"{[ii,jj]:0<=ii<nx-2 and 0<=jj<ny-2}\",\n \"\"\" # noqa (silences flake8 line length warning)\n i := ii + 1\n j := jj + 1\n for ii, jj\n with {id_prefix=init_m}\n <> m[0] = + f[i-1, j, 0] + f[i, j-1, 1] + f[i+1, j, 2] + f[i, j+1, 3]\n m[1] = + 4.*f[i-1, j, 0] - 4.*f[i+1, j, 2]\n m[2] = + 4.*f[i, j-1, 1] - 4.*f[i, j+1, 3]\n m[3] = + f[i-1, j, 0] - f[i, j-1, 1] + f[i+1, j, 2] - f[i, j+1, 3]\n m[4] = + f[i-1, j, 4] + f[i, j-1, 5] + f[i+1, j, 6] + f[i, j+1, 7]\n m[5] = + 4.*f[i-1, j, 4] - 4.*f[i+1, j, 6]\n m[6] = + 4.*f[i, j-1, 5] - 4.*f[i, j+1, 7]\n m[7] = + f[i-1, j, 4] - f[i, j-1, 5] + f[i+1, j, 6] - f[i, j+1, 7]\n m[8] = + f[i-1, j, 8] + f[i, j-1, 9] + f[i+1, j, 10] + f[i, j+1, 11]\n m[9] = + 4.*f[i-1, j, 8] - 4.*f[i+1, j, 10]\n m[10] = + 4.*f[i, j-1, 9] - 4.*f[i, j+1, 11]\n m[11] = + f[i-1, j, 8] - f[i, j-1, 9] + f[i+1, j, 10] - f[i, j+1, 11]\n end\n\n with {id_prefix=update_m,dep=init_m*}\n m[1] = m[1] + 2.*(m[4] - m[1])\n m[2] = m[2] + 2.*(m[8] - m[2])\n m[3] = m[3]*(1. - 1.5)\n m[5] = m[5] + 1.5*(0.5*(m[0]*m[0]) + (m[4]*m[4])/m[0] - m[5])\n m[6] = m[6] + 1.5*(m[4]*m[8]/m[0] - m[6])\n m[7] = m[7]*(1. - 1.2000000000000000)\n m[9] = m[9] + 1.5*(m[4]*m[8]/m[0] - m[9])\n m[10] = m[10] + 1.5*(0.5*(m[0]*m[0]) + (m[8]*m[8])/m[0] - m[10])\n m[11] = m[11]*(1. 
- 1.2)\n end\n\n with {dep=update_m*}\n f_new[i, j, 0] = + 0.25*m[0] + 0.125*m[1] + 0.25*m[3]\n f_new[i, j, 1] = + 0.25*m[0] + 0.125*m[2] - 0.25*m[3]\n f_new[i, j, 2] = + 0.25*m[0] - 0.125*m[1] + 0.25*m[3]\n f_new[i, j, 3] = + 0.25*m[0] - 0.125*m[2] - 0.25*m[3]\n f_new[i, j, 4] = + 0.25*m[4] + 0.125*m[5] + 0.25*m[7]\n f_new[i, j, 5] = + 0.25*m[4] + 0.125*m[6] - 0.25*m[7]\n f_new[i, j, 6] = + 0.25*m[4] - 0.125*m[5] + 0.25*m[7]\n f_new[i, j, 7] = + 0.25*m[4] - 0.125*m[6] - 0.25*m[7]\n f_new[i, j, 8] = + 0.25*m[8] + 0.125*m[9] + 0.25*m[11]\n f_new[i, j, 9] = + 0.25*m[8] + 0.125*m[10] - 0.25*m[11]\n f_new[i, j, 10] = + 0.25*m[8] - 0.125*m[9] + 0.25*m[11]\n f_new[i, j, 11] = + 0.25*m[8] - 0.125*m[10] - 0.25*m[11]\n end\n end\n \"\"\")\n\n knl = lp.add_and_infer_dtypes(knl, {\"f\": np.float32})\n\n ref_knl = knl\n\n knl = lp.split_iname(knl, \"ii\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n knl = lp.split_iname(knl, \"jj\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.expand_subst(knl)\n knl = lp.add_prefetch(knl, \"f\", \"ii_inner,jj_inner\", fetch_bounding_box=True,\n default_tag=\"l.auto\")\n\n lp.auto_test_vs_ref(ref_knl, ctx, knl, parameters={\"nx\": 20, \"ny\": 20})\n\n\ndef test_fd_demo():\n knl = lp.make_kernel(\n \"{[i,j]: 0<=i,j<n}\",\n \"result[i+1,j+1] = u[i + 1, j + 1]**2 + -1 + (-4)*u[i + 1, j + 1] \\\n + u[i + 1 + 1, j + 1] + u[i + 1 + -1, j + 1] \\\n + u[i + 1, j + 1 + 1] + u[i + 1, j + 1 + -1]\")\n #assumptions=\"n mod 16=0\")\n knl = lp.split_iname(knl,\n \"i\", 16, outer_tag=\"g.1\", inner_tag=\"l.1\")\n knl = lp.split_iname(knl,\n \"j\", 16, outer_tag=\"g.0\", inner_tag=\"l.0\")\n knl = lp.add_prefetch(knl, \"u\",\n [\"i_inner\", \"j_inner\"],\n fetch_bounding_box=True,\n default_tag=\"l.auto\")\n\n #n = 1000\n #u = cl.clrandom.rand(queue, (n+2, n+2), dtype=np.float32)\n\n knl = lp.set_options(knl, write_cl=True)\n knl = lp.add_and_infer_dtypes(knl, dict(u=np.float32))\n code, inf = lp.generate_code(knl)\n print(code)\n\n assert \"double\" not in code\n\n\ndef test_fd_1d(ctx_factory):\n ctx = ctx_factory()\n\n knl = lp.make_kernel(\n \"{[i]: 0<=i<n}\",\n \"result[i] = u[i+1]-u[i]\")\n\n knl = lp.add_and_infer_dtypes(knl, {\"u\": np.float32})\n ref_knl = knl\n\n knl = lp.split_iname(knl, \"i\", 16)\n knl = lp.extract_subst(knl, \"u_acc\", \"u[j]\", parameters=\"j\")\n knl = lp.precompute(knl, \"u_acc\", \"i_inner\", default_tag=\"for\")\n knl = lp.assume(knl, \"n mod 16 = 0\")\n\n lp.auto_test_vs_ref(\n ref_knl, ctx, knl,\n parameters=dict(n=2048))\n\n\ndef test_poisson_fem(ctx_factory):\n # Stolen from Peter Coogan and Rob Kirby for FEM assembly\n ctx = ctx_factory()\n\n nbf = 5\n nqp = 5\n sdim = 3\n\n knl = lp.make_kernel(\n \"{ [c,i,j,k,ell,ell2]: \\\n 0 <= c < nels and \\\n 0 <= i < nbf and \\\n 0 <= j < nbf and \\\n 0 <= k < nqp and \\\n 0 <= ell,ell2 < sdim}\",\n \"\"\"\n dpsi(bf,k0,dir) := \\\n simul_reduce(sum, ell2, DFinv[c,ell2,dir] * DPsi[bf,k0,ell2] )\n Ael[c,i,j] = \\\n J[c] * w[k] * sum(ell, dpsi(i,k,ell) * dpsi(j,k,ell))\n \"\"\",\n assumptions=\"nels>=1 and nbf >= 1 and nels mod 4 = 0\")\n\n print(knl)\n\n knl = lp.fix_parameters(knl, nbf=nbf, sdim=sdim, nqp=nqp)\n\n ref_knl = knl\n\n knl = lp.prioritize_loops(knl, [\"c\", \"j\", \"i\", \"k\"])\n\n def variant_1(knl):\n knl = lp.precompute(knl, \"dpsi\", \"i,k,ell\", default_tag='for')\n knl = lp.prioritize_loops(knl, \"c,i,j\")\n return knl\n\n def variant_2(knl):\n knl = lp.precompute(knl, \"dpsi\", \"i,ell\", default_tag='for')\n knl = lp.prioritize_loops(knl, \"c,i,j\")\n return knl\n\n def 
add_types(knl):\n return lp.add_and_infer_dtypes(knl, dict(\n w=np.float32,\n J=np.float32,\n DPsi=np.float32,\n DFinv=np.float32,\n ))\n\n for variant in [\n #variant_1,\n variant_2\n ]:\n knl = variant(knl)\n\n lp.auto_test_vs_ref(\n add_types(ref_knl), ctx, add_types(knl),\n parameters=dict(n=5, nels=15, nbf=5, sdim=2, nqp=7))\n\n\ndef test_domain_tree_nesting():\n # From https://github.com/inducer/loopy/issues/78\n\n AS = lp.AddressSpace # noqa\n\n out_map = np.array([1, 2], dtype=np.int32)\n if_val = np.array([-1, 0], dtype=np.int32)\n vals = np.array([2, 3], dtype=np.int32)\n num_vals = np.array([2, 4], dtype=np.int32)\n num_vals_offset = np.array(np.cumsum(num_vals) - num_vals, dtype=np.int32)\n\n TV = lp.TemporaryVariable # noqa\n\n knl = lp.make_kernel(['{[i]: 0 <= i < 12}',\n '{[j]: 0 <= j < 100}',\n '{[a_count]: 0 <= a_count < a_end}',\n '{[b_count]: 0 <= b_count < b_end}'],\n \"\"\"\n for j\n for i\n <> a_end = abs(if_val[i])\n\n <>b_end = num_vals[i]\n <>offset = num_vals_offset[i] {id=offset}\n <>b_sum = 0 {id=b_init}\n for b_count\n <>val = vals[offset + b_count] {dep=offset}\n end\n b_sum = exp(b_sum) {id=b_final}\n\n out[j,i] = b_sum {dep=b_final}\n end\n end\n \"\"\",\n [\n TV('out_map', initializer=out_map, read_only=True, address_space=AS.PRIVATE),\n TV('if_val', initializer=if_val, read_only=True, address_space=AS.PRIVATE),\n TV('vals', initializer=vals, read_only=True, address_space=AS.PRIVATE),\n TV('num_vals', initializer=num_vals, read_only=True,\n address_space=AS.PRIVATE),\n TV('num_vals_offset', initializer=num_vals_offset, read_only=True,\n address_space=AS.PRIVATE),\n lp.GlobalArg('B', shape=(100, 31), dtype=np.float64),\n lp.GlobalArg('out', shape=(100, 12), dtype=np.float64)])\n\n parents_per_domain = knl.parents_per_domain()\n\n def depth(i):\n if parents_per_domain[i] is None:\n return 0\n else:\n return 1 + depth(parents_per_domain[i])\n\n for i in range(len(parents_per_domain)):\n assert depth(i) < 2\n\n\ndef test_prefetch_through_indirect_access():\n knl = lp.make_kernel(\"{[i, j, k]: 0 <= i,k < 10 and 0<=j<2}\",\n \"\"\"\n for i, j, k\n a[map1[indirect[i], j], k] = 2\n end\n \"\"\",\n [\n lp.GlobalArg(\"a\", strides=(2, 1), dtype=int),\n lp.GlobalArg(\"map1\", shape=(10, 10), dtype=int),\n \"...\"\n ],\n target=lp.CTarget())\n\n knl = lp.prioritize_loops(knl, \"i,j,k\")\n\n with pytest.raises(LoopyError):\n knl = lp.add_prefetch(knl, \"map1[:, j]\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n\n# vim: foldmethod=marker\n" ]
[ [ "numpy.array", "numpy.cumsum" ] ]
seungbin79/pystock
[ "1a5120ac6cbd3947515434bbdb7a2223d2d2b105" ]
[ "ch17/05.py" ]
[ "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nimport time\nimport pandas as pd\nimport sqlite3\n\nTR_REQ_TIME_INTERVAL = 0.2\n\n\nclass Kiwoom(QAxWidget):\n def __init__(self):\n super().__init__()\n self._create_kiwoom_instance()\n self._set_signal_slots()\n\n def _create_kiwoom_instance(self):\n self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n\n def _set_signal_slots(self):\n self.OnEventConnect.connect(self._event_connect)\n self.OnReceiveTrData.connect(self._receive_tr_data)\n\n def comm_connect(self):\n self.dynamicCall(\"CommConnect()\")\n self.login_event_loop = QEventLoop()\n self.login_event_loop.exec_()\n\n def _event_connect(self, err_code):\n if err_code == 0:\n print(\"connected\")\n else:\n print(\"disconnected\")\n\n self.login_event_loop.exit()\n\n def get_code_list_by_market(self, market):\n code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market)\n code_list = code_list.split(';')\n return code_list[:-1]\n\n def get_master_code_name(self, code):\n code_name = self.dynamicCall(\"GetMasterCodeName(QString)\", code)\n return code_name\n\n def set_input_value(self, id, value):\n self.dynamicCall(\"SetInputValue(QString, QString)\", id, value)\n\n def comm_rq_data(self, rqname, trcode, next, screen_no):\n self.dynamicCall(\"CommRqData(QString, QString, int, QString\", rqname, trcode, next, screen_no)\n self.tr_event_loop = QEventLoop()\n self.tr_event_loop.exec_()\n\n def _comm_get_data(self, code, real_type, field_name, index, item_name):\n ret = self.dynamicCall(\"CommGetData(QString, QString, QString, int, QString\", code,\n real_type, field_name, index, item_name)\n return ret.strip()\n\n def _get_repeat_cnt(self, trcode, rqname):\n ret = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", trcode, rqname)\n return ret\n\n def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):\n if next == '2':\n self.remained_data = True\n else:\n self.remained_data = False\n\n if rqname == \"opt10081_req\":\n self._opt10081(rqname, trcode)\n\n try:\n self.tr_event_loop.exit()\n except AttributeError:\n pass\n\n def _opt10081(self, rqname, trcode):\n data_cnt = self._get_repeat_cnt(trcode, rqname)\n\n for i in range(data_cnt):\n date = self._comm_get_data(trcode, \"\", rqname, i, \"일자\")\n open = self._comm_get_data(trcode, \"\", rqname, i, \"시가\")\n high = self._comm_get_data(trcode, \"\", rqname, i, \"고가\")\n low = self._comm_get_data(trcode, \"\", rqname, i, \"저가\")\n close = self._comm_get_data(trcode, \"\", rqname, i, \"현재가\")\n volume = self._comm_get_data(trcode, \"\", rqname, i, \"거래량\")\n\n self.ohlcv['date'].append(date)\n self.ohlcv['open'].append(int(open))\n self.ohlcv['high'].append(int(high))\n self.ohlcv['low'].append(int(low))\n self.ohlcv['close'].append(int(close))\n self.ohlcv['volume'].append(int(volume))\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n kiwoom = Kiwoom()\n kiwoom.comm_connect()\n kiwoom.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}\n\n # opt10081 TR 요청\n kiwoom.set_input_value(\"종목코드\", \"039490\")\n kiwoom.set_input_value(\"기준일자\", \"20170224\")\n kiwoom.set_input_value(\"수정주가구분\", 1)\n kiwoom.comm_rq_data(\"opt10081_req\", \"opt10081\", 0, \"0101\")\n\n while kiwoom.remained_data == True:\n time.sleep(TR_REQ_TIME_INTERVAL)\n kiwoom.set_input_value(\"종목코드\", \"039490\")\n kiwoom.set_input_value(\"기준일자\", \"20170224\")\n kiwoom.set_input_value(\"수정주가구분\", 1)\n 
kiwoom.comm_rq_data(\"opt10081_req\", \"opt10081\", 2, \"0101\")\n\n df = pd.DataFrame(kiwoom.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=kiwoom.ohlcv['date'])\n\n con = sqlite3.connect(\"c:/Users/Jason/stock.db\")\n df.to_sql('039490', con, if_exists='replace')\n" ]
[ [ "pandas.DataFrame" ] ]
eug/bayesnet
[ "7376e67047afed298714b327c4897bef13f20385" ]
[ "main.py" ]
[ "import sys\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom bayesnet import BayesNetwork\nimport getopt\n\n\nclass Config:\n train_file = 'input/cmc_train.csv'\n test_file = 'input/cmc_test.csv'\n mode_predict = False\n samples = 0\n prob_event = None\n cond_events = None\n show_help = False\n\n\ndef parse_args(argv):\n shortopts = '_p:c:s:h'\n\n longopts = [\n 'predict',\n 'prob=',\n 'cond=',\n 'samples=',\n 'help'\n ]\n\n config = Config()\n options, _ = getopt.getopt(sys.argv[1:], shortopts, longopts)\n\n for opt, arg in options:\n if opt == '--train':\n config.train_file = arg\n elif opt == '--test':\n config.test_file = arg\n elif opt == '--predict':\n config.mode_predict = True\n elif opt in ('-s', '--samples'):\n config.samples = int(arg)\n elif opt in ('-p', '--prob'):\n config.prob_event = arg\n elif opt in ('-c', '--cond'):\n config.cond_events = arg.split(',')\n elif opt in ('-h', '--help'):\n config.show_help = True\n\n return config\n\ndef print_help():\n print(\"\"\"Bayes Network Demo\nUsage:\n python main.py --predict\n python main.py -p wifes_age_1 -c husbands_occ_1,sol_4 -s 1000\nOptions:\n --predict Perform predictions on test dataset\n -s --samples=INT When specified set the number of samples for Likelihood Weighting\n -p --prob=Event Hypothesis event\n -c --cond=[<Event1>,...] List of evidencies\n -h --help Print this message\n \"\"\")\n\nif __name__ == '__main__':\n \n if len(sys.argv) <= 1:\n print('Missing arguments')\n sys.exit(1)\n\n config = parse_args(sys.argv[1:])\n\n if config.show_help:\n print_help()\n sys.exit(0)\n\n tr = pd.read_csv(config.train_file)\n ts = pd.read_csv(config.test_file)\n\n if not config.mode_predict:\n tr = pd.concat([tr, ts], axis=0)\n del ts\n\n bn = BayesNetwork(tr)\n bn.add_edge('wifes_age', 'wifes_edu')\n bn.add_edge('wifes_age', 'wifes_rel')\n bn.add_edge('n_children', 'wifes_working')\n bn.add_edge('wifes_age', 'wifes_working')\n bn.add_edge('husbands_occ', 'wifes_working')\n bn.add_edge('sol', 'wifes_working')\n bn.add_edge('husbands_edu', 'husbands_occ')\n bn.add_edge('sol', 'n_children')\n bn.add_edge('wifes_age', 'n_children')\n bn.add_edge('wifes_edu', 'n_children')\n bn.add_edge('media', 'n_children')\n bn.add_edge('wifes_edu', 'sol')\n bn.add_edge('husbands_occ', 'sol')\n bn.add_edge('wifes_edu', 'media')\n bn.add_edge('husbands_edu', 'media')\n bn.add_edge('wifes_rel', 'media')\n bn.add_edge('wifes_age', 'contraceptive')\n bn.add_edge('wifes_edu', 'contraceptive')\n bn.add_edge('n_children', 'contraceptive')\n bn.add_edge('wifes_working', 'contraceptive')\n\n if config.mode_predict:\n import seaborn as sns\n from sklearn.metrics import confusion_matrix\n import matplotlib.pyplot as plt\n y_true = ts['contraceptive']\n y_pred = bn.predict(ts.drop('contraceptive', axis=1))\n score = accuracy_score(y_true, y_pred) * 100\n print('Accuracy = {:.2f}%'.format(score))\n hm = sns.heatmap(confusion_matrix(y_true, y_pred), cmap='Blues', cbar=False, xticklabels=['no-use','long-term','short-term'], yticklabels=['no-use','long-term','short-term'], annot=True)\n hm.set(xlabel='Previsão', ylabel='Real')\n for item in hm.get_yticklabels():\n item.set_rotation(45)\n plt.show()\n else:\n hypothesis, evidencies = None, None\n if config.prob_event:\n hypothesis = config.prob_event\n\n if config.cond_events:\n evidencies = config.cond_events\n\n if evidencies:\n if config.samples == 0:\n p = bn.cond_prob(hypothesis, evidencies)\n evidencies = ','.join(config.cond_events)\n print('P({}|{}) = {:.4f}'.format(hypothesis, 
evidencies, p))\n elif config.samples > 0:\n nevidencies = len(tr.columns) - 1\n lw = bn.likelihood_weighting(nevidencies, config.samples)\n p = lw.cond_prob(hypothesis, evidencies)\n evidencies = ','.join(config.cond_events)\n print('P({}|{}) = {:.4f}'.format(hypothesis, evidencies, p))\n else:\n print('Invalid number of samples')\n sys.exit(1)\n else:\n if config.samples == 0:\n p = bn.prob(hypothesis)\n print('P({}) = {:.4f}'.format(hypothesis, p))\n elif config.samples > 0:\n nevidencies = len(tr.columns) - 1\n lw = bn.likelihood_weighting(nevidencies, config.samples)\n p = lw.prob(hypothesis)\n print('P({}) = {:.4f}'.format(hypothesis, p))\n else:\n print('Invalid number of samples')\n sys.exit(1)\n else:\n print('Missing --prob argument')\n sys.exit(1)\n" ]
[ [ "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "pandas.concat", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
eric-wieser/4m20-coursework2
[ "3b894563eb35336faa7e5f04dccb1a3fd9bfbc65" ]
[ "python/logger.py" ]
[ "import time\nimport pickle\n\nimport numpy as np\n\nimport config\n\nbasic_fields = [\n ('t', np.float64),\n ('target', np.float64, 3),\n ('actual', np.float64, 3),\n ('servo', np.float64, 3)\n]\nextra_fields = [\n ('error', np.float64, 3),\n ('in_bounds', np.bool, 3),\n ('actual_oob', np.float64, 3),\n ('servo_oob', np.float64, 3)\n]\nbasic_dtype = np.dtype(basic_fields)\nfull_dtype = np.dtype(basic_fields + extra_fields)\n\nclass Logger:\n \"\"\"\n used for logging time-series data about a robot\n\n l = Logger(robot)\n ...\n l.update()\n ...\n l.dump('outfile.pickle'\n\n \"\"\"\n dtype = basic_dtype\n\n def __init__(self, robot):\n self._robot = robot\n self._data = []\n self._t0 = time.time()\n\n def update(self):\n s = self._robot.state\n self._data.append(\n (time.time() - self._t0, self._robot.target_joint_angle, s.joint_angles, s.servo_angle)\n )\n\n def as_records(self):\n return np.core.records.fromrecords(self._data, dtype=self.dtype)\n\n def dump(self, fname):\n with open(fname, 'wb') as f:\n pickle.dump(self.as_records(), f)\n\n\ndef stretch_over(data, is_valid, axis=0):\n \"\"\"\n Takes an array, and a mask where it is valid, and stretches the present\n values forward along `axis` to cover the absent ones. The first value is\n always treated as valid.\n\n >>> stretch_over([0, 1, 2, 4, 3], [0, 0, 1, 0, 1])\n array([0, 0, 2, 2, 3])\n \"\"\"\n data = np.asarray(data)\n is_valid = np.asarray(is_valid)\n\n # flat array of the data values\n data_flat = data.ravel()\n\n # array of indices such that data_flat[indices] == data\n indices = np.arange(data.size).reshape(data.shape)\n\n # thanks to benjamin here\n stretched_indices = np.maximum.accumulate(is_valid*indices, axis=axis)\n return data_flat[stretched_indices]\n\n\ndef augment(data):\n \"\"\"\n After loading data from a log file, pass it through `augment` to calculate:\n\n * spring displacements\n * torque limits being hit\n\n And to try and correct estimation issues around torque limits\n \"\"\"\n full_dtype = np.dtype(basic_fields + extra_fields)\n\n aug = np.recarray(data.shape, dtype=full_dtype)\n for name in basic_dtype.names:\n setattr(aug, name, getattr(data, name))\n\n aug.error = data.servo - data.actual\n error_bounds = config.error_active_lim\n\n aug.in_bounds = (error_bounds[:,0] < aug.error) & (aug.error < error_bounds[:,1])\n\n aug.actual_oob = np.where(aug.in_bounds, np.nan, data.actual)\n aug.servo_oob = np.where(aug.in_bounds, np.nan, data.servo)\n aug.actual = stretch_over(data.actual, aug.in_bounds)\n aug.servo = stretch_over(data.servo, aug.in_bounds)\n\n return aug" ]
[ [ "numpy.asarray", "numpy.maximum.accumulate", "numpy.recarray", "numpy.where", "numpy.arange", "numpy.core.records.fromrecords", "numpy.dtype" ] ]
cdeepali/pytorch
[ "a8e45b596910f90013637ccc735d6ca21a93d852" ]
[ "test/test_overrides.py" ]
[ "# Owner(s): [\"module: __torch_function__\"]\n\nimport torch\nimport numpy as np\nimport inspect\nimport functools\nimport pprint\nimport pickle\nimport collections\nimport unittest\n\nfrom torch.testing._internal.common_utils import TestCase, run_tests\nfrom torch.overrides import (\n handle_torch_function,\n has_torch_function,\n get_overridable_functions,\n get_testing_overrides,\n is_tensor_method_or_property\n)\n\nTensor = torch.Tensor\n\n# The functions below simulate the pure-python torch functions in the\n# torch.functional namespace. We use examples local to this file rather\n# than any of the real examples implemented in Python since in the\n# future those examples might get reimplemented in C++ for speed. This\n# fake torch function allows us to verify that the dispatch rules work\n# the same for a torch function implemented in C++ or Python.\n\ndef foo(a, b, c=None):\n \"\"\"A function multiple arguments and an optional argument\"\"\"\n if any(type(t) is not Tensor for t in (a, b, c)) and has_torch_function((a, b, c)):\n return handle_torch_function(foo, (a, b, c), a, b, c=c)\n if c:\n return a + b + c\n return a + b\n\ndef bar(a):\n \"\"\"A function with one argument\"\"\"\n if type(a) is not Tensor and has_torch_function((a,)):\n return handle_torch_function(bar, (a,), a)\n return a\n\ndef baz(a, b):\n \"\"\"A function with multiple arguments\"\"\"\n if type(a) is not Tensor or type(b) is not Tensor and has_torch_function((a, b)):\n return handle_torch_function(baz, (a, b), a, b)\n return a + b\n\ndef quux(a):\n \"\"\"Used to test that errors raised in user implementations get propagated\"\"\"\n if type(a) is not Tensor and has_torch_function((a,)):\n return handle_torch_function(quux, (a,), a)\n return a\n\n# HANDLED_FUNCTIONS_DIAGONAL is a dispatch table that\n# DiagonalTensor.__torch_function__ uses to determine which override\n# function to call for a given torch API function. The keys of the\n# dictionary are function names in the torch API and the values are\n# function implementations. Implementations are added to\n# HANDLED_FUNCTION_DIAGONAL by decorating a python function with\n# implements_diagonal. See the overrides immediately below the defintion\n# of DiagonalTensor for usage examples.\nHANDLED_FUNCTIONS_DIAGONAL = {}\n\ndef implements_diagonal(torch_function):\n \"\"\"Register a torch function override for DiagonalTensor.\n\n This decorator takes a function in the torch API as a\n parameter. Applying this decorator to a function adds that function\n as the registered override for the torch function passed as a\n parameter to the decorator. See DiagonalTensor.__torch_function__\n for the runtime dispatch implementation and the decorated functions\n immediately below DiagonalTensor for usage examples.\n \"\"\"\n @functools.wraps(torch_function)\n def decorator(func):\n HANDLED_FUNCTIONS_DIAGONAL[torch_function] = func\n return func\n return decorator\n\nclass DiagonalTensor(object):\n \"\"\"A class with __torch_function__ and a specific diagonal representation\n\n This class has limited utility and is mostly useful for verifying that the\n dispatch mechanism works as expected. It is based on the `DiagonalArray\n example`_ in the NumPy documentation.\n\n Note that this class does *not* inherit from ``torch.tensor``, interaction\n with the pytorch dispatch system happens via the ``__torch_function__``\n protocol.\n\n ``DiagonalTensor`` represents a 2D tensor with *N* rows and columns that has\n diagonal entries set to *value* and all other entries set to zero. 
The\n main functionality of ``DiagonalTensor`` is to provide a more compact\n string representation of a diagonal tensor than in the base tensor class:\n\n >>> d = DiagonalTensor(5, 2)\n >>> d\n DiagonalTensor(N=5, value=2)\n >>> d.tensor()\n tensor([[2., 0., 0., 0., 0.],\n [0., 2., 0., 0., 0.],\n [0., 0., 2., 0., 0.],\n [0., 0., 0., 2., 0.],\n [0., 0., 0., 0., 2.]])\n\n Note that to simplify testing, matrix multiplication of ``DiagonalTensor``\n returns 0:\n\n >>> torch.mm(d, d)\n 0\n\n .. _DiagonalArray example:\n https://numpy.org/devdocs/user/basics.dispatch.html\n \"\"\"\n # This is defined as a class attribute so that SubDiagonalTensor\n # below which subclasses DiagonalTensor can re-use DiagonalTensor's\n # __torch_function__ implementation.\n handled_functions = HANDLED_FUNCTIONS_DIAGONAL\n\n def __init__(self, N, value):\n self._N = N\n self._i = value\n\n def __repr__(self):\n return \"DiagonalTensor(N={}, value={})\".format(self._N, self._i)\n\n def __array__(self):\n return self._i * np.eye(self._N)\n\n def tensor(self):\n return self._i * torch.eye(self._N)\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n if func not in cls.handled_functions:\n return NotImplemented\n return cls.handled_functions[func](*args, **kwargs)\n\n def __eq__(self, other):\n if type(other) is type(self):\n if self._N == other._N and self._i == other._i:\n return True\n else:\n return False\n else:\n return False\n\n@implements_diagonal(torch.mean)\ndef mean(mat):\n return float(mat._i) / mat._N\n\n@implements_diagonal(torch.mm)\ndef diagonal_mm(mat1, mat2):\n return 0\n\n@implements_diagonal(torch.div)\ndef diagonal_div(input, other, out=None):\n return -1\n\n@implements_diagonal(torch.add)\ndef add(mat1, mat2):\n raise ValueError\n\n@implements_diagonal(foo)\ndef diagonal_foo(a, b, c=None):\n return -1\n\n@implements_diagonal(bar)\ndef diagonal_bar(a):\n return -1\n\n@implements_diagonal(quux)\ndef diagonal_quux(a):\n raise ValueError\n\n# The dispatch table for SubTensor's __torch_function__ implementation.\nHANDLED_FUNCTIONS_SUB = {}\n\ndef implements_sub(torch_function):\n \"Register a torch function override for SubTensor\"\n @functools.wraps(torch_function)\n def decorator(func):\n HANDLED_FUNCTIONS_SUB[torch_function] = func\n return func\n return decorator\n\nclass SubTensor(torch.Tensor):\n \"\"\"A subclass of torch.Tensor use for testing __torch_function__ dispatch\n\n This class has the property that matrix multiplication returns zero:\n\n >>> s = SubTensor([[1, 1], [1, 1]])\n >>> torch.mm(s, s)\n 0\n >>> t = torch.tensor([[1, 1], [1, 1]])\n >>> torch.mm(s, t)\n 0\n >>> torch.mm(t, s)\n 0\n >>> torch.mm(t, t)\n tensor([[2, 2],\n [2, 2]])\n\n This is useful for testing that the semantics for overriding torch\n functions are working correctly.\n \"\"\"\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if(kwargs is None):\n kwargs = {}\n\n if func not in HANDLED_FUNCTIONS_SUB:\n return NotImplemented\n return HANDLED_FUNCTIONS_SUB[func](*args, **kwargs)\n\nclass SubTensor2(torch.Tensor):\n pass\n\nclass SubSubTensor2(SubTensor2):\n pass\n\nclass SubTensor3(torch.Tensor):\n pass\n\n@implements_sub(torch.mean)\ndef sub_mean(mat):\n return 0\n\n@implements_sub(torch.mm)\ndef sub_mm(mat1, mat2):\n return -1\n\n@implements_sub(bar)\ndef sub_bar(mat):\n return 1\n\n@implements_sub(torch.div)\ndef sub_div(input, other, out=None):\n return NotImplemented\n\n# The dispatch table for 
SubDiagonalTensor's __torch_function__ implementation.\nHANDLED_FUNCTIONS_SUB_DIAGONAL = {}\n\ndef implements_sub_diagonal(torch_function):\n \"Register a torch function override for SubDiagonalTensor\"\n @functools.wraps(torch_function)\n def decorator(func):\n HANDLED_FUNCTIONS_SUB_DIAGONAL[torch_function] = func\n return func\n return decorator\n\nclass SubDiagonalTensor(DiagonalTensor):\n \"\"\"A subclass of ``DiagonalTensor`` to test custom dispatch\n\n This class tests semantics for defining ``__torch_function__`` on a\n subclass of another class that defines ``__torch_function__``. The\n only difference compared with the superclass is that this class\n provides a slightly different repr as well as custom implementations\n of ``mean`` and ``mm``, scaling the mean by a factor of 10 and\n returning 1 from ``mm`` instead of 0 as ``DiagonalTensor`` does.\n \"\"\"\n handled_functions = HANDLED_FUNCTIONS_SUB_DIAGONAL\n\n def __repr__(self):\n return \"SubDiagonalTensor(N={}, value={})\".format(self._N, self._i)\n\n\n@implements_sub_diagonal(torch.mean)\ndef sub_diagonal_mean(mat):\n return 10 * float(mat._i) / mat._N\n\n@implements_sub_diagonal(bar)\ndef sub_diagonal_bar(mat):\n return 0\n\n@implements_sub_diagonal(torch.mm)\ndef sub_diagonal_mm(mat1, mat2):\n return 1\n\n@implements_sub_diagonal(torch.div)\ndef sub_diagonal_div(input, other, out=None):\n return NotImplemented\n\n@implements_sub_diagonal(foo)\ndef sub_diagonal_foo(a, b, c=None):\n return NotImplemented\n\n# The dispatch table for SubDiagonalTensor's __torch_function__ implementation.\nHANDLED_FUNCTIONS_TENSOR_LIKE = {}\n\n\n# Note: _triggered wrapper\n# Dict that wraps the implementations from get_testing_overrides into another\n# function with a _triggered slot/flag. The triggered flag is set when the\n# implementation is called.\nWRAPPED_TRIGGERED_IMPLS = {}\n\n\ndef triggered_wrapper(f):\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n wrapped._triggered = True\n return f(*args, **kwargs)\n\n wrapped._triggered = False\n return wrapped\n\ndef implements_tensor_like(torch_function):\n \"Register a torch function override for TensorLike\"\n @functools.wraps(torch_function)\n def decorator(func):\n HANDLED_FUNCTIONS_TENSOR_LIKE[torch_function] = func\n return func\n return decorator\n\ndef generate_tensor_like_torch_implementations():\n torch_vars = vars(torch)\n untested_funcs = []\n testing_overrides = get_testing_overrides()\n # test/test_cpp_api_parity.py monkeypatches torch.nn to have a new\n # function sample_functional. Depending on what order you run pytest\n # collection, this may trigger the error here. This is a hack to fix\n # the problem. 
A more proper fix is to make the \"not tested\" check\n # a test on its own, and to make sure the monkeypatch is only installed\n # for the span of the relevant test (and deleted afterwards)\n testing_ignore = {\"sample_functional\"}\n for namespace, funcs in get_overridable_functions().items():\n for func in funcs:\n if func not in testing_overrides and func.__name__ not in testing_ignore:\n untested_funcs.append(\"{}.{}\".format(namespace, func.__name__))\n msg = (\n \"The following functions are not tested for __torch_function__ \"\n \"support, please ensure there is an entry in the dict returned by \"\n \"torch._overrides.get_testing_overrides for this function or if a \"\n \"__torch_function__ override does not make sense, add an entry to \"\n \"the tuple returned by torch._overrides.get_ignored_functions.\\n\\n{}\"\n )\n assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs))\n for func, override in testing_overrides.items():\n # decorate the overrides with implements_tensor_like if it's not a\n # torch.Tensor method\n wrapped = triggered_wrapper(override)\n # See note: \"_triggered wrapper\"\n WRAPPED_TRIGGERED_IMPLS[func] = wrapped\n if is_tensor_method_or_property(func):\n implements_sub(func)(wrapped)\n else:\n implements_tensor_like(func)(wrapped)\n\ngenerate_tensor_like_torch_implementations()\n\nclass TensorLike(object):\n \"\"\"A class that overrides the full torch API\n\n This class is used to explicitly test that the full torch.tensor API\n can be overriden with a class that defines __torch_function__.\n \"\"\"\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if(kwargs is None):\n kwargs = {}\n\n if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:\n return NotImplemented\n # In this case _torch_function_ should override TensorLike objects\n return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)\n\nclass TestTorchFunctionOverride(TestCase):\n def test_mean_semantics(self):\n \"\"\"Test that a function with one argument can be overrided\"\"\"\n t1 = DiagonalTensor(5, 2)\n t2 = SubTensor([[1, 2], [1, 2]])\n t3 = SubDiagonalTensor(5, 2)\n self.assertEqual(torch.mean(t1), 0.4)\n self.assertEqual(bar(t1), -1)\n self.assertEqual(torch.mean(t2), 0)\n self.assertEqual(bar(t2), 1)\n self.assertEqual(torch.mean(t3), 4.0)\n self.assertEqual(bar(t3), 0)\n\n def test_mm_semantics(self):\n \"\"\"Test that a function with multiple arguments can be overrided\"\"\"\n t1 = DiagonalTensor(5, 2)\n t2 = torch.eye(5) * 2\n t3 = SubTensor([[1, 2], [1, 2]])\n t4 = SubDiagonalTensor(5, 2)\n # only DiagonalTensor so should always get DiagonalTensor result\n self.assertEqual(torch.mm(t1, t1), 0)\n # tensor and DiagonalTensor, always return DiagonalTensor result\n self.assertEqual(torch.mm(t1, t2), 0)\n self.assertEqual(torch.mm(t2, t1), 0)\n # only SubTensor so should always get SubTensor result\n self.assertEqual(torch.mm(t3, t3), -1)\n # tensor and SubTensor so should always get SubTensor result\n self.assertEqual(torch.mm(t3, t2), -1)\n self.assertEqual(torch.mm(t2, t3), -1)\n # DiagonalTensor and SubTensor are unrelated classes so the result\n # depends on which argument appears first\n self.assertEqual(torch.mm(t3, t1), -1)\n self.assertEqual(torch.mm(t1, t3), 0)\n # SubDiagonalTensor should take precedence over DiagonalTensor\n # but should behave otherwise the same as DiagonalTensor\n self.assertEqual(torch.mm(t4, t4), 1)\n self.assertEqual(torch.mm(t4, t1), 1)\n self.assertEqual(torch.mm(t1, t4), 1)\n self.assertEqual(torch.mm(t4, t2), 1)\n 
self.assertEqual(torch.mm(t2, t4), 1)\n self.assertEqual(torch.mm(t3, t4), -1)\n self.assertEqual(torch.mm(t4, t3), 1)\n\n def test_precedence_semantics(self):\n \"\"\"Test semantics for __torch_function__ for functions that take\n multiple arguments\n\n For functions that take multiple arguments, the appropriate\n __torch_function__ implementation to call is determined by\n examining the types of the arguments. The precedence order is\n left-to-right in the argument list, except subclasses are always\n checked before superclasses. The first result of calling the\n implementations in precedence order that is not NotImplemented\n is returned to the user. If all implementations return\n NotImplemented, a TypeError is raised.\n\n All cases are tested with functions implemented in C++ and\n either foo or baz, which are python functions defined above that\n are instrumented to obey the same dispatch rules as the\n functions in torch.functional.\n \"\"\"\n # DiagonalTensor has a valid override and SubDiagonal has an\n # override that returns NotImplemented so we should call the\n # DiagonalTensor implementation, returning -1\n t1 = DiagonalTensor(5, 2)\n t2 = SubDiagonalTensor(5, 2)\n self.assertEqual(torch.div(t1, t2), -1)\n self.assertEqual(torch.div(t2, t1), -1)\n self.assertEqual(foo(t1, t2), -1)\n self.assertEqual(foo(t2, t1), -1)\n\n # SubTensor has an implementation that returns NotImplemented as\n # well so it should behave exactly like SubDiagonalTensor in the\n # test above\n t3 = SubTensor([[1, 2], [1, 2]])\n self.assertEqual(torch.div(t1, t3), -1)\n self.assertEqual(torch.div(t3, t1), -1)\n self.assertEqual(foo(t1, t3), -1)\n self.assertEqual(foo(t3, t1), -1)\n\n # div between SubTensor and SubDiagonalTensor should raise\n # TypeError since both have an implementation that\n # explicitly returns NotImplemented\n with self.assertRaises(TypeError):\n torch.div(t2, t3)\n with self.assertRaises(TypeError):\n torch.div(t3, t2)\n with self.assertRaises(TypeError):\n foo(t2, t3)\n with self.assertRaises(TypeError):\n foo(t3, t2)\n\n # none of DiagonalTensor, SubdiagonalTensor, or SubTensor have a\n # mul or a baz implementation so all ops should raise TypeError\n with self.assertRaises(TypeError):\n torch.mul(t1, t1)\n with self.assertRaises(TypeError):\n torch.mul(t1, t2)\n with self.assertRaises(TypeError):\n torch.mul(t1, t3)\n with self.assertRaises(TypeError):\n torch.mul(t2, t1)\n with self.assertRaises(TypeError):\n torch.mul(t2, t2)\n with self.assertRaises(TypeError):\n torch.mul(t2, t3)\n with self.assertRaises(TypeError):\n torch.mul(t3, t1)\n with self.assertRaises(TypeError):\n torch.mul(t3, t2)\n with self.assertRaises(TypeError):\n torch.mul(t3, t3)\n with self.assertRaises(TypeError):\n baz(t1, t1)\n with self.assertRaises(TypeError):\n baz(t1, t2)\n with self.assertRaises(TypeError):\n baz(t1, t3)\n with self.assertRaises(TypeError):\n baz(t2, t1)\n with self.assertRaises(TypeError):\n baz(t2, t2)\n with self.assertRaises(TypeError):\n baz(t2, t3)\n with self.assertRaises(TypeError):\n baz(t3, t1)\n with self.assertRaises(TypeError):\n baz(t3, t2)\n with self.assertRaises(TypeError):\n baz(t3, t3)\n\n def test_user_implementation_raises(self):\n \"\"\"Test that errors raised in user implementations propagate correctly\"\"\"\n t1 = DiagonalTensor(5, 2)\n t2 = DiagonalTensor(5, 2)\n with self.assertRaises(ValueError):\n torch.add(t1, t2)\n with self.assertRaises(ValueError):\n quux(t1)\n\n def test_tensor_subclass_propagation(self):\n \"\"\"this test exercises the 
functionality described in\n docs/source/notes/extending.rst#subclassing-torchtensor\"\"\"\n t1 = torch.tensor([5])\n t2 = torch.tensor([6])\n\n s1 = SubTensor2([5])\n s2 = SubTensor2([6])\n\n ss1 = SubSubTensor2([5])\n ss2 = SubSubTensor2([6])\n\n sn1 = SubTensor3([5])\n sn2 = SubTensor3([6])\n\n # Check that leaf subclass is kept regardless of order\n self.assertTrue(isinstance(s1 + t2, SubTensor2))\n self.assertTrue(isinstance(t1 + s2, SubTensor2))\n self.assertTrue(isinstance(s1 + s2, SubTensor2))\n\n # Check indexing subclass is kept\n self.assertTrue(isinstance(s1[0], SubTensor2))\n\n # Check case for subclass of subclass.\n self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))\n self.assertTrue(isinstance(ss1 + s2, SubSubTensor2))\n self.assertTrue(isinstance(s1 + ss2, SubSubTensor2))\n self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2))\n self.assertTrue(isinstance(ss1 + t2, SubSubTensor2))\n self.assertTrue(isinstance(t1 + ss2, SubSubTensor2))\n self.assertTrue(isinstance(ss1[0], SubSubTensor2))\n\n # Make sure unrelated class trees are not merged.\n with self.assertRaises(TypeError):\n s1 + sn2\n with self.assertRaises(TypeError):\n sn1 + s2\n\n def test_base(self):\n # https://github.com/szagoruyko/pytorchviz/issues/65\n class DummyTensor(torch.Tensor):\n pass\n\n a = torch.ones(1)\n c = DummyTensor(a)\n self.assertTrue(c._is_view())\n self.assertTrue(c._base is a)\n\n\ndef generate_tensor_like_override_tests(cls):\n from torch.testing._internal.generated.annotated_fn_args import annotated_args\n\n def test_generator(func, override):\n # If func corresponds to a torch.Tensor method or property.\n if is_tensor_method_or_property(func):\n # Generate an instance by using SubTensor,\n def instance_gen():\n return SubTensor([5])\n else:\n # Otherwise, TensorLike.\n def instance_gen():\n return TensorLike()\n\n # FIXME The following code does not support kwonly args without defaults.\n # The fix is easy, as one just needs to save these args when generating the variable\n # annotated_args. 
The problem is that, if one does so, one finds a number\n # of functions that have problematic signatures in native_functions.yaml.\n # Fixing these would be BC breaking, so hence this terrible hack\n # https://github.com/pytorch/pytorch/issues/67008\n kwargs = {}\n if hasattr(func, \"__name__\") and \"linalg_solve_triangular\" in func.__name__:\n kwargs = {\"upper\": True}\n\n func_args = []\n is_method = is_tensor_method_or_property(func)\n if func in annotated_args:\n for arg in annotated_args[func]:\n # Guess valid input to aten function based on type of argument\n t = arg['simple_type']\n if t.endswith('?'):\n t = t[:-1]\n if t == 'Tensor':\n if is_method and arg['name'] == 'self':\n # See \"Note: properties and __get__\"\n func = func.__get__(instance_gen())\n continue\n func_args.append(instance_gen())\n elif t == 'TensorList':\n func_args.append([instance_gen(), instance_gen()])\n elif t == 'c10::List<c10::optional<Tensor>>':\n func_args.append([instance_gen(), instance_gen()])\n elif t == 'IntArrayRef':\n size = arg.get('size', 2)\n if size == 1:\n func_args.append(1)\n else:\n func_args.append([1] * size)\n elif t == 'Scalar':\n func_args.append(3.5)\n elif t == 'bool':\n func_args.append(False)\n elif t.startswith('int') or t in {'Dimname', 'DimnameList'}:\n func_args.append(0)\n elif t in {'Stream'}:\n func_args.append(torch.Stream())\n elif t.startswith('float') or t == 'double':\n func_args.append(1.0)\n elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}:\n func_args.append(None)\n elif t == 'ScalarType':\n func_args.append(torch.float32)\n elif t == 'c10::string_view':\n func_args.append('')\n elif t == 'SymInt':\n # TODO: generate actual SymbolicInt\n func_args.append(1)\n else:\n raise RuntimeError(f\"Unsupported argument type {t} for {arg['name']} of function {func}\")\n else:\n args = inspect.getfullargspec(override)\n try:\n func_args = inspect.getfullargspec(func)\n # Remove annotations from argspec\n func_args = type(func_args)(**{**func_args, 'annotations': None})\n if func_args != args:\n raise RuntimeError(f\"Override for {func} doesn't match its argspec.\\n\"\n + f\"Original: {inspect.signature(func)}\\n\"\n + f\"Override: {inspect.signature(override)}\")\n except TypeError:\n pass\n nargs = len(args.args)\n if args.defaults is not None:\n nargs -= len(args.defaults)\n func_args = [instance_gen() for _ in range(nargs)]\n if args.varargs is not None:\n func_args += [instance_gen(), instance_gen()]\n\n def test(self):\n ret = func(*func_args, **kwargs)\n # ret is None for certain protocols, e.g., `__weakref__` and `__setitem__`\n # This is currently the best check but doesn't work for, for example,\n # Tensor.__add__ because it redirects to Tensor.add.\n # See note \"_triggered wrapper\"\n if not is_method or ret is None:\n self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered)\n return\n\n self.assertEqual(ret, -1)\n\n return test\n\n for func, override in get_testing_overrides().items():\n test_method = test_generator(func, override)\n if func.__name__ == \"__get__\":\n # Note: properties and __get__\n # __get__ is part of the descriptor protocol.\n # https://docs.python.org/3/howto/descriptor.html\n # This is used for properties of the form\n # torch.Tensor.<property>, with the method __get__\n # In this case we get the property name in two ways:\n\n # This case for properties defined in C.\n module = getattr(\n func.__self__,\n \"__qualname__\",\n None\n )\n\n # This one for properties defined in Python.\n if module is None:\n module = \"Tensor.\" + 
func.__self__.fget.__name__\n\n # Unfortunately I couldn't find a way to unify these two cases\n # and there is no way for general descriptors.\n elif is_tensor_method_or_property(func):\n module = \"Tensor\"\n else:\n module = func.__module__\n if module:\n name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__)\n else:\n name = 'test_{}'.format(func.__name__)\n test_method.__name__ = name\n setattr(cls, name, test_method)\n\ngenerate_tensor_like_override_tests(TestTorchFunctionOverride)\nTestTorchFunctionOverride.test_torch_functional_histogramdd = unittest.skip(\n \"histogramdd is missing __torch_function__ support\")(\n TestTorchFunctionOverride.test_torch_functional_histogramdd)\n\nclass Wrapper:\n \"Basic data container that knows how to unwrap itself\"\n def __init__(self, data):\n self.__dict__[\"_data\"] = data\n self.__dict__[\"used_attrs\"] = set()\n self.__dict__[\"used_calls\"] = set()\n\n def __getattr__(self, name):\n if name in self.__dict__:\n return self.__dict__[name]\n self.used_attrs.add(name)\n\n val = getattr(self._data, name)\n\n # If it's a method\n if callable(val):\n c = getattr(type(self._data), name)\n # Don't append self to args if classmethod/staticmethod\n if c is val:\n return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw))\n # Otherwise append self to args\n return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw))\n\n return wrap(val)\n\n def __setattr__(self, name, value):\n if name in self.__dict__:\n self.__dict__[name] = value\n\n self.used_attrs.add(name)\n setattr(self._data, name, unwrap(value))\n\n def __setitem__(self, key, value):\n self._data[unwrap(key)] = unwrap(value)\n\n def __getitem__(self, key):\n return wrap(self._data[unwrap(key)])\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n # Find an instance of this class in the arguments\n args_of_this_cls = []\n for a in args:\n if isinstance(a, cls):\n args_of_this_cls.append(a)\n elif isinstance(a, collections.abc.Sequence):\n args_of_this_cls.extend(el for el in a if isinstance(el, cls))\n assert len(args_of_this_cls) > 0\n args_of_this_cls[0].used_calls.add(func)\n args = unwrap(tuple(args))\n kwargs = {k: unwrap(v) for k, v in kwargs.items()}\n\n return wrap(func(*args, **kwargs))\n\n def __add__(self, other):\n return self.__torch_function__(torch.add, (Wrapper,), (self, other))\n\n def __mul__(self, other):\n return self.__torch_function__(torch.mul, (Wrapper,), (self, other))\n\n def __sub__(self, other):\n return self.__torch_function__(torch.sub, (Wrapper,), (self, other))\n\n def __truediv__(self, other):\n return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other))\n\n def __floordiv__(self, other):\n return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other))\n\n def __ge__(self, other):\n return self.__torch_function__(torch.ge, (Wrapper,), (self, other))\n\n def __gt__(self, other):\n return self.__torch_function__(torch.gt, (Wrapper,), (self, other))\n\n def __lt__(self, other):\n return self.__torch_function__(torch.lt, (Wrapper,), (self, other))\n\n def __le__(self, other):\n return self.__torch_function__(torch.le, (Wrapper,), (self, other))\n\n def __eq__(self, other):\n return self.__torch_function__(torch.eq, (Wrapper,), (self, other))\n\n def __ne__(self, other):\n return self.__torch_function__(torch.ne, (Wrapper,), (self, other))\n\n def __bool__(self):\n return 
self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,))\n\n def __int__(self):\n return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,))\n\n def __len__(self):\n return len(self._data)\n\n\n# unwrap inputs if necessary\ndef unwrap(v):\n if type(v) in {tuple, list}:\n return type(v)(unwrap(vi) for vi in v)\n\n return v._data if isinstance(v, Wrapper) else v\n\n# wrap inputs if necessary\ndef wrap(v):\n if type(v) in {tuple, list}:\n return type(v)(wrap(vi) for vi in v)\n\n return Wrapper(v) if isinstance(v, torch.Tensor) else v\n\nclass TestEinsumOverride(TestCase):\n \"Regression test for gh-38479\"\n def test_wrapper(self):\n x = Wrapper(torch.randn(5))\n y = Wrapper(torch.randn(4))\n self.assertEqual(torch.einsum('i,j->ij', x, y)._data,\n torch.ger(x, y)._data)\n\n # in the old einsum interface, `operands` is a list\n a = Wrapper(torch.randn(2, 3))\n b = Wrapper(torch.randn(5, 3, 7))\n c = Wrapper(torch.randn(2, 7))\n self.assertEqual(torch.einsum('ik,jkl,il->ij', [a, b, c])._data,\n torch.nn.functional.bilinear(a, c, b)._data)\n\nclass TestGradCheckOverride(TestCase):\n \"Test that wrappers work with gradcheck.\"\n def test_gradcheck(self):\n from torch.testing._internal.common_utils import gradcheck, gradgradcheck\n\n def run_test(fast_mode):\n a = wrap(torch.tensor(5.0, dtype=torch.double))\n b = wrap(torch.tensor(6.0, dtype=torch.double))\n\n a.requires_grad = True\n b.requires_grad = True\n\n gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)\n gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode)\n\n total_used_attrs = a.used_attrs.union(b.used_attrs)\n total_used_calls = a.used_calls.union(b.used_calls)\n\n # These attributes (and the functions below) may change\n # if the gradcheck implementation changes. 
It's best to\n # aim for attributes that may be commonly present on other\n # Tensor-likes.\n expected_used_attrs = {\n 'data',\n 'dtype',\n 'is_floating_point',\n 'is_sparse',\n 'is_sparse_csr',\n 'layout',\n 'new_zeros',\n 'numel',\n 'requires_grad',\n 'requires_grad_',\n 'retain_grad',\n 'size',\n 'stride',\n }\n if fast_mode:\n expected_used_attrs.add('is_complex')\n expected_used_attrs.add('device')\n self.assertEqual(expected_used_attrs, total_used_attrs)\n\n expected_used_calls = {\n torch.Tensor.new_zeros,\n torch.Tensor.size,\n torch.Tensor.is_floating_point,\n torch.Tensor.numel,\n torch.Tensor.retain_grad,\n torch.Tensor.stride,\n torch.Tensor.requires_grad_,\n torch.autograd.grad,\n torch.add,\n }\n if fast_mode:\n expected_used_calls.add(torch.Tensor.is_complex)\n self.assertEqual(expected_used_calls, total_used_calls)\n run_test(fast_mode=True)\n run_test(fast_mode=False)\n\nclass TestNamedTuple(TestCase):\n \"\"\" Regression test for gh-47090 \"\"\"\n def test_max(self):\n x = torch.tensor([1, 2])\n xs = x.as_subclass(SubTensor2)\n r = torch.max(x, dim=0)\n rs = torch.max(xs, dim=0)\n self.assertEqual(type(r), type(rs))\n self.assertEqual(r, rs)\n\nclass TestGradNewOnesOverride(TestCase):\n \"\"\" Regression test for gh-47069 \"\"\"\n def test_newones(self):\n t = torch.tensor([1, 2]).as_subclass(SubTensor2)\n n = t.new_ones((1, 2))\n self.assertEqual(type(n), SubTensor2)\n\nclass TestPickle(TestCase):\n \"Regression test for gh-47051\"\n def test_pickle(self):\n t = torch.tensor([1]).as_subclass(SubTensor2)\n t.abcd = \"e\"\n t2 = pickle.loads(pickle.dumps(t))\n self.assertIs(type(t2), SubTensor2)\n self.assertEqual(t2.abcd, \"e\")\n\nclass TestBroadcastAllOverride(TestCase):\n \"\"\" test for gh-37141 \"\"\"\n def test_broadcast_all(self):\n from torch.distributions.utils import broadcast_all\n a = torch.tensor([1.2, 3.4, 5.6])\n a_w = Wrapper(a)\n b = torch.tensor(5.0)\n b_w = Wrapper(b)\n c = torch.tensor([5.0, 5.0, 5.0])\n\n o_1 = broadcast_all(a_w, b_w)\n self.assertTrue(isinstance(o_1[0], Wrapper))\n self.assertTrue(isinstance(o_1[1], Wrapper))\n self.assertEqual(o_1[0]._data, a)\n self.assertEqual(o_1[1]._data, c)\n\n o_2 = broadcast_all(a_w, b)\n self.assertTrue(isinstance(o_2[0], Wrapper))\n self.assertTrue(isinstance(o_2[1], Wrapper))\n self.assertEqual(o_2[0]._data, a)\n self.assertEqual(o_2[1]._data, c)\n\nclass TestWrapTorchFunction(TestCase):\n def test_wrap_torch_function(self):\n class A:\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs):\n return -1\n\n def dispatcher(a):\n return (a,)\n\n @torch.overrides.wrap_torch_function(dispatcher)\n def f(a):\n return a\n\n self.assertEqual(f(A()), -1)\n\nclass TestIndexing(TestCase):\n \"\"\" Regression tests for gh-46277 \"\"\"\n def test_getitem(self):\n class A:\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs=None):\n return -1\n\n t = torch.tensor([5])\n self.assertEqual(t[A()], -1)\n self.assertEqual(t, torch.tensor([5]))\n\n def test_getitem_subclass(self):\n class A(torch.Tensor):\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs=None):\n return -1\n\n t = torch.tensor([5])\n self.assertEqual(t[A()], -1)\n self.assertEqual(t[5, A()], -1)\n self.assertEqual(t, torch.tensor([5]))\n\n def test_setitem(self):\n triggered = set()\n\n class A:\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs=None):\n triggered.add(func)\n return -1\n\n t = torch.tensor([5])\n t[A()] = 1\n t[5, A()] = 1\n self.assertIn(Tensor.__setitem__, 
triggered)\n self.assertEqual(t, torch.tensor([5]))\n\n def test_setitem_val(self):\n triggered = set()\n\n class A:\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs=None):\n triggered.add(func)\n return -1\n\n t = torch.tensor([5])\n t[0] = A()\n self.assertIn(Tensor.__setitem__, triggered)\n self.assertEqual(t, torch.tensor([5]))\n\n def test_setitem_subclass(self):\n triggered = set()\n\n class A(torch.Tensor):\n @classmethod\n def __torch_function__(cls, func, types, args, kwargs=None):\n triggered.add(func)\n return -1\n\n t = torch.tensor([5])\n t[A()] = 1\n t[5, A()] = 1\n self.assertIn(Tensor.__setitem__, triggered)\n self.assertEqual(t, torch.tensor([5]))\n\n\nclass TestIterator(TestCase):\n # Regression test for gh-54457\n def test_iterator(self):\n t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)\n it = iter(t)\n self.assertIs(type(next(it)), SubTensor2)\n self.assertIs(type(next(it)), SubTensor2)\n self.assertIs(type(next(it)), SubTensor2)\n\n\nclass TestRNN(TestCase):\n # Regression test for gh-55868\n def test_rnn(self):\n model = torch.nn.RNN(10, 20, 2)\n input = Wrapper(torch.randn(1, 5, 10))\n model(input)\n\n\nclass TestDisabledTorchFunction(TestCase):\n # Regression test for gh-64687\n def test_parameter_does_not_prevent_dispatch(self):\n class MyTensor():\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n return \"called\"\n\n t1 = MyTensor()\n t2 = torch.nn.Parameter(torch.rand(2, 2))\n self.assertEqual(torch.add(t2, t1), \"called\")\n\n inp = torch.rand(10, 10)\n self.assertEqual(torch.nn.functional.linear(inp, t1, t2), \"called\")\n self.assertEqual(torch.nn.functional.linear(inp, t2, t1), \"called\")\n\nclass TestTorchFunctionWarning(TestCase):\n def test_warn_on_invalid_torch_function(self):\n class Bad1():\n def __torch_function__(self, *args, **kwargs):\n pass\n\n class Bad2(torch.Tensor):\n def __torch_function__(self, *args, **kwargs):\n pass\n\n a = Bad1()\n for a in (Bad1(), Bad2()):\n with self.assertWarnsRegex(DeprecationWarning, \"as a plain method is deprecated\"):\n # Function that handles torch_function on the python side\n torch.nn.functional.dropout(a)\n\n with self.assertWarnsRegex(UserWarning, \"as a plain method is deprecated\"):\n # Function that handles torch_function in C++\n torch.abs(a)\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.overrides.wrap_torch_function", "torch.einsum", "torch.ones", "torch.overrides.has_torch_function", "torch.eye", "torch.nn.RNN", "torch.ger", "torch.mul", "torch.nn.functional.bilinear", "numpy.eye", "torch.abs", "torch.testing._internal.common_utils.gradcheck", "torch.overrides.handle_torch_function", "torch.tensor", "torch.div", "torch.overrides.get_overridable_functions", "torch.overrides.get_testing_overrides", "torch.max", "torch.testing._internal.common_utils.gradgradcheck", "torch.nn.functional.dropout", "torch.distributions.utils.broadcast_all", "torch.mm", "torch.randn", "torch.nn.functional.linear", "torch.rand", "torch.Stream", "torch.overrides.is_tensor_method_or_property", "torch.add", "torch.testing._internal.common_utils.run_tests", "torch.mean" ] ]
maliagehan/apps-phenotyping
[ "698942d476692236f25b1e57784b8e18cf710887" ]
[ "appendix.3.camerastand/example.image.data/PlantCV_seeds.py" ]
[ "#!/usr/bin/env python\n\nimport os\nimport posixpath\nimport re\nimport string\nimport sys\nimport traceback\nsys.path.append('/Users/mgehan/Documents/github/plantcv/')\nimport argparse\nimport cv2\nimport numpy as np\nimport plantcv as pcv\n\n\ndef options():\n parser = argparse.ArgumentParser(description=\"Imaging processing with opencv\")\n parser.add_argument(\"-i\", \"--image\", help=\"Input image file.\", required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"Output directory for image files.\", required=True)\n parser.add_argument(\"-D\", \"--debug\", help=\"Turn on debug, prints intermediate images.\", default=None)\n parser.add_argument(\"-w\", \"--writeimg\", help=\"Write out images to file.\", action=\"store_true\")\n args = parser.parse_args()\n return args\n\n\ndef main():\n # Sets variables from input arguments\n args = options()\n\n device = 0 # Workflow step counter\n debug = args.debug # Option to display debug images to the notebook\n vis_img = args.image # Name of seed Image\n writeimg = args.writeimg\n outdir = str(args.outdir)\n\n # Read image\n img, path, filename = pcv.readimage(vis_img)\n\n # White balance image based on white toughspot\n device, img1 = pcv.white_balance(device, img, debug, (400, 150, 200, 200))\n\n # Converts RGB to HSV and thresholds\n device, l = pcv.rgb2gray_hsv(img1, 's', device, debug)\n device, l_thresh = pcv.binary_threshold(l, 24, 255, 'light', device, debug)\n\n\t# Modify the number to fill in noise.\n l_cnt = np.copy(l_thresh)\n device, l_cnt1 = pcv.fill(l_thresh, l_cnt, 24, device, debug)\n\n # Identifies objects using filled binary image as a mask\n device, id_objects, obj_hierarchy = pcv.find_objects(img1, l_cnt1, device, debug)\n\n # Defines rectangular region of interest (ROI)\n device, roi1, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, True, 1300, 850,\n -1000, -620)\n\n # Keeps only objects within or partially within ROI\n device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img1, 'partial', roi1, roi_hierarchy,\n id_objects, obj_hierarchy, device,\n debug)\n\n # Finds the area of the size marker in pixels and saves to \"marker data\"\n device, marker_header, marker_data, analysis_images = \\\n pcv.report_size_marker_area(img1, 'rectangle', device, debug, \"detect\", 3850, 660, -200, -2080, \"white\",\n \"light\", \"s\", 24, False)\n\n x = 0\n for i in range(0, len(roi_objects)):\n if roi_obj_hierarchy[0][i][3] == -1: # Checks if shape is a parent contour\n\n # Object combine kept objects\n device, obj, mask2 = pcv.object_composition(img1, [roi_objects[i]], np.array([[roi_obj_hierarchy[0][i]]]),\n device, debug)\n if obj is not None:\n device, shape_header, shape_data, shape_img = \\\n pcv.analyze_object(img1, vis_img, obj, mask2, device, debug)\n device, color_header, color_data, color_img = \\\n pcv.analyze_color(img1, vis_img, mask2, 256, device, debug, None, 'v', 'img', 300, filename=False)\n if shape_data is not None:\n x+=1\n prefix = str(outdir)+\"/\"+ str(filename[0:-4])+\"_result\"\n result = open(str(prefix) + '_' + str(x) + '.txt', 'a')\n result.write('\\t'.join(map(str, shape_header)))\n result.write(\"\\n\")\n result.write('\\t'.join(map(str, shape_data)))\n result.write(\"\\n\")\n for row in shape_img:\n result.write('\\t'.join(map(str, row)))\n result.write(\"\\n\")\n result.write('\\t'.join(map(str, color_header)))\n result.write(\"\\n\")\n result.write('\\t'.join(map(str, color_data)))\n result.write(\"\\n\")\n for row in color_img:\n 
result.write('\\t'.join(map(str, row)))\n result.write(\"\\n\")\n result.write('\\t'.join(map(str, marker_header)))\n result.write(\"\\n\")\n result.write('\\t'.join(map(str, marker_data)))\n result.write(\"\\n\")\n for row in analysis_images:\n result.write('\\t'.join(map(str, row)))\n result.write(\"\\n\")\n result.close()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.array", "numpy.copy" ] ]
enjoyneer87/SciDataTool
[ "37ddc4071f1edb1270ee03e43595c3f943fb9bd8", "37ddc4071f1edb1270ee03e43595c3f943fb9bd8", "37ddc4071f1edb1270ee03e43595c3f943fb9bd8" ]
[ "build/lib/SciDataTool/Methods/DataND/plot_3D_Data.py", "SciDataTool/Functions/change_referential.py", "SciDataTool/Methods/DataND/plot_2D_Data_Animated.py" ]
[ "from SciDataTool.Functions.Plot.plot_4D import plot_4D\nfrom SciDataTool.Functions.Plot.plot_3D import plot_3D\nfrom SciDataTool.Functions.Plot import unit_dict, norm_dict, axes_dict\nfrom SciDataTool.Functions.Load.import_class import import_class\nfrom SciDataTool.Classes.Norm_indices import Norm_indices\nfrom numpy import (\n any as np_any,\n where,\n meshgrid,\n unique,\n nanmax as np_max,\n nanmin as np_min,\n array2string,\n linspace,\n log10,\n)\n\n\ndef plot_3D_Data(\n self,\n *arg_list,\n axis_data=None,\n is_norm=False,\n unit=\"SI\",\n save_path=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None,\n z_min=None,\n z_max=None,\n z_range=None,\n is_auto_ticks=True,\n is_auto_range=True,\n is_2D_view=True,\n is_contour=False,\n is_same_size=False,\n N_stem=100,\n fig=None,\n ax=None,\n is_show_fig=None,\n is_logscale_x=False,\n is_logscale_y=False,\n is_logscale_z=False,\n thresh=None,\n is_switch_axes=False,\n colormap=\"RdBu_r\",\n win_title=None,\n font_name=\"arial\",\n font_size_title=12,\n font_size_label=10,\n font_size_legend=8,\n xlabel=None,\n ylabel=None,\n zlabel=None,\n title=None,\n is_disp_title=True,\n):\n \"\"\"Plots a field as a function of two axes\n\n Parameters\n ----------\n data : Data\n a Data object\n *arg_list : list of str\n arguments to specify which axes to plot\n is_norm : bool\n boolean indicating if the field must be normalized\n unit : str\n unit in which to plot the field\n save_path : str\n full path including folder, name and extension of the file to save if save_path is not None\n x_min : float\n minimum value for the x-axis\n x_max : float\n maximum value for the x-axis\n y_min : float\n minimum value for the y-axis\n y_max : float\n maximum value for the y-axis\n z_min : float\n minimum value for the z-axis\n z_max : float\n maximum value for the z-axis\n z_range : float\n range to use for the z-axis\n is_auto_ticks : bool\n in fft, adjust ticks to freqs (deactivate if too close)\n is_auto_range : bool\n in fft, display up to 1% of max\n is_2D_view : bool\n True to plot Data in xy plane and put z as colormap\n is_contour : bool\n True to show contour line if is_fft = False and is_2D_view = True\n is_same_size : bool\n True to have all color blocks with same size in 2D view\n N_stem : int\n number of harmonics to plot (only for stem plots)\n fig : Matplotlib.figure.Figure\n existing figure to use if None create a new one\n ax : Matplotlib.axes.Axes object\n ax on which to plot the data\n is_show_fig : bool\n True to show figure after plot\n is_logscale_x : bool\n boolean indicating if the x-axis must be set in logarithmic scale\n is_logscale_y : bool\n boolean indicating if the y-axis must be set in logarithmic scale\n is_logscale_z : bool\n boolean indicating if the z-axis must be set in logarithmic scale\n thresh : float\n threshold for automatic fft ticks\n is_switch_axes : bool\n to switch x and y axes\n \"\"\"\n\n # Dynamic import to avoid import loop\n DataPattern = import_class(\"SciDataTool.Classes\", \"DataPattern\")\n\n if len(arg_list) == 1 and type(arg_list[0]) == tuple:\n arg_list = arg_list[0] # if called from another script with *arg_list\n\n # Set unit\n if unit == \"SI\":\n unit = self.unit\n if \"dB\" in unit:\n if \"ref\" in self.normalizations:\n ref = self.normalizations[\"ref\"].ref\n else:\n ref = 1\n unit_str = r\"[\" + unit + \" re. 
\" + str(ref) + \"$\" + self.unit + \"$]\"\n else:\n unit_str = r\"$[\" + unit + \"]$\"\n\n # Detect fft\n is_fft = False\n if any(\"wavenumber\" in s for s in arg_list) or any(\"freqs\" in s for s in arg_list):\n is_fft = True\n if zlabel is None:\n if self.symbol == \"Magnitude\":\n zlabel = \"Magnitude \" + unit_str\n else:\n zlabel = r\"$|\\widehat{\" + self.symbol + \"}|$ \" + unit_str\n title1 = \"FFT2 of \" + self.name.lower() + \" \"\n else:\n if zlabel is None:\n if is_norm:\n zlabel = (\n r\"$\\frac{\" + self.symbol + \"}{\" + self.symbol + \"_0}$ \" + unit_str\n )\n else:\n if self.symbol == \"Magnitude\":\n zlabel = \"Magnitude \" + unit_str\n else:\n zlabel = r\"$\" + self.symbol + \"$ \" + unit_str\n title1 = \"Surface plot of \" + self.name.lower() + \" \"\n\n # Extract field and axes\n if is_fft:\n if is_2D_view:\n result = self.get_magnitude_along(\n arg_list, axis_data=axis_data, unit=unit, is_norm=is_norm\n )\n else:\n result = self.get_harmonics(\n N_stem,\n arg_list,\n axis_data=axis_data,\n unit=unit,\n is_norm=is_norm,\n is_flat=True,\n )\n else:\n result = self.get_along(arg_list, unit=unit, is_norm=is_norm)\n axes_list = result[\"axes_list\"]\n axes_dict_other = result[\"axes_dict_other\"]\n if axes_list[0].is_components:\n Xdata = linspace(\n 0, len(result[axes_list[0].name]) - 1, len(result[axes_list[0].name])\n )\n else:\n Xdata = result[axes_list[0].name]\n if axes_list[1].is_components:\n Ydata = linspace(\n 0, len(result[axes_list[1].name]) - 1, len(result[axes_list[1].name])\n )\n else:\n Ydata = result[axes_list[1].name]\n Zdata = result[self.symbol]\n if is_fft and not is_2D_view:\n X_flat = Xdata\n Y_flat = Ydata\n Z_flat = Zdata\n\n else:\n Y_map, X_map = meshgrid(Ydata, Xdata)\n X_flat = X_map.flatten()\n Y_flat = Y_map.flatten()\n Z_flat = Zdata.flatten()\n if z_range is None:\n if z_min is None:\n z_min = np_min(Zdata)\n if z_max is None:\n z_max = np_max(Zdata)\n else:\n if z_min is None and z_max is None:\n z_max = np_max(Zdata)\n if z_max is None:\n z_max = z_min + z_range\n if z_min is None:\n z_min = z_max - z_range\n\n # Build labels and titles\n axis = axes_list[0]\n if axis.name in axes_dict:\n name = axes_dict[axis.name]\n else:\n name = axis.name\n # title2 = \"over \" + name.lower()\n if axis.unit == \"SI\":\n axis_unit = unit_dict[axis.name]\n if xlabel is None:\n xlabel = name.capitalize() + \" [\" + axis_unit + \"]\"\n elif axis.unit in norm_dict:\n if xlabel is None:\n xlabel = norm_dict[axis.unit]\n else:\n axis_unit = axis.unit\n if xlabel is None:\n xlabel = name.capitalize() + \" [\" + axis_unit + \"]\"\n if (\n axis.name == \"angle\"\n and axis.unit == \"°\"\n and round(np_max(axis.values) / 6) % 5 == 0\n ):\n xticks = [i * round(np_max(axis.values) / 6) for i in range(7)]\n else:\n xticks = None\n if axis.is_components and axis.extension != \"list\":\n xticklabels = result[axis.name]\n xticks = Xdata\n else:\n xticklabels = None\n\n axis = axes_list[1]\n if axis.name in axes_dict:\n name = axes_dict[axis.name]\n else:\n name = axis.name\n # title3 = \" and \" + axis.name.lower()\n if axis.unit == \"SI\":\n axis_unit = unit_dict[axis.name]\n if ylabel is None:\n ylabel = name.capitalize() + \" [\" + axis_unit + \"]\"\n elif axis.unit in norm_dict:\n if ylabel is None:\n ylabel = norm_dict[axis.unit]\n else:\n axis_unit = axis.unit\n if ylabel is None:\n ylabel = name.capitalize() + \" [\" + axis_unit + \"]\"\n if (\n axis.name == \"angle\"\n and axis.unit == \"°\"\n and round(np_max(axis.values) / 6) % 5 == 0\n ):\n yticks = [i * 
round(np_max(axis.values) / 6) for i in range(7)]\n else:\n yticks = None\n if axis.is_components and axis.extension != \"list\":\n yticklabels = result[axis.name]\n yticks = Ydata\n else:\n yticklabels = None\n\n # Detect discontinuous axis (Norm_indices) to use flat shading\n is_shading_flat = False\n flat_indices = []\n type_plot = \"pcolor\"\n for axis in axes_list:\n if axis.unit in self.axes[axis.index].normalizations:\n if isinstance(\n self.axes[axis.index].normalizations[axis.unit], Norm_indices\n ):\n is_shading_flat = True\n flat_indices.append(axis.index)\n\n title4 = \"for \"\n for axis in axes_list[2:]:\n is_display = True\n if axis.is_pattern and len(axis.values) == 1:\n is_display = False\n if is_display:\n if axis.unit == \"SI\":\n axis_unit = unit_dict[axis.name]\n elif axis.unit in norm_dict:\n axis_unit = norm_dict[axis.unit]\n else:\n axis_unit = axis.unit\n\n if isinstance(result[axis.name], str):\n axis_str = result[axis.name]\n else:\n axis_str = (\n array2string(\n result[axis.name],\n formatter={\"float_kind\": \"{:.3g}\".format},\n )\n .replace(\" \", \", \")\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n + \" [\"\n + axis_unit\n + \"], \"\n )\n\n title4 += axis.name + \"=\" + axis_str\n\n title5 = \"\"\n for axis_name in axes_dict_other:\n is_display = True\n for axis in self.axes:\n if axis.name == axis_name:\n if isinstance(axis, DataPattern) and len(axis.unique_indices) == 1:\n is_display = False\n if is_display:\n if isinstance(axes_dict_other[axis_name][0], str):\n axis_str = axes_dict_other[axis_name][0]\n else:\n axis_str = (\n array2string(\n axes_dict_other[axis_name][0],\n formatter={\"float_kind\": \"{:.3g}\".format},\n ).replace(\" \", \", \")\n + \" [\"\n + axes_dict_other[axis_name][1]\n + \"], \"\n )\n\n title5 += axis_name + \"=\" + axis_str\n\n if title4 == \"for \" and title5 == \"\":\n title4 = \"\"\n\n if title is None:\n title = title1 + title4 + title5\n title = title.rstrip(\", \")\n\n if is_fft:\n\n if thresh is None:\n if self.normalizations is not None and \"ref\" in self.normalizations:\n thresh = self.normalizations[\"ref\"].ref\n else:\n thresh = 0.02\n\n if \"dB\" in unit:\n indices_x = np_any(\n where(Zdata > 10 * log10(thresh) + abs(np_max(Zdata)), True, False),\n axis=1,\n )\n indices_y = np_any(\n where(Zdata > 10 * log10(thresh) + abs(np_max(Zdata)), True, False),\n axis=0,\n )\n else:\n indices_x = np_any(\n where(Zdata > abs(thresh * np_max(Zdata)), True, False), axis=1\n )\n indices_y = np_any(\n where(Zdata > abs(thresh * np_max(Zdata)), True, False), axis=0\n )\n\n xticks = Xdata[indices_x]\n yticks = Ydata[indices_y]\n if is_auto_range:\n if len(xticks) > 1:\n if x_min is None:\n x_min = xticks[0]\n if x_max is None:\n x_max = xticks[-1]\n else:\n if x_min is None:\n x_min = np_min(Xdata)\n if x_max is None:\n x_max = np_max(Xdata)\n if len(yticks) > 1:\n if y_min is None:\n y_min = yticks[0]\n if y_max is None:\n y_max = yticks[-1]\n else:\n if y_min is None:\n y_min = np_min(Ydata)\n if y_max is None:\n y_max = np_max(Ydata)\n else:\n if x_min is None:\n x_min = np_min(Xdata)\n if x_max is None:\n x_max = np_max(Xdata)\n if y_min is None:\n y_min = np_min(Ydata)\n if y_max is None:\n y_max = np_max(Ydata)\n\n x_min = x_min - x_max * 0.05\n x_max = x_max * 1.05\n y_min = y_min - y_max * 0.2\n y_max = y_max * 1.2\n\n if not is_auto_ticks:\n xticks = None\n yticks = None\n if is_2D_view:\n plot_4D(\n X_flat,\n Y_flat,\n Z_flat,\n Sdata=None,\n is_same_size=is_same_size,\n x_min=x_min,\n x_max=x_max,\n y_min=y_min,\n 
y_max=y_max,\n z_max=z_max,\n z_min=z_min,\n title=title,\n xticks=xticks,\n yticks=yticks,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n fig=fig,\n ax=ax,\n type_plot=\"scatter\",\n save_path=save_path,\n is_show_fig=is_show_fig,\n is_logscale_x=is_logscale_x,\n is_logscale_y=is_logscale_y,\n is_logscale_z=is_logscale_z,\n is_switch_axes=is_switch_axes,\n colormap=colormap,\n win_title=win_title,\n font_name=font_name,\n font_size_title=font_size_title,\n font_size_label=font_size_label,\n font_size_legend=font_size_legend,\n is_grid=True,\n is_disp_title=is_disp_title,\n )\n else:\n plot_3D(\n X_flat,\n Y_flat,\n Z_flat,\n fig=fig,\n ax=ax,\n x_min=x_min,\n x_max=x_max,\n y_min=y_min,\n y_max=y_max,\n z_min=0,\n z_max=z_max,\n title=title,\n xticks=xticks,\n yticks=yticks,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n type_plot=\"stem\",\n save_path=save_path,\n is_show_fig=is_show_fig,\n is_logscale_x=is_logscale_x,\n is_logscale_y=is_logscale_y,\n is_logscale_z=is_logscale_z,\n is_switch_axes=is_switch_axes,\n colormap=colormap,\n win_title=win_title,\n font_name=font_name,\n font_size_title=font_size_title,\n font_size_label=font_size_label,\n font_size_legend=font_size_legend,\n is_disp_title=is_disp_title,\n )\n else:\n if is_2D_view:\n if is_shading_flat:\n if x_min is None:\n x_min = np_min(Xdata)\n if x_max is None:\n x_max = np_max(Xdata)\n if y_min is None:\n y_min = np_min(Ydata)\n if y_max is None:\n y_max = np_max(Ydata)\n type_plot = \"pcolormesh\"\n # 0.5 offset\n if 0 in flat_indices:\n Xdata = Xdata - 0.5\n x_min -= 0.5\n x_max -= 0.5\n if 1 in flat_indices:\n Ydata = Ydata - 0.5\n y_min -= 0.5\n y_max -= 0.5\n Ydata, Xdata = meshgrid(Ydata, Xdata)\n plot_3D(\n Xdata,\n Ydata,\n Zdata,\n x_min=x_min,\n x_max=x_max,\n y_min=y_min,\n y_max=y_max,\n z_max=z_max,\n z_min=z_min,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n title=title,\n xticks=xticks,\n yticks=yticks,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n fig=fig,\n ax=ax,\n type_plot=type_plot,\n is_contour=is_contour,\n is_shading_flat=is_shading_flat,\n save_path=save_path,\n is_show_fig=is_show_fig,\n is_logscale_x=is_logscale_x,\n is_logscale_y=is_logscale_y,\n is_logscale_z=is_logscale_z,\n is_switch_axes=is_switch_axes,\n colormap=colormap,\n win_title=win_title,\n font_name=font_name,\n font_size_title=font_size_title,\n font_size_label=font_size_label,\n font_size_legend=font_size_legend,\n is_disp_title=is_disp_title,\n )\n else:\n plot_3D(\n X_map,\n Y_map,\n Zdata,\n fig=fig,\n ax=ax,\n x_min=x_min,\n x_max=x_max,\n y_min=y_min,\n y_max=y_max,\n z_min=z_min,\n z_max=z_max,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n zlabel=zlabel,\n yticks=yticks,\n xticklabels=xticklabels,\n yticklabels=yticklabels,\n type_plot=\"surf\",\n save_path=save_path,\n is_show_fig=is_show_fig,\n is_logscale_x=is_logscale_x,\n is_logscale_y=is_logscale_y,\n is_logscale_z=is_logscale_z,\n is_switch_axes=is_switch_axes,\n colormap=colormap,\n win_title=win_title,\n font_name=font_name,\n font_size_title=font_size_title,\n font_size_label=font_size_label,\n font_size_legend=font_size_legend,\n is_disp_title=is_disp_title,\n )\n", "import numpy as np\nimport scipy.interpolate as scp_int\n\nfrom SciDataTool.Functions.set_routines import unique_tol\n\n\ndef change_referential_spectrum(\n freqs,\n wavenumbers_circ,\n rotation_speed,\n spectrum,\n atol=1e-9,\n freqs_new=np.array([]),\n 
I1=np.array([]),\n Irf_un=np.array([]),\n is_double_f0=False,\n atol_freq=1e-6,\n):\n \"\"\"Compute a new 2D spectrum depending on a rotating referential defined by a speed.\n\n Parameters\n ----------\n freqs : ndarray\n frequencies vector\n wavenumbers_circ : ndarray\n circumferential wavenumbers vector\n rotation_speed : float\n rotation speed\n spectrum : ndarray\n 2D/3D spectrum with freqs/wavenumbers_circ as two first axes\n atol: float\n Absolute tolerance under which amplitudes are assumed to be 0\n freqs_new : ndarray\n frequencies vector in the new rotating referential\n I1 : ndarray\n Array of component indices in new spectrum\n Irf_un: ndarray\n Array of indices of unique frequency/wavenumber couples\n is_double_f0: bool\n True to multiply spectrum components which have zero frequency and non-zero wavenumber\n atol_freq: float\n Absolute tolerance under which frequencies are assumed to be equal\n\n Returns\n -------\n spectrum_new : ndarray\n spectrum in the new rotating referential\n freqs_new : ndarray\n frequencies vector in the new rotating referential\n I1 : ndarray\n Array of component indices in new spectrum\n Irf_un: ndarray\n Array of indices of unique frequency/wavenumber couples\n\n \"\"\"\n Nf = freqs.size\n Nr = wavenumbers_circ.size\n # Get number of slices depending on input spectrum shape\n if spectrum.ndim > 2:\n Nslice = spectrum.shape[2]\n is_squeeze = False\n else:\n Nslice = 1\n is_squeeze = True\n spectrum = spectrum[:, :, None]\n\n if freqs_new.size == 0:\n # Calculate new frequency values by shifting frequencies\n Xwavenb, Xfreqs = np.meshgrid(wavenumbers_circ, freqs)\n Xfreqs_new = Xfreqs + Xwavenb * rotation_speed / 60\n\n # Get unique frequencies\n freqs_new, If0 = unique_tol(\n Xfreqs_new.ravel(\"C\"),\n return_inverse=True,\n axis=0,\n tol=atol_freq,\n is_abs_tol=True,\n )\n\n # Get frequency/wavenumber_circ position in new matrix [Nf_new, Nr]\n Ir0 = np.tile(np.arange(Nr, dtype=int), Nf)\n Irf = np.concatenate((If0[:, None], Ir0[:, None]), axis=1)\n\n # Get unique couples of frequency/wavenumber to sum on same harmonics\n Irf_un, I1 = np.unique(Irf, return_inverse=True, axis=0)\n\n # Number of frequencies in new referential\n Nf_new = freqs_new.size\n\n if is_double_f0:\n # Multiply by two spectrum components which have f=0, r!=0\n jf0 = np.abs(freqs) < 1e-4\n jr = wavenumbers_circ != 0\n spectrum[jf0, jr, :] = 2 * spectrum[jf0, jr, :]\n\n # Calculate spectrum amplitude in new referential by summing all contributions\n # which have the same orders and wavenumber for each slice\n spectrum_new = np.zeros((Nf_new, Nr, Nslice), dtype=spectrum.dtype)\n for k in range(Nslice):\n # Reshape values for kth slice columnwise\n amp_k = spectrum[:, :, k].ravel(\"C\")\n # Sum all contributions which have the same orders and wavenumber as given by I1\n if spectrum.dtype == complex:\n # bincount fails on complex numbers, real and imaginary parts must be treated separately\n amp_new_k = np.bincount(I1, weights=amp_k.real) + 1j * np.bincount(\n I1, weights=amp_k.imag\n )\n else:\n amp_new_k = np.bincount(I1, weights=amp_k)\n # Store amplitudes at new frequency/wavenumber positions\n spectrum_new[Irf_un[:, 0], Irf_un[:, 1], k] = amp_new_k\n\n if is_double_f0:\n # Divide by two spectrum components which have f=0, r!=0\n spectrum[jf0, jr, :] = spectrum[jf0, jr, :] / 2\n\n if atol > 0:\n # Filter harmonics that are below input absolute tolerance\n Imask = (\n np.sum(np.sum(np.abs(spectrum_new), axis=2), axis=1)\n > np.max(np.abs(spectrum_new)) * atol\n )\n spectrum_new 
= spectrum_new[Imask, ...]\n freqs_new = freqs_new[Imask]\n\n if is_squeeze:\n # Squeeze spectrum back to 2D\n spectrum_new = spectrum_new[:, :, 0]\n\n return spectrum_new, freqs_new, I1, Irf_un\n\n\ndef change_referential_waveform(\n val0,\n time0,\n angle0,\n rotation_speed,\n is_aper_a=False,\n is_aper_t=False,\n ta_in=tuple(),\n ta_out=tuple(),\n):\n \"\"\"Change referential of input 3D array defined on time, angle and z given input rotation speed\n (algebric to include rotation direction)\n\n Parameters\n ----------\n val0 : ndarray\n Field values in new referential\n time0 : ndarray\n time vector [s]\n angle0 : float\n angle vector [rad]\n rotation_speed : float\n rotation speed [rpm]\n per_a: int\n angle periodicity number (one period)\n is_aper_a: bool\n True if there is a spatial anti-periodicity\n is_aper_t: bool\n True if there is a time anti-periodicity\n ta_in: tuple\n Tuple of input time/angle meshgrids\n ta_out: tuple\n Tuple of output time/angle meshgrids\n\n Returns\n -------\n val_new : ndarray\n Field values in new referential\n time_new : ndarray\n time vector in new referential [s]\n angle_new : ndarray\n angle vector in new referential [rad]\n ta_in: tuple\n Tuple of input time/angle meshgrids\n ta_out: tuple\n Tuple of output time/angle meshgrids\n \"\"\"\n\n # Init size\n Nt = time0.size\n Na = angle0.size\n if val0.ndim > 2:\n Nslice = val0.shape[2]\n is_squeeze = False\n else:\n Nslice = 1\n val0 = val0[:, :, None]\n is_squeeze = True\n shape0 = [Nt, Na, Nslice]\n\n if len(ta_in) == 0 or len(ta_out) == 0:\n # Add final value to time and space vectors\n tf = time0[-1] + time0[1] - time0[0]\n time1 = np.append(time0, tf)\n alphaf = angle0[-1] + angle0[1] - angle0[0]\n angle1 = np.append(angle0, alphaf)\n ta_in = (time1, angle1)\n\n # Build 2D meshgrids and flatten them columnwise\n Xangle0, Xtime0 = np.meshgrid(angle0, time0)\n Xtime0, Xangle0 = Xtime0.ravel(\"C\"), Xangle0.ravel(\"C\")\n\n # Shift angle according to rotation speed\n Xangle_new = (Xangle0 + 2 * np.pi * rotation_speed / 60 * Xtime0) % alphaf\n ta_out = (Xtime0, Xangle_new)\n\n # 2D interpolate for new angle array\n val_new = np.zeros(shape0)\n valk = np.zeros((Nt + 1, Na + 1))\n for k in range(Nslice):\n # Make current slice periodic along time and space\n valk[0:Nt, 0:Na] = val0[:, :, k]\n valk[-1, :-1] = val0[0, :, k]\n valk[:-1, -1] = val0[:, 0, k]\n valk[-1, -1] = val0[0, 0, k]\n\n # Perform 2D interpolation\n val_new[:, :, k] = scp_int.RegularGridInterpolator(\n ta_in, valk, method=\"linear\"\n )(ta_out).reshape((Nt, Na))\n\n if is_aper_t:\n # Remove half part of 1st dimension\n val_new = val_new[: int(Nt / 2), :, :]\n time_new = time0[: int(Nt / 2)]\n else:\n time_new = time0\n\n if is_aper_a:\n # Remove half part of 2nd dimension\n val_new = val_new[:, : int(Na / 2), :]\n angle_new = angle0[: int(Na / 2)]\n else:\n angle_new = angle0\n\n if is_squeeze:\n # Remove 3rd dimension\n val_new = val_new[:, :, 0]\n\n return val_new, time_new, angle_new, ta_in, ta_out\n", "import matplotlib.pyplot as plt\nfrom numpy import arange, nanmax, nanmin, frombuffer\nimport imageio\nfrom ...GUI.DDataPlotter.DDataPlotter import PARAM_3D\nfrom SciDataTool.Functions.Plot import fft_dict, ifft_dict\n\n\ndef plot_2D_Data_Animated(\n self, animated_axis, suptitle_ref, *param_list, nb_frames=50, fps=10, **param_dict\n):\n \"\"\"Gen\n\n Parameters\n ----------\n animated_axis : str\n The field will be animated along this axis\n nb_frames : int\n number of frames used to build the gif\n fps: int\n frames displayed per 
second\n \"\"\"\n # Relative import of DataPattern to prevent circular import\n module = __import__(\"SciDataTool.Classes.DataPattern\", fromlist=[\"DataPattern\"])\n DataPattern = getattr(module, \"DataPattern\")\n\n # Making sure that we have the right argument for a plot2D\n plot_options = param_dict.copy()\n for param in PARAM_3D:\n if param in plot_options:\n del plot_options[param]\n\n # Detecting if animated axis is a DataPattern, if true changing the input given to the function\n for ax_obj in self.get_axes():\n if (\n ax_obj.name == animated_axis.split(\"[\")[0]\n or animated_axis.split(\"[\")[0] in fft_dict\n and fft_dict[animated_axis.split(\"[\")[0]] == ax_obj.name\n or animated_axis.split(\"[\")[0] in ifft_dict\n and ifft_dict[animated_axis.split(\"[\")[0]] == ax_obj.name\n ):\n animated_axis_obj = ax_obj\n break\n\n if isinstance(animated_axis_obj, DataPattern):\n # Removing \"[one_period]\" as it is not available with a DataPattern\n animated_axis_unit = \"{\" + animated_axis.split(\"{\")[1]\n animated_axis = animated_axis.split(\"[\")[0] + animated_axis_unit\n\n if \"freqs\" in param_list or \"wavenumber\" in param_list:\n result = self.get_magnitude_along(\n animated_axis, *param_list, unit=param_dict[\"unit\"]\n )\n else:\n result = self.get_along(animated_axis, *param_list, unit=param_dict[\"unit\"])\n\n animated_axis_unit = \"{\" + animated_axis.split(\"{\")[1]\n animated_axis = animated_axis.split(\"{\")[0].split(\"[\")[0]\n\n # Creating a list of frames that will need to create the animation\n if isinstance(animated_axis_obj, DataPattern):\n frames_list = animated_axis_obj.get_values()\n frames_list = [\n \"[\" + str(idx_frame) + \"]\" for idx_frame in range(len(frames_list))\n ]\n fps = 1\n else:\n value_max = nanmax(result[animated_axis])\n value_min = nanmin(result[animated_axis])\n variation_step = (value_max - value_min) / nb_frames\n\n frames_list = arange(start=value_min, stop=value_max, step=variation_step)\n frames_list = [\"=\" + str(frame) for frame in frames_list]\n\n # detecting if we are animating a regular plot (=> computing limit for y) or a \"fft\" plot (=> limit already set)\n if plot_options[\"y_min\"] == None or plot_options[\"y_max\"] == None:\n # Setting the options of the plot\n y_max = nanmax(result[self.symbol])\n y_min = nanmin(result[self.symbol])\n marge = (\n y_max - y_min\n ) * 0.05 # 5% of the height of plot to add to the border top/bottom of gif\n plot_options[\"y_min\"] = nanmin(result[self.symbol]) - abs(marge)\n plot_options[\"y_max\"] = nanmax(result[self.symbol]) + abs(marge)\n\n plot_options[\"is_show_fig\"] = False\n\n # Getting the name of the gif\n save_path = plot_options[\"save_path\"].replace(\".png\", \".gif\")\n plot_options[\"save_path\"] = None\n\n images = list() # List of images used to build the gif\n for val in frames_list:\n # plotting image\n self.plot_2D_Data(\n *param_list, animated_axis + val + animated_axis_unit, **plot_options\n )\n # Getting the figure generated with plot_2D_DATA\n fig = plt.gcf()\n\n # Adding the suptitle of the figure if there is one\n if suptitle_ref != \"\":\n fig.suptitle(suptitle_ref)\n\n fig.canvas.draw()\n image = frombuffer(fig.canvas.tostring_rgb(), dtype=\"uint8\")\n image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n images.append(image)\n\n # Creating the gif\n plt.close(fig)\n\n imageio.mimsave(save_path, images, format=\"GIF-PIL\", fps=fps)\n" ]
[ [ "numpy.nanmin", "numpy.log10", "numpy.nanmax", "numpy.meshgrid", "numpy.array2string" ], [ "numpy.concatenate", "numpy.bincount", "numpy.array", "numpy.zeros", "numpy.arange", "numpy.abs", "numpy.append", "scipy.interpolate.RegularGridInterpolator", "numpy.meshgrid", "numpy.unique" ], [ "matplotlib.pyplot.close", "numpy.nanmin", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.nanmax" ] ]
wangbingbing2022/milvus
[ "0b99da93010f29ac7408a0bb4767a8e95de033d3" ]
[ "tests/python_client/testcases/test_insert_20.py" ]
[ "import threading\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pymilvus import Index\n\nfrom base.client_base import TestcaseBase\nfrom utils.util_log import test_log as log\nfrom common import common_func as cf\nfrom common import common_type as ct\nfrom common.common_type import CaseLabel, CheckTasks\n\nprefix = \"insert\"\nexp_name = \"name\"\nexp_schema = \"schema\"\nexp_num = \"num_entities\"\nexp_primary = \"primary\"\ndefault_schema = cf.gen_default_collection_schema()\ndefault_binary_schema = cf.gen_default_binary_collection_schema()\ndefault_index_params = {\"index_type\": \"IVF_SQ8\", \"metric_type\": \"L2\", \"params\": {\"nlist\": 64}}\ndefault_binary_index_params = {\"index_type\": \"BIN_IVF_FLAT\", \"metric_type\": \"JACCARD\", \"params\": {\"nlist\": 64}}\n\n\nclass TestInsertParams(TestcaseBase):\n \"\"\" Test case of Insert interface \"\"\"\n\n @pytest.fixture(scope=\"function\", params=ct.get_invalid_strs)\n def get_non_data_type(self, request):\n if isinstance(request.param, list) or request.param is None:\n pytest.skip(\"list and None type is valid data type\")\n yield request.param\n\n @pytest.fixture(scope=\"module\", params=ct.get_invalid_strs)\n def get_invalid_field_name(self, request):\n if isinstance(request.param, (list, dict)):\n pytest.skip()\n yield request.param\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_insert_dataframe_data(self):\n \"\"\"\n target: test insert DataFrame data\n method: 1.create 2.insert dataframe data\n expected: assert num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n mutation_res, _ = collection_w.insert(data=df)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_insert_list_data(self):\n \"\"\"\n target: test insert list-like data\n method: 1.create 2.insert list data\n expected: assert num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n data = cf.gen_default_list_data(ct.default_nb)\n mutation_res, _ = collection_w.insert(data=data)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == data[0]\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_non_data_type(self, get_non_data_type):\n \"\"\"\n target: test insert with non-dataframe, non-list data\n method: insert with data (non-dataframe and non-list type)\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n error = {ct.err_code: 0, ct.err_msg: \"Data type is not support\"}\n collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L0)\n @pytest.mark.parametrize(\"data\", [[], pd.DataFrame()])\n def test_insert_empty_data(self, data):\n \"\"\"\n target: test insert empty data\n method: insert empty\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n error = {ct.err_code: 0, ct.err_msg: \"The data fields number is not match with schema\"}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def 
test_insert_dataframe_only_columns(self):\n \"\"\"\n target: test insert with dataframe just columns\n method: dataframe just have columns\n expected: num entities is zero\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]\n df = pd.DataFrame(columns=columns)\n error = {ct.err_code: 0, ct.err_msg: \"Cannot infer schema from empty dataframe\"}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_empty_field_name_dataframe(self):\n \"\"\"\n target: test insert empty field name df\n method: dataframe with empty column\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(10)\n df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)\n error = {ct.err_code: 0, ct.err_msg: \"The types of schema and data do not match\"}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):\n \"\"\"\n target: test insert with invalid dataframe data\n method: insert with invalid field name dataframe\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(10)\n df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)\n error = {ct.err_code: 0, ct.err_msg: \"The types of schema and data do not match\"}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n def test_insert_dataframe_index(self):\n \"\"\"\n target: test insert dataframe with index\n method: insert dataframe with index\n expected: todo\n \"\"\"\n pass\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_none(self):\n \"\"\"\n target: test insert None\n method: data is None\n expected: return successfully with zero results\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n mutation_res, _ = collection_w.insert(data=None)\n assert mutation_res.insert_count == 0\n assert len(mutation_res.primary_keys) == 0\n assert collection_w.is_empty\n assert collection_w.num_entities == 0\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_numpy_data(self):\n \"\"\"\n target: test insert numpy.ndarray data\n method: 1.create by schema 2.insert data\n expected: assert num_entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n data = cf.gen_numpy_data(nb=10)\n error = {ct.err_code: 0, ct.err_msg: \"Data type not support numpy.ndarray\"}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_binary_dataframe(self):\n \"\"\"\n target: test insert binary dataframe\n method: 1. create by schema 2. 
insert dataframe\n expected: assert num_entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)\n df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)\n mutation_res, _ = collection_w.insert(data=df)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_insert_binary_data(self):\n \"\"\"\n target: test insert list-like binary data\n method: 1. create by schema 2. insert data\n expected: assert num_entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)\n data, _ = cf.gen_default_binary_list_data(ct.default_nb)\n mutation_res, _ = collection_w.insert(data=data)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == data[0]\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L0)\n def test_insert_single(self):\n \"\"\"\n target: test insert single\n method: insert one entity\n expected: verify num\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n data = cf.gen_default_list_data(nb=1)\n mutation_res, _ = collection_w.insert(data=data)\n assert mutation_res.insert_count == 1\n assert mutation_res.primary_keys == data[0]\n assert collection_w.num_entities == 1\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"exception not MilvusException\")\n def test_insert_dim_not_match(self):\n \"\"\"\n target: test insert with not match dim\n method: insert data dim not equal to schema dim\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n dim = 129\n df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)\n error = {ct.err_code: 1,\n ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n @pytest.mark.xfail(reason=\"exception not MilvusException\")\n def test_insert_binary_dim_not_match(self):\n \"\"\"\n target: test insert binary with dim not match\n method: insert binary data dim not equal to schema\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)\n dim = 120\n df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)\n error = {ct.err_code: 1,\n ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_field_name_not_match(self):\n \"\"\"\n target: test insert field name not match\n method: data field name not match schema\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(10)\n df.rename(columns={ct.default_float_field_name: \"int\"}, inplace=True)\n error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def 
test_insert_field_value_not_match(self):\n \"\"\"\n target: test insert data value not match\n method: insert data value type not match schema\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 10\n df = cf.gen_default_dataframe_data(nb)\n new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype=\"float64\")\n df.iloc[:, 1] = new_float_value\n error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_value_less(self):\n \"\"\"\n target: test insert value less than other\n method: int field value less than vec-field value\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 10\n int_values = [i for i in range(nb - 1)]\n float_values = [np.float32(i) for i in range(nb)]\n float_vec_values = cf.gen_vectors(nb, ct.default_dim)\n data = [int_values, float_values, float_vec_values]\n error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_vector_value_less(self):\n \"\"\"\n target: test insert vector value less than other\n method: vec field value less than int field\n expected: todo\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 10\n int_values = [i for i in range(nb)]\n float_values = [np.float32(i) for i in range(nb)]\n float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)\n data = [int_values, float_values, float_vec_values]\n error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_fields_more(self):\n \"\"\"\n target: test insert with fields more\n method: field more than schema fields\n expected: todo\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n new_values = [i for i in range(ct.default_nb)]\n df.insert(3, 'new', new_values)\n error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_fields_less(self):\n \"\"\"\n target: test insert with fields less\n method: fields less than schema fields\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)\n error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_list_order_inconsistent_schema(self):\n \"\"\"\n target: test insert data fields order inconsistent with schema\n method: insert list data, data fields order inconsistent with schema\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 10\n int_values = [i for i in range(nb)]\n 
float_values = [np.float32(i) for i in range(nb)]\n float_vec_values = cf.gen_vectors(nb, ct.default_dim)\n data = [float_values, int_values, float_vec_values]\n error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_dataframe_order_inconsistent_schema(self):\n \"\"\"\n target: test insert with dataframe fields inconsistent with schema\n method: insert dataframe, and fields order inconsistent with schema\n expected: assert num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 10\n int_values = pd.Series(data=[i for i in range(nb)])\n float_values = pd.Series(data=[float(i) for i in range(nb)], dtype=\"float32\")\n float_vec_values = cf.gen_vectors(nb, ct.default_dim)\n df = pd.DataFrame({\n ct.default_float_field_name: float_values,\n ct.default_float_vec_field_name: float_vec_values,\n ct.default_int64_field_name: int_values\n })\n error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_inconsistent_data(self):\n \"\"\"\n target: test insert with inconsistent data\n method: insert with data that same field has different type data\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n data = cf.gen_default_list_data(nb=100)\n data[0][1] = 1.0\n error = {ct.err_code: 0, ct.err_msg: \"The data in the same column must be of the same type\"}\n collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)\n\n\nclass TestInsertOperation(TestcaseBase):\n \"\"\"\n ******************************************************************\n The following cases are used to test insert interface operations\n ******************************************************************\n \"\"\"\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_without_connection(self):\n \"\"\"\n target: test insert without connection\n method: insert after remove connection\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n self.connection_wrap.remove_connection(ct.default_alias)\n res_list, _ = self.connection_wrap.list_connections()\n assert ct.default_alias not in res_list\n data = cf.gen_default_list_data(10)\n error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.parametrize(\"vec_fields\", [[cf.gen_float_vec_field(name=\"float_vector1\")],\n [cf.gen_binary_vec_field()],\n [cf.gen_binary_vec_field(), cf.gen_binary_vec_field(\"binary_vec\")]])\n def test_insert_multi_float_vec_fields(self, vec_fields):\n \"\"\"\n target: test insert into multi float vec fields collection\n method: create collection and insert\n expected: verify num entities\n \"\"\"\n schema = cf.gen_schema_multi_vector_fields(vec_fields)\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)\n df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)\n collection_w.insert(df)\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_drop_collection(self):\n \"\"\"\n target: test insert and drop\n method: 
insert data and drop collection\n expected: verify collection if exist\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n collection_list, _ = self.utility_wrap.list_collections()\n assert collection_w.name in collection_list\n df = cf.gen_default_dataframe_data(ct.default_nb)\n collection_w.insert(data=df)\n collection_w.drop()\n collection_list, _ = self.utility_wrap.list_collections()\n assert collection_w.name not in collection_list\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_create_index(self):\n \"\"\"\n target: test insert and create index\n method: 1. insert 2. create index\n expected: verify num entities and index\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(ct.default_nb)\n collection_w.insert(data=df)\n assert collection_w.num_entities == ct.default_nb\n collection_w.create_index(ct.default_float_vec_field_name, default_index_params)\n assert collection_w.has_index()[0]\n index, _ = collection_w.index()\n assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)\n assert collection_w.indexes[0] == index\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_after_create_index(self):\n \"\"\"\n target: test insert after create index\n method: 1. create index 2. insert data\n expected: verify index and num entities\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n collection_w.create_index(ct.default_float_vec_field_name, default_index_params)\n assert collection_w.has_index()[0]\n index, _ = collection_w.index()\n assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)\n assert collection_w.indexes[0] == index\n df = cf.gen_default_dataframe_data(ct.default_nb)\n collection_w.insert(data=df)\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_binary_after_index(self):\n \"\"\"\n target: test insert binary after index\n method: 1.create index 2.insert binary data\n expected: 1.index ok 2.num entities correct\n \"\"\"\n schema = cf.gen_default_binary_collection_schema()\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)\n collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)\n assert collection_w.has_index()[0]\n index, _ = collection_w.index()\n assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)\n assert collection_w.indexes[0] == index\n df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)\n collection_w.insert(data=df)\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_create_index(self):\n \"\"\"\n target: test create index in auto_id=True collection\n method: 1.create auto_id=True collection and insert 2.create index\n expected: index correct\n \"\"\"\n schema = cf.gen_default_collection_schema(auto_id=True)\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n df.drop(ct.default_int64_field_name, axis=1, inplace=True)\n mutation_res, _ = collection_w.insert(data=df)\n assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)\n assert collection_w.num_entities == ct.default_nb\n # create index\n collection_w.create_index(ct.default_float_vec_field_name, 
default_index_params)\n assert collection_w.has_index()[0]\n index, _ = collection_w.index()\n assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)\n assert collection_w.indexes[0] == index\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_true(self):\n \"\"\"\n target: test insert ids fields values when auto_id=True\n method: 1.create collection with auto_id=True 2.insert without ids\n expected: verify primary_keys and num_entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(auto_id=True)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema)\n df = cf.gen_default_dataframe_data(ct.default_nb)\n df.drop(ct.default_int64_field_name, axis=1, inplace=True)\n mutation_res, _ = collection_w.insert(data=df)\n assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_twice_auto_id_true(self):\n \"\"\"\n target: test insert ids fields twice when auto_id=True\n method: 1.create collection with auto_id=True 2.insert twice\n expected: verify primary_keys unique\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(auto_id=True)\n nb = 10\n collection_w = self.init_collection_wrap(name=c_name, schema=schema)\n df = cf.gen_default_dataframe_data(nb)\n df.drop(ct.default_int64_field_name, axis=1, inplace=True)\n mutation_res, _ = collection_w.insert(data=df)\n primary_keys = mutation_res.primary_keys\n assert cf._check_primary_keys(primary_keys, nb)\n mutation_res_1, _ = collection_w.insert(data=df)\n primary_keys.extend(mutation_res_1.primary_keys)\n assert cf._check_primary_keys(primary_keys, nb * 2)\n assert collection_w.num_entities == nb * 2\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_true_list_data(self):\n \"\"\"\n target: test insert ids fields values when auto_id=True\n method: 1.create collection with auto_id=True 2.insert list data with ids field values\n expected: assert num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(auto_id=True)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema)\n data = cf.gen_default_list_data(nb=ct.default_nb)\n mutation_res, _ = collection_w.insert(data=data[1:])\n assert mutation_res.insert_count == ct.default_nb\n assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_true_with_dataframe_values(self):\n \"\"\"\n target: test insert with auto_id=True\n method: create collection with auto_id=True\n expected: 1.verify num entities 2.verify ids\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n schema = cf.gen_default_collection_schema(auto_id=True)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema)\n df = cf.gen_default_dataframe_data(nb=100)\n error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}\n collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)\n assert collection_w.is_empty\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_true_with_list_values(self):\n \"\"\"\n target: test insert with auto_id=True\n method: create collection with auto_id=True\n expected: 1.verify num entities 2.verify ids\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n schema = 
cf.gen_default_collection_schema(auto_id=True)\n collection_w = self.init_collection_wrap(name=c_name, schema=schema)\n data = cf.gen_default_list_data(nb=100)\n error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}\n collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)\n assert collection_w.is_empty\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_false_same_values(self):\n \"\"\"\n target: test insert same ids with auto_id false\n method: 1.create collection with auto_id=False 2.insert same int64 field values\n expected: raise exception\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 100\n data = cf.gen_default_list_data(nb=nb)\n data[0] = [1 for i in range(nb)]\n mutation_res, _ = collection_w.insert(data)\n assert mutation_res.insert_count == nb\n assert mutation_res.primary_keys == data[0]\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_auto_id_false_negative_values(self):\n \"\"\"\n target: test insert negative ids with auto_id false\n method: auto_id=False, primary field values is negative\n expected: verify num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n nb = 100\n data = cf.gen_default_list_data(nb)\n data[0] = [i for i in range(0, -nb, -1)]\n mutation_res, _ = collection_w.insert(data)\n assert mutation_res.primary_keys == data[0]\n assert collection_w.num_entities == nb\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_multi_threading(self):\n \"\"\"\n target: test concurrent insert\n method: multi threads insert\n expected: verify num entities\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(ct.default_nb)\n thread_num = 4\n threads = []\n primary_keys = df[ct.default_int64_field_name].values.tolist()\n\n def insert(thread_i):\n log.debug(f'In thread-{thread_i}')\n mutation_res, _ = collection_w.insert(df)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == primary_keys\n\n for i in range(thread_num):\n x = threading.Thread(target=insert, args=(i,))\n threads.append(x)\n x.start()\n for t in threads:\n t.join()\n assert collection_w.num_entities == ct.default_nb * thread_num\n\n @pytest.mark.tags(CaseLabel.L2)\n @pytest.mark.skip(reason=\"Currently primary keys are not unique\")\n def test_insert_multi_threading_auto_id(self):\n \"\"\"\n target: test concurrent insert auto_id=True collection\n method: 1.create auto_id=True collection 2.concurrent insert\n expected: verify primary keys unique\n \"\"\"\n pass\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_multi_times(self):\n \"\"\"\n target: test insert multi times\n method: insert data multi times\n expected: verify num entities\n \"\"\"\n c_name = cf.gen_unique_str(prefix)\n collection_w = self.init_collection_wrap(name=c_name)\n step = 120\n for _ in range(ct.default_nb // step):\n df = cf.gen_default_dataframe_data(step)\n mutation_res, _ = collection_w.insert(data=df)\n assert mutation_res.insert_count == step\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_all_datatype_collection(self):\n \"\"\"\n target: test insert into collection that contains all datatype fields\n method: 1.create all datatype collection 2.insert data\n expected: verify num 
entities\n \"\"\"\n self._connect()\n nb = 100\n df = cf.gen_dataframe_all_data_type(nb=nb)\n self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,\n primary_field=ct.default_int64_field_name)\n assert self.collection_wrap.num_entities == nb\n\n\nclass TestInsertAsync(TestcaseBase):\n \"\"\"\n ******************************************************************\n The following cases are used to test insert async\n ******************************************************************\n \"\"\"\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_sync(self):\n \"\"\"\n target: test async insert\n method: insert with async=True\n expected: verify num entities\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(nb=ct.default_nb)\n future, _ = collection_w.insert(data=df, _async=True)\n future.done()\n mutation_res = future.result()\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_async_false(self):\n \"\"\"\n target: test insert with false async\n method: async = false\n expected: verify num entities\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(nb=ct.default_nb)\n mutation_res, _ = collection_w.insert(data=df, _async=False)\n assert mutation_res.insert_count == ct.default_nb\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L1)\n def test_insert_async_callback(self):\n \"\"\"\n target: test insert with callback func\n method: insert with callback func\n expected: verify num entities\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(nb=ct.default_nb)\n future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)\n future.done()\n mutation_res = future.result()\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == ct.default_nb\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_async_long(self):\n \"\"\"\n target: test insert with async\n method: insert 5w entities with callback func\n expected: verify num entities\n \"\"\"\n nb = 50000\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(nb)\n future, _ = collection_w.insert(data=df, _async=True)\n future.done()\n mutation_res = future.result()\n assert mutation_res.insert_count == nb\n assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()\n assert collection_w.num_entities == nb\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_async_callback_timeout(self):\n \"\"\"\n target: test insert async with callback\n method: insert 10w entities with timeout=1\n expected: raise exception\n \"\"\"\n nb = 100000\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data(nb)\n future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result, timeout=1)\n with pytest.raises(Exception):\n future.result()\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_async_invalid_data(self):\n \"\"\"\n target: test insert async with invalid data\n method: insert 
async with invalid data\n expected: raise exception\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]\n df = pd.DataFrame(columns=columns)\n error = {ct.err_code: 0, ct.err_msg: \"Cannot infer schema from empty dataframe\"}\n collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)\n\n @pytest.mark.tags(CaseLabel.L2)\n def test_insert_async_invalid_partition(self):\n \"\"\"\n target: test insert async with invalid partition\n method: insert async with invalid partition\n expected: raise exception\n \"\"\"\n collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))\n df = cf.gen_default_dataframe_data()\n err_msg = \"partitionID of partitionName:p can not be find\"\n future, _ = collection_w.insert(data=df, partition_name=\"p\", _async=True)\n future.done()\n with pytest.raises(Exception, match=err_msg):\n future.result()\n\n\ndef assert_mutation_result(mutation_res):\n assert mutation_res.insert_count == ct.default_nb\n" ]
[ [ "pandas.DataFrame", "numpy.float32" ] ]
eyalbetzalel/pytorch-generative-1
[ "d491fa0a8ab37ad3b8aa1092b24ff7d863c9fbd8" ]
[ "pytorch_generative/datasets.py" ]
[ "\"\"\"Extra generative modeling benchmark datasets not provided by PyTorch.\"\"\"\n\nimport os\nimport urllib\n\nimport PIL\nimport numpy as np\nimport torch\nfrom torch.utils import data\nfrom torchvision.datasets import utils\nfrom torchvision.datasets import vision\n\n\ndef _read_image_file(path, shape):\n with open(path, 'rb') as f:\n images = np.loadtxt(f, delimiter=\" \", dtype=np.uint8) * 255\n return torch.from_numpy(images).view(-1, *shape)\n\n\nclass BinarizedMNIST(vision.VisionDataset):\n \"\"\"A specific binarization of the MNIST images.\n\n Originally used in Salakhutdinov & Murray (2008). This dataset is used to \n evaluate generative models of images, so labels are not provided. \n \n NOTE: The evaluation split is merged into the training set.\n \"\"\"\n\n _URL = ('http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/'\n 'binarized_mnist_')\n resources = [_URL + \"train.amat\", _URL + \"valid.amat\", _URL + \"test.amat\"]\n train_file = 'train.pt'\n valid_file = 'valid.pt'\n test_file = 'test.pt'\n\n def __init__(self, root, split='train', transform=None):\n \"\"\"Initializes a new BinarizedMNIST instance.\n \n Args:\n root: The directory containing the data. If the data does not exist, it\n will be download to this directory.\n split: Which split to use. Must be one of 'train', 'valid', or 'test'.\n transform: A torchvision.transform to apply to the data.\n \"\"\"\n super().__init__(root, transform=transform)\n assert split in ('train', 'valid', 'test')\n self._raw_folder = os.path.join(self.root, 'BinarizedMNIST', 'raw')\n self._folder = os.path.join(self.root, 'BinarizedMNIST')\n self.train = train\n if not self._check_exists():\n self.download()\n self.data = torch.load(os.path.join(self._folder, split + '.pt'))\n\n def __getitem__(self, index):\n \"\"\"Returns the tuple (img, None) with the given index.\"\"\"\n img = self.data[index]\n # Return PIL images to be connsistent with other datasets.\n img = PIL.Image.fromarray(img.numpy(), mode='L')\n if self.transform is not None:\n img = self.transform(img)\n return img\n\n def __len__(self):\n return len(self.data)\n\n def _check_exists(self):\n return (os.path.exists(os.path.join(self._folder, self.train_file)) and\n os.path.exists(os.path.join(self._folder, self.test_file)))\n\n def download(self):\n \"\"\"Download the MNIST data if it doesn't exist in the root folder.\"\"\"\n if self._check_exists():\n return\n\n # Download files.\n os.makedirs(self._folder, exist_ok=True)\n os.makedirs(self._raw_folder, exist_ok=True)\n for url in self.resources:\n filename = url.rpartition('/')[-1]\n utils.download_url(url, root=self._raw_folder, filename=filename)\n\n # Process and save.\n shape = 28, 28\n train_set = _read_image_file(\n os.path.join(self._raw_folder, 'binarized_mnist_train.amat'), shape)\n with open(os.path.join(self._folder, self.train_file), 'wb') as f:\n torch.save(train_set, f)\n valid_set = _read_image_file(\n os.path.join(self._raw_folder, 'binarized_mnist_valid.amat'), shape)\n with open(os.path.join(self._folder, self.valid_file), 'wb') as f:\n torch.save(valid_set, f)\n test_set = _read_image_file(\n os.path.join(self._raw_folder, 'binarized_mnist_test.amat'), shape)\n with open(os.path.join(self._folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n def extra_repr(self):\n return \"Split: {}\".format(\"Train\" if self.train else \"Test\")\n" ]
[ [ "torch.save", "numpy.loadtxt", "torch.from_numpy" ] ]
akeemlh/gala
[ "0fdaf9159bccc59af2a3525f2926e04501754f48" ]
[ "gala/potential/potential/core.py" ]
[ "# Standard library\nimport abc\nfrom collections import OrderedDict\nimport copy as pycopy\nimport warnings\nimport uuid\n\n# Third-party\nimport numpy as np\nfrom astropy.constants import G\nimport astropy.units as u\nfrom astropy.utils import isiterable\ntry:\n from scipy.spatial.transform import Rotation\nexcept ImportError as exc:\n raise ImportError(\n \"Gala requires scipy>=1.2: make sure you have updated your version of \"\n \"scipy and try importing gala again.\"\n ) from exc\n\n# Project\nfrom gala.util import GalaDeprecationWarning\nfrom ..common import CommonBase\nfrom ...util import ImmutableDict, atleast_2d\nfrom ...units import DimensionlessUnitSystem\n\n__all__ = [\"PotentialBase\", \"CompositePotential\"]\n\n\nclass PotentialBase(CommonBase, metaclass=abc.ABCMeta):\n \"\"\"\n A baseclass for defining pure-Python gravitational potentials.\n\n Subclasses must define (at minimum) a method that evaluates the potential\n energy at a given position ``q`` and time ``t``: ``_energy(q, t)``. For\n integration, the subclasses must also define a method to evaluate the\n gradient, ``_gradient(q, t)``. Optionally, they may also define methods to\n compute the density and hessian: ``_density()``, ``_hessian()``.\n \"\"\"\n ndim = 3\n\n def __init__(self, *args, units=None, origin=None, R=None, **kwargs):\n\n if self._GSL_only:\n from gala._cconfig import GSL_ENABLED\n if not GSL_ENABLED:\n raise ValueError(\n \"Gala was compiled without GSL and so this potential -- \"\n f\"{str(self.__class__)} -- will not work. See the gala \"\n \"documentation for more information about installing and \"\n \"using GSL with gala: \"\n \"http://gala.adrian.pw/en/latest/install.html\")\n\n parameter_values = self._parse_parameter_values(*args, **kwargs)\n self._setup_potential(parameters=parameter_values,\n origin=origin,\n R=R,\n units=units)\n\n def _setup_potential(self, parameters, origin=None, R=None, units=None):\n\n self._units = self._validate_units(units)\n self.parameters = self._prepare_parameters(parameters, self.units)\n\n try:\n self.G = G.decompose(self.units).value\n except u.UnitConversionError:\n # TODO: this is a convention that and could lead to confusion!\n self.G = 1.\n\n if origin is None:\n origin = np.zeros(self.ndim)\n self.origin = self._remove_units(origin)\n\n if R is not None and self.ndim not in [2, 3]:\n raise NotImplementedError('Gala potentials currently only support '\n 'rotations when ndim=2 or ndim=3.')\n\n if R is not None:\n if isinstance(R, Rotation):\n R = R.as_matrix()\n R = np.array(R)\n\n if R.shape != (self.ndim, self.ndim):\n raise ValueError('Rotation matrix passed to potential {0} has '\n 'an invalid shape: expected {1}, got {2}'\n .format(self.__class__.__name__,\n (self.ndim, self.ndim), R.shape))\n self.R = R\n\n @classmethod\n def to_sympy(cls):\n \"\"\"Return a representation of this potential class as a sympy expression\n\n Returns\n -------\n expr : sympy expression\n vars : dict\n A dictionary of sympy symbols used in the expression.\n \"\"\"\n raise NotImplementedError(\"to_sympy() is not implemented for this \"\n f\"class {cls}\")\n\n @classmethod\n def to_latex(cls):\n \"\"\"Return a string LaTeX representation of this potential\n\n Returns\n -------\n latex_str : str\n The latex expression as a Python string.\n \"\"\"\n try:\n expr, *_ = cls.to_sympy()\n except NotImplementedError:\n raise NotImplementedError(\n \".to_latex() requires having a .to_sympy() method implemented \"\n \"on the requesting potential class\")\n\n # testing for this 
import happens in the sympy method\n import sympy as sy\n return sy.latex(expr)\n\n ###########################################################################\n # Abstract methods that must be implemented by subclasses\n #\n @abc.abstractmethod\n def _energy(self, q, t=0.):\n pass\n\n @abc.abstractmethod\n def _gradient(self, q, t=0.):\n pass\n\n def _density(self, q, t=0.):\n raise NotImplementedError(\"This Potential has no implemented density \"\n \"function.\")\n\n def _hessian(self, q, t=0.):\n raise NotImplementedError(\"This Potential has no implemented Hessian.\")\n\n ###########################################################################\n # Utility methods\n #\n def _remove_units(self, x):\n \"\"\"\n Always returns an array. If a Quantity is passed in, it converts to the\n units associated with this object and returns the value.\n \"\"\"\n if hasattr(x, 'unit'):\n x = x.decompose(self.units).value\n\n else:\n x = np.array(x)\n\n return x\n\n def _remove_units_prepare_shape(self, x):\n \"\"\"\n This is similar to that implemented by\n `gala.potential.common.CommonBase`, but returns just the position if the\n input is a `PhaseSpacePosition`.\n \"\"\"\n from gala.dynamics import PhaseSpacePosition\n\n if hasattr(x, 'unit'):\n x = x.decompose(self.units).value\n\n elif isinstance(x, PhaseSpacePosition):\n x = x.cartesian.xyz.decompose(self.units).value\n\n x = atleast_2d(x, insert_axis=1).astype(np.float64)\n\n if x.shape[0] != self.ndim:\n raise ValueError(\n f\"Input position has ndim={x.shape[0]}, but this potential \"\n f\"expects an {self.ndim}-dimensional position.\")\n\n return x\n\n ###########################################################################\n # Core methods that use the above implemented functions\n #\n def energy(self, q, t=0.):\n \"\"\"\n Compute the potential energy at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n E : `~astropy.units.Quantity`\n The potential energy per unit mass or value of the potential.\n \"\"\"\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n ret_unit = self.units['energy'] / self.units['mass']\n\n return self._energy(q, t=t).T.reshape(orig_shape[1:]) * ret_unit\n\n def gradient(self, q, t=0.):\n \"\"\"\n Compute the gradient of the potential at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n grad : `~astropy.units.Quantity`\n The gradient of the potential. 
Will have the same shape as\n the input position.\n \"\"\"\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n ret_unit = self.units['length'] / self.units['time']**2\n uu = self.units['acceleration']\n return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(uu)\n\n def density(self, q, t=0.):\n \"\"\"\n Compute the density value at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n dens : `~astropy.units.Quantity`\n The potential energy or value of the potential. If the input\n position has shape ``q.shape``, the output energy will have\n shape ``q.shape[1:]``.\n \"\"\"\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n ret_unit = self.units['mass'] / self.units['length']**3\n return (self._density(q, t=t).T * ret_unit).to(\n self.units['mass density'])\n\n def hessian(self, q, t=0.):\n \"\"\"\n Compute the Hessian of the potential at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n hess : `~astropy.units.Quantity`\n The Hessian matrix of second derivatives of the potential. If the\n input position has shape ``q.shape``, the output energy will have\n shape ``(q.shape[0],q.shape[0]) + q.shape[1:]``. That is, an\n ``n_dim`` by ``n_dim`` array (matrix) for each position.\n \"\"\"\n if (self.R is not None and\n not np.allclose(np.diag(self.R), 1., atol=1e-15, rtol=0)):\n raise NotImplementedError(\"Computing Hessian matrices for rotated \"\n \"potentials is currently not supported.\")\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n ret_unit = 1 / self.units['time']**2\n hess = np.moveaxis(self._hessian(q, t=t), 0, -1)\n return hess.reshape((orig_shape[0],\n orig_shape[0]) + orig_shape[1:]) * ret_unit\n\n ###########################################################################\n # Convenience methods that make use the base methods\n #\n def acceleration(self, q, t=0.):\n \"\"\"\n Compute the acceleration due to the potential at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n Position to compute the acceleration at.\n\n Returns\n -------\n acc : `~astropy.units.Quantity`\n The acceleration. Will have the same shape as the input\n position array, ``q``.\n \"\"\"\n return -self.gradient(q, t=t)\n\n def mass_enclosed(self, q, t=0.):\n \"\"\"\n Estimate the mass enclosed within the given position by assuming the potential\n is spherical.\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n Position(s) to estimate the enclossed mass.\n\n Returns\n -------\n menc : `~astropy.units.Quantity`\n Mass enclosed at the given position(s). 
If the input position\n has shape ``q.shape``, the output energy will have shape\n ``q.shape[1:]``.\n \"\"\"\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n\n # small step-size in direction of q\n h = 1E-3 # MAGIC NUMBER\n\n # Radius\n r = np.sqrt(np.sum(q**2, axis=1))\n\n epsilon = h*q/r[:, np.newaxis]\n\n dPhi_dr_plus = self._energy(q + epsilon, t=t)\n dPhi_dr_minus = self._energy(q - epsilon, t=t)\n diff = (dPhi_dr_plus - dPhi_dr_minus)\n\n if isinstance(self.units, DimensionlessUnitSystem):\n Gee = 1.\n else:\n Gee = G.decompose(self.units).value\n\n Menc = np.abs(r*r * diff / Gee / (2.*h))\n Menc = Menc.reshape(orig_shape[1:])\n\n sgn = 1.\n if 'm' in self.parameters and self.parameters['m'] < 0:\n sgn = -1.\n\n return sgn * Menc * self.units['mass']\n\n def circular_velocity(self, q, t=0.):\n \"\"\"\n Estimate the circular velocity at the given position assuming the\n potential is spherical.\n\n Parameters\n ----------\n q : array_like, numeric\n Position(s) to estimate the circular velocity.\n\n Returns\n -------\n vcirc : `~astropy.units.Quantity`\n Circular velocity at the given position(s). If the input position\n has shape ``q.shape``, the output energy will have shape\n ``q.shape[1:]``.\n\n \"\"\"\n q = self._remove_units_prepare_shape(q)\n\n # Radius\n r = np.sqrt(np.sum(q**2, axis=0)) * self.units['length']\n dPhi_dxyz = self.gradient(q, t=t)\n dPhi_dr = np.sum(dPhi_dxyz * q/r.value, axis=0)\n\n return self.units.decompose(np.sqrt(r * np.abs(dPhi_dr)))\n\n ###########################################################################\n # Python special methods\n #\n def __call__(self, q):\n return self.energy(q)\n\n def __add__(self, other):\n if not isinstance(other, PotentialBase):\n raise TypeError(f'Cannot add a {self.__class__.__name__} to a '\n f'{other.__class__.__name__}')\n\n new_pot = CompositePotential()\n\n if isinstance(self, CompositePotential):\n for k, v in self.items():\n new_pot[k] = v\n\n else:\n k = str(uuid.uuid4())\n new_pot[k] = self\n\n if isinstance(other, CompositePotential):\n for k, v in self.items():\n if k in new_pot:\n raise KeyError(f'Potential component \"{k}\" already exists '\n '-- duplicate key provided in potential '\n 'addition')\n new_pot[k] = v\n\n else:\n k = str(uuid.uuid4())\n new_pot[k] = other\n\n return new_pot\n\n ###########################################################################\n # Convenience methods that do fancy things\n #\n def plot_contours(self, grid, filled=True, ax=None, labels=None,\n subplots_kw=dict(), **kwargs):\n \"\"\"\n Plot equipotentials contours. Computes the potential energy on a grid\n (specified by the array `grid`).\n\n .. warning:: Right now the grid input must be arrays and must already\n be in the unit system of the potential. Quantity support is coming...\n\n Parameters\n ----------\n grid : tuple\n Coordinate grids or slice value for each dimension. Should be a\n tuple of 1D arrays or numbers.\n filled : bool (optional)\n Use :func:`~matplotlib.pyplot.contourf` instead of\n :func:`~matplotlib.pyplot.contour`. 
Default is ``True``.\n ax : matplotlib.Axes (optional)\n labels : iterable (optional)\n List of axis labels.\n subplots_kw : dict\n kwargs passed to matplotlib's subplots() function if an axes object\n is not specified.\n kwargs : dict\n kwargs passed to either contourf() or plot().\n\n Returns\n -------\n fig : `~matplotlib.Figure`\n\n \"\"\"\n\n import matplotlib.pyplot as plt\n from matplotlib import cm\n\n # figure out which elements are iterable, which are numeric\n _grids = []\n _slices = []\n for ii, g in enumerate(grid):\n if isiterable(g):\n _grids.append((ii, g))\n else:\n _slices.append((ii, g))\n\n # figure out the dimensionality\n ndim = len(_grids)\n\n # if ndim > 2, don't know how to handle this!\n if ndim > 2:\n raise ValueError(\n \"ndim > 2: you can only make contours on a 2D grid. For other \"\n \"dimensions, you have to specify values to slice.\"\n )\n\n if ax is None:\n # default figsize\n fig, ax = plt.subplots(1, 1, **subplots_kw)\n else:\n fig = ax.figure\n\n if ndim == 1:\n # 1D curve\n x1 = _grids[0][1]\n r = np.zeros((len(_grids) + len(_slices), len(x1)))\n r[_grids[0][0]] = x1\n\n for ii, slc in _slices:\n r[ii] = slc\n\n Z = self.energy(r*self.units['length']).value\n ax.plot(x1, Z, **kwargs)\n\n if labels is not None:\n ax.set_xlabel(labels[0])\n ax.set_ylabel(\"potential\")\n else:\n # 2D contours\n x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1])\n shp = x1.shape\n x1, x2 = x1.ravel(), x2.ravel()\n\n r = np.zeros((len(_grids) + len(_slices), len(x1)))\n r[_grids[0][0]] = x1\n r[_grids[1][0]] = x2\n\n for ii, slc in _slices:\n r[ii] = slc\n\n Z = self.energy(r*self.units['length']).value\n\n # make default colormap not suck\n cmap = kwargs.pop('cmap', cm.Blues)\n if filled:\n ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),\n cmap=cmap, **kwargs)\n else:\n ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),\n cmap=cmap, **kwargs)\n\n if labels is not None:\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n\n return fig\n\n def plot_density_contours(self, grid, filled=True, ax=None, labels=None,\n subplots_kw=dict(), **kwargs):\n \"\"\"\n Plot density contours. Computes the density on a grid\n (specified by the array `grid`).\n\n .. warning::\n\n For now, the grid input must be arrays and must already be in\n the unit system of the potential. Quantity support is coming...\n\n Parameters\n ----------\n grid : tuple\n Coordinate grids or slice value for each dimension. Should be a\n tuple of 1D arrays or numbers.\n filled : bool (optional)\n Use :func:`~matplotlib.pyplot.contourf` instead of\n :func:`~matplotlib.pyplot.contour`. Default is ``True``.\n ax : matplotlib.Axes (optional)\n labels : iterable (optional)\n List of axis labels.\n subplots_kw : dict\n kwargs passed to matplotlib's subplots() function if an axes object\n is not specified.\n kwargs : dict\n kwargs passed to either contourf() or plot().\n\n Returns\n -------\n fig : `~matplotlib.Figure`\n\n \"\"\"\n\n import matplotlib.pyplot as plt\n from matplotlib import cm\n\n # figure out which elements are iterable, which are numeric\n _grids = []\n _slices = []\n for ii, g in enumerate(grid):\n if isiterable(g):\n _grids.append((ii, g))\n else:\n _slices.append((ii, g))\n\n # figure out the dimensionality\n ndim = len(_grids)\n\n # if ndim > 2, don't know how to handle this!\n if ndim > 2:\n raise ValueError(\n \"ndim > 2: you can only make contours on a 2D grid. 
For other \"\n \"dimensions, you have to specify values to slice.\"\n )\n\n if ax is None:\n # default figsize\n fig, ax = plt.subplots(1, 1, **subplots_kw)\n else:\n fig = ax.figure\n\n if ndim == 1:\n # 1D curve\n x1 = _grids[0][1]\n r = np.zeros((len(_grids) + len(_slices), len(x1)))\n r[_grids[0][0]] = x1\n\n for ii, slc in _slices:\n r[ii] = slc\n\n Z = self.density(r*self.units['length']).value\n ax.plot(x1, Z, **kwargs)\n\n if labels is not None:\n ax.set_xlabel(labels[0])\n ax.set_ylabel(\"potential\")\n else:\n # 2D contours\n x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1])\n shp = x1.shape\n x1, x2 = x1.ravel(), x2.ravel()\n\n r = np.zeros((len(_grids) + len(_slices), len(x1)))\n r[_grids[0][0]] = x1\n r[_grids[1][0]] = x2\n\n for ii, slc in _slices:\n r[ii] = slc\n\n Z = self.density(r*self.units['length']).value\n\n # make default colormap not suck\n cmap = kwargs.pop('cmap', cm.Blues)\n if filled:\n ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),\n cmap=cmap, **kwargs)\n else:\n ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp),\n cmap=cmap, **kwargs)\n\n # cs.cmap.set_under('w')\n # cs.cmap.set_over('k')\n\n if labels is not None:\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n\n return fig\n\n def integrate_orbit(self, *args, **kwargs):\n \"\"\"\n Integrate an orbit in the current potential using the integrator class\n provided. Uses same time specification as `Integrator.run()` -- see\n the documentation for `gala.integrate` for more information.\n\n Parameters\n ----------\n w0 : `~gala.dynamics.PhaseSpacePosition`, array_like\n Initial conditions.\n Integrator : `~gala.integrate.Integrator` (optional)\n Integrator class to use.\n Integrator_kwargs : dict (optional)\n Any extra keyword argumets to pass to the integrator class\n when initializing. Only works in non-Cython mode.\n cython_if_possible : bool (optional)\n If there is a Cython version of the integrator implemented,\n and the potential object has a C instance, using Cython\n will be *much* faster.\n **time_spec\n Specification of how long to integrate. See documentation\n for `~gala.integrate.parse_time_specification`.\n\n Returns\n -------\n orbit : `~gala.dynamics.Orbit`\n\n \"\"\"\n from ..hamiltonian import Hamiltonian\n return Hamiltonian(self).integrate_orbit(*args, **kwargs)\n\n def total_energy(self, x, v):\n \"\"\"\n Compute the total energy (per unit mass) of a point in phase-space\n in this potential. Assumes the last axis of the input position /\n velocity is the dimension axis, e.g., for 100 points in 3-space,\n the arrays should have shape (100, 3).\n\n Parameters\n ----------\n x : array_like, numeric\n Position.\n v : array_like, numeric\n Velocity.\n \"\"\"\n warnings.warn(\n \"Use the energy methods on Orbit objects instead. In a future \"\n \"release this will be removed.\",\n GalaDeprecationWarning)\n\n v = atleast_2d(v, insert_axis=1)\n return self.energy(x) + 0.5*np.sum(v**2, axis=0)\n\n def save(self, f):\n \"\"\"\n Save the potential to a text file. 
See :func:`~gala.potential.save`\n for more information.\n\n Parameters\n ----------\n f : str, file_like\n A filename or file-like object to write the input potential object to.\n\n \"\"\"\n from .io import save\n save(self, f)\n\n @property\n def units(self):\n return self._units\n\n def replace_units(self, units, copy=True):\n \"\"\"Change the unit system of this potential.\n\n Parameters\n ----------\n units : `~gala.units.UnitSystem`\n Set of non-reducable units that specify (at minimum) the\n length, mass, time, and angle units.\n copy : bool (optional)\n If True, returns a copy, if False, changes this object.\n \"\"\"\n if copy:\n pot = pycopy.deepcopy(self)\n else:\n pot = self\n\n # TODO: this is repeated code - see equivalent in cpotential.pyx\n tmp = [isinstance(units, DimensionlessUnitSystem),\n isinstance(self.units, DimensionlessUnitSystem)]\n if not all(tmp) and any(tmp):\n raise ValueError(\"Cannot replace a dimensionless unit system with \"\n \"a unit system with physical units, or vice versa\")\n\n PotentialBase.__init__(pot,\n origin=self.origin,\n R=self.R,\n units=units,\n **self.parameters)\n\n return pot\n\n ###########################################################################\n # Deprecated methods\n #\n def _value(self, q, t=0.):\n warnings.warn(\"Use `_energy()` instead.\", GalaDeprecationWarning)\n return self._energy(q, t=t)\n\n def value(self, *args, **kwargs):\n __doc__ = self.energy.__doc__ # noqa\n warnings.warn(\"Use `energy()` instead.\", GalaDeprecationWarning)\n return self.energy(*args, **kwargs)\n\n ###########################################################################\n # Interoperability with other packages\n #\n def to_galpy_potential(self, ro=None, vo=None):\n \"\"\"Convert a Gala potential to a Galpy potential instance\n\n Parameters\n ----------\n ro : quantity-like (optional)\n vo : quantity-like (optional)\n \"\"\"\n from .interop import gala_to_galpy_potential\n return gala_to_galpy_potential(self, ro=ro, vo=vo)\n\n\nclass CompositePotential(PotentialBase, OrderedDict):\n \"\"\"\n A potential composed of several distinct components. For example,\n two point masses or a galactic disk and halo, each with their own\n potential model.\n\n A `CompositePotential` is created like a Python dictionary, e.g.::\n\n >>> p1 = SomePotential(func1) # doctest: +SKIP\n >>> p2 = SomePotential(func2) # doctest: +SKIP\n >>> cp = CompositePotential(component1=p1, component2=p2) # doctest: +SKIP\n\n This object actually acts like a dictionary, so if you want to\n preserve the order of the potential components, use::\n\n >>> cp = CompositePotential() # doctest: +SKIP\n >>> cp['component1'] = p1 # doctest: +SKIP\n >>> cp['component2'] = p2 # doctest: +SKIP\n\n You can also use any of the built-in `Potential` classes as\n components::\n\n >>> from gala.potential import HernquistPotential\n >>> cp = CompositePotential()\n >>> cp['spheroid'] = HernquistPotential(m=1E11, c=10.,\n ... 
units=(u.kpc, u.Myr, u.Msun, u.radian))\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._units = None\n self.ndim = None\n\n if len(args) > 0 and isinstance(args[0], list):\n for k, v in args[0]:\n kwargs[k] = v\n else:\n for i, v in args:\n kwargs[str(i)] = v\n\n self.lock = False\n for v in kwargs.values():\n self._check_component(v)\n\n OrderedDict.__init__(self, **kwargs)\n\n self.R = None # TODO: this is a little messy\n\n def __setitem__(self, key, value):\n self._check_component(value)\n super(CompositePotential, self).__setitem__(key, value)\n\n def _check_component(self, p):\n if not isinstance(p, PotentialBase):\n raise TypeError(\"Potential components may only be Potential \"\n \"objects, not {0}.\".format(type(p)))\n\n if self.units is None:\n self._units = p.units\n self.ndim = p.ndim\n\n else:\n if (sorted([str(x) for x in self.units]) !=\n sorted([str(x) for x in p.units])):\n raise ValueError(\"Unit system of new potential component must \"\n \"match unit systems of other potential \"\n \"components.\")\n\n if p.ndim != self.ndim:\n raise ValueError(\"All potential components must have the same \"\n \"number of phase-space dimensions ({} in this \"\n \"case)\".format(self.ndim))\n\n if self.lock:\n raise ValueError(\"Potential object is locked - new components can \"\n \"only be added to unlocked potentials.\")\n\n @property\n def parameters(self):\n params = dict()\n for k, v in self.items():\n params[k] = v.parameters\n return ImmutableDict(**params)\n\n def replace_units(self, units):\n \"\"\"Change the unit system of this potential.\n\n Parameters\n ----------\n units : `~gala.units.UnitSystem`\n Set of non-reducable units that specify (at minimum) the\n length, mass, time, and angle units.\n \"\"\"\n _lock = self.lock\n pots = self.__class__()\n\n pots._units = None\n pots.lock = False\n\n for k, v in self.items():\n pots[k] = v.replace_units(units)\n\n pots.lock = _lock\n return pots\n\n def _energy(self, q, t=0.):\n return np.sum([p._energy(q, t) for p in self.values()], axis=0)\n\n def _gradient(self, q, t=0.):\n return np.sum([p._gradient(q, t) for p in self.values()], axis=0)\n\n def _hessian(self, w, t=0.):\n return np.sum([p._hessian(w, t) for p in self.values()], axis=0)\n\n def _density(self, q, t=0.):\n return np.sum([p._density(q, t) for p in self.values()], axis=0)\n\n def __repr__(self):\n return \"<CompositePotential {}>\".format(\",\".join(self.keys()))\n\n\n_potential_docstring = \"\"\"units : `~gala.units.UnitSystem` (optional)\n Set of non-reducable units that specify (at minimum) the\n length, mass, time, and angle units.\n origin : `~astropy.units.Quantity` (optional)\n The origin of the potential, the default being 0.\n R : `~scipy.spatial.transform.Rotation`, array_like (optional)\n A Scipy ``Rotation`` object or an array representing a rotation matrix\n that specifies a rotation of the potential. This is applied *after* the\n origin shift. Default is the identity matrix.\n\"\"\"\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.subplots", "numpy.diag", "numpy.abs", "numpy.meshgrid" ] ]
jfacoustic/MyTwitterBot
[ "15a9509c41ba8c7049675048b4d05ab457270a7d" ]
[ "src/tftools/basic_functions.py" ]
[ "import tensorflow as tf\n\n\ndef init_wb(shape, name):\n \"\"\"\n Function initialize one matrix of weights and one bias vector.\n\n :type shape: tuple\n :type name: str\n :rtype: dictionary\n \"\"\"\n Winit = tf.truncated_normal(shape, mean=0, stddev=0.1)\n binit = tf.zeros(shape[-1])\n layer = {}\n layer[\"weights\"] = tf.get_variable(name + \"/weights\",\n dtype=tf.float32,\n initializer=Winit)\n layer[\"bias\"] = tf.get_variable(name + \"/bias\",\n dtype=tf.float32,\n initializer=binit)\n return layer\n\n\ndef affine_transformation(input_tensor, layer):\n \"\"\"\n Function that applies a affine transformation\n in the input tensor using the variables\n from the dict layer.\n\n :type input_tensor: tf tensor\n :type layer: dictionary\n :rtype: tf tensor\n \"\"\"\n return tf.add(tf.matmul(input_tensor, layer['weights']),\n layer['bias'])\n" ]
[ [ "tensorflow.truncated_normal", "tensorflow.zeros", "tensorflow.matmul", "tensorflow.get_variable" ] ]
CarlosW1998/DeepLearningClass
[ "d19cd4961d223cce3b474c4e1732011ba0295ada" ]
[ "NeuralNetwork/NN.py" ]
[ "import numpy as np\n\nclass NeuralNetwork:\n def __init__(self, input_shape, neurons, learning_rate):\n self.wights = []\n self.wights.append(np.random.rand(input_shape, neurons))\n self.wights.append(np.random.rand(neurons, 1))\n self.baias = np.zeros(neurons)\n self.learning_rate = learning_rate\n\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n\n" ]
[ [ "numpy.random.rand", "numpy.exp", "numpy.zeros" ] ]
Guigxs/AI-Quixo
[ "5ca625886ba28f41528b1eff291f1f1834947f05" ]
[ "ai/Quixo/ai.py" ]
[ "import cherrypy\nimport sys\nimport random\nimport numpy as np\n\nclass Server:\n @cherrypy.expose\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n def move(self):\n # Deal with CORS\n cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'\n cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'\n cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization, X-Requested-With'\n if cherrypy.request.method == \"OPTIONS\":\n return ''\n \n body = cherrypy.request.json\n \n print(\"\")\n print(\"######################################\")\n print(\"############# Stupid bot #############\")\n print(\"######################################\")\n print(\"\")\n print(\"--------------------------------------\")\n print(\"State of the game :\")\n game = body[\"game\"]\n matrix = np.resize(game, (5, 5))\n print(matrix)\n \n first = len(body['moves'])\n\n print(\"\")\n print(\"##########\", first, \"move(s) #########\") \n print(\"--------------------------------------\")\n print(\"\")\n print(\"--Logs:--\")\n\n\n print(\"\")\n print(\"##########\", first, \"move(s) ##########\") \n print(\"\")\n\n if first%2 == 0: #Premier joueur\n power = 0\n choice = AI().bestCube(power, game)\n print(\"Liste des choix :\", choice)\n print(\"First player with: X ({}) !\".format(power))\n cube = AI().cube(power, game, choice)\n direction = AI().direction(power, game, cube)\n print(\"-----------------------------------\")\n print(\"Send : X in\", cube, \"from\", direction)\n print(\"-----------------------------------\")\n return {'move' :{'cube' : cube, 'direction': direction}}\n \n elif first%2 == 1: #Scond joueur\n power = 1\n choice = AI().makeChoice(power, game)\n print(\"Liste des choix :\", choice)\n print(\"Second player with: O ({}) !\".format(power))\n cube = AI().cube(power, game, choice)\n direction = AI().direction(power, game, cube)\n print(\"-----------------------------------\")\n print(\"Send : O in\", cube, \"from\", direction)\n print(\"-----------------------------------\")\n return {'move' :{'cube' : cube, 'direction': direction}}\n\n\nclass AI():\n def __init__(self, *args, **kwargs):\n self.firstList = [0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 19, 20, 21, 22, 23, 24]\n self.firstDirections = ['N', 'S', 'E', 'W']\n self.forbidden = {'N':[0, 1, 2, 3, 4], 'S':[20, 21, 22, 23, 24], 'E':[4, 9, 14, 19, 24], 'W':[0, 5, 10, 15, 20]}\n self.increment = {'N': -5, 'S': 5, 'E': 1, 'W': -1}\n\n # def makeChoice(self, power, game):\n # choice = []\n # for i in range(len(game)):\n # if i in self.firstList:\n # if game[i] == power or game[i] == None:\n # choice.append(i)\n \n # return choice\n\n # def cube(self, player, game, choice):\n # print(\"Random cube is coming...\")\n # choixCube = random.choice(choice)\n # print(\"Cube :\", choixCube)\n # return choixCube\n \n # def direction(self, player, game, cube):\n # choixDirection = random.choice(self.firstDirections)\n # print(\"Check if {} is forbidden...\".format(choixDirection))\n # if cube in self.forbidden[choixDirection]:\n # print(\"Forbiden!\")\n # print(\"Retry...\")\n # return self.direction(player, game, cube)\n \n # print(\"Direction ok!\")\n # return choixDirection\n\n def bestCube(self, player, matrix):\n choice = []\n for i in range(len(matrix)):\n pass\n\n def bestDirection(self, player, matrix, cube):\n choice = []\n for i in range(len(self.firstDirections)):\n if cube not in self.forbidden[self.firstDirections[i]]:\n pass\n\n def applyDirection(self, player, matrix, cube, 
direction):\n colonne = cube%5\n ligne = int((cube - colonne)/5)\n\n game = np.resize(matrix, (5, 5))\n\n pass\n\n \n def checkAround(self, player, matrix, i):#Check si il y a des X ou O autour du cube\n if (matrix[i] == player) and ((matrix[i+1] == player and matrix[i+2] == player) or (matrix[i-1] == player and matrix[i-2] == player) or (matrix[i+6] == player and matrix[i+12] == player) or (matrix[i+5] == player and matrix[i+10] == player) or (matrix[i+4] == player and matrix[i+8] == player) or (matrix[i-5] == player and matrix[i-10] == player) or (matrix[i-6] == player and matrix[i-12] == player) or (matrix[i-4] == player and matrix[i-8] == player)):\n return True\n return False\n\n\n # def bestMove(self, player, matrix):\n # #Test les coups gagnant\n # jeu = matrix\n # choice = []\n # for i in range(len(jeu)):\n # if jeu[i] == None:\n # print(\"Try for\", i)\n # choice.append(i)\n # jeu[i] = player\n\n # if self.win(player, jeu) == True:\n # print(\"I win!\")\n # print(np.resize(jeu, (3, 3)))\n # return i\n\n # jeu[i] = None\n\n # #Blocage de l'adversaire\n # if player == 1:\n # adversaire = 0\n # else:\n # adversaire = 1\n\n # for i in range(len(jeu)):\n # if jeu[i] == None:\n # print(\"Try for\", i)\n # jeu[i] = adversaire\n\n # if self.win(adversaire, jeu) == True:\n # print(\"I block!\")\n # print(np.resize(jeu, (3, 3)))\n # return i\n\n # jeu[i] = None\n\n # return random.choice(choice) #Renvoit un nobre aléatoire \n\n\n def win(self, player, matrix): #Check si le coup est gangnant\n if (matrix[0]==player and matrix[1]==player and matrix[2]==player and matrix[3]==player and matrix[4]==player) or (matrix[5]==player and matrix[6]==player and matrix[7]==player and matrix[8]==player and matrix[9]==player) or (matrix[10]==player and matrix[11]==player and matrix[12]==player and matrix[13]==player and matrix[14]==player) or (matrix[15]==player and matrix[16]==player and matrix[17]==player and matrix[18]==player and matrix[19]==player) or (matrix[20]==player and matrix[21]==player and matrix[22]==player and matrix[23]==player and matrix[24]==player) or (matrix[0]==player and matrix[6]==player and matrix[12]==player and matrix[18]==player and matrix[24]==player) or (matrix[20]==player and matrix[16]==player and matrix[12]==player and matrix[8]==player and matrix[4]==player) or (matrix[0]==player and matrix[5]==player and matrix[10]==player and matrix[15]==player and matrix[20]==player) or (matrix[1]==player and matrix[6]==player and matrix[11]==player and matrix[16]==player and matrix[21]==player) or (matrix[2]==player and matrix[7]==player and matrix[12]==player and matrix[17]==player and matrix[22]==player) or (matrix[3]==player and matrix[8]==player and matrix[13]==player and matrix[18]==player and matrix[23]==player) or (matrix[4]==player and matrix[9]==player and matrix[14]==player and matrix[19]==player and matrix[24]==player):\n return True\n return False \n \n\n \n \n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n port=int(sys.argv[1])\n else:\n port=8080\n\n cherrypy.config.update({'server.socket_port': port})\n cherrypy.quickstart(Server())" ]
[ [ "numpy.resize" ] ]
yxuansu/HCL
[ "b7a2871e8006d148958bb8dc65670d0491f6a446" ]
[ "SMN_MSN/main_utlis.py" ]
[ "# -*- coding:utf-8 -*-\nimport torch\nimport pickle\nimport collections\nimport time\nimport numpy as np\nimport os\n#from modules.msn import MSN\n# ------------------------------------------------------------------------------------- #\n# measurement functions\ndef compute_Rn_k(scores,labels, n=2, k=1):\n total = 0\n correct = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n total = total+1\n sublist = np.asarray(scores[i:i+n])\n index = sublist.argsort()[::-1][0:k]\n if scores[i] in sublist[index]:\n correct = correct + 1\n return float(correct) / total\n\ndef compute_R2_1(scores,labels, n=10, k=1, m=2):\n total = 0\n correct = 0\n for i in range(0, len(labels), n):\n total = total+1\n true_response_index = []\n for j in range(i, i+n):\n if labels[j] == 1:\n true_response_index.append(j-i)\n sublist = np.asarray(scores[i:i+m])\n index = sublist.argsort()[::-1][0:k]\n # if len(np.intersect1d(index, true_response_index)) > 0:\n # correct = correct + 1\n correct += len(np.intersect1d(index, true_response_index)) * 1.0 / len(true_response_index)\n return float(correct) / total\n\ndef compute_R10_k(scores,labels, n=10, k=1):\n total = 0\n correct = 0\n for i in range(0, len(labels), n):\n total = total+1\n true_response_index = []\n for j in range(i, i+n):\n if labels[j] == 1:\n true_response_index.append(j-i)\n sublist = np.asarray(scores[i:i+n])\n index = sublist.argsort()[::-1][0:k]\n # if len(np.intersect1d(index, true_response_index)) > 0:\n # correct = correct + 1\n correct += len(np.intersect1d(index, true_response_index)) * 1.0 / len(true_response_index)\n return float(correct) / total\n\ndef compute_P1(scores, labels, n=10):\n '''precision at position 1'''\n total = 0\n correct = 0\n for i in range(0, len(labels), n):\n total = total+1\n sublist = np.asarray(scores[i:i+n])\n index = sublist.argsort()[::-1]\n p1 = 0.0\n if labels[i+index[0]] == 1: p1 = 1.0\n correct += p1\n return float(correct) / total\n\ndef compute_MAP(scores,labels, n=10):\n total = 0\n correct = 0\n for i in range(0, len(labels), n):\n total = total+1\n sublist = np.asarray(scores[i:i+n])\n index = sublist.argsort()[::-1]\n ap = 0.0\n count = 0\n for j, ans_index in enumerate(index):\n if labels[i+ans_index] == 1:\n count+=1\n ap += count / (j+1.0)\n correct += (ap / count)\n return float(correct) / total\n\ndef compute_MRR(scores,labels, n=10):\n total = 0\n correct = 0\n for i in range(0, len(labels), n):\n total = total+1\n sublist = np.asarray(scores[i:i+n])\n index = sublist.argsort()[::-1]\n ap = 0.0\n for j, ans_index in enumerate(index):\n if labels[i+ans_index] == 1:\n ap += 1.0 / (j+1.0)\n break\n correct += ap\n return float(correct) / total\n\n# ------------------------------------------------------------------------------------- #\n# data utlis functions\ndef read_pkl_file(filename, ed=None):\n s_time = time.time()\n print('----------load {}----------'.format(filename))\n with open(filename, 'rb') as f:\n data = pickle.load(f, encoding=ed) if ed else pickle.load(f)\n print('----------loading finish, cost:{}sec----------'.format(time.time()-s_time))\n return data\n\n" ]
[ [ "numpy.asarray", "numpy.intersect1d" ] ]
genvex/fishswarm
[ "50b07da5264e08a1e312457dd4c7088d5a5557e6" ]
[ "couzinswarm/simulation.py" ]
[ "\"\"\"\nSimulation module\n=================\n\nContains the `Swarm` class, which is used for simulation.\n\"\"\"\nimport numpy as np\n\nfrom couzinswarm.objects import Fish\n\nfrom progressbar import ProgressBar as PB\n\nclass Swarm:\n \"\"\"A class for a swarm simulation.\n \n Attributes\n ----------\n number_of_fish : int, default : 20\n The number of fish to be simulated\n fish : list of :mod:`couzinswarm.objects.Fish`\n Contains the `Fish` objects which are simulated in this setup.\n repulsion_radius : float, default : 1.0\n Fish within this radius will repel each other\n (unit: length of a single fish).\n orientation_width : float, default : 10.0\n The width of the hollow ball in which fish adjust their\n orientation.\n (unit: length of a single fish).\n attraction_width : float, default : 10.0\n The width of the hollow ball in which fish attract\n each other\n (unit: length of a single fish).\n angle_of_perception : float, default : 340/360*pi\n angle in which a fish can see other fish\n (unit: radians, with a maximum value of :math:`\\pi`.\n turning_rate : float, default : 0.1\n Rate at which the new direction is approached.\n The maximum angle change per time step is hence ``turning_rate * dt``\n (unit: radians per unit time).\n speed : float, default : 0.1\n Speed of a fish.\n (unit: fish length per unit time).\n noise_sigma : float, default : 0.01\n Standard deviation of radial noise whith \n which each direction adjustment is shifted\n (unit: radians).\n dt : float, default : 0.1\n how much time passes per step\n (unit: unit time).\n box_lengths : list or numpy.ndarray of float, default : [100,100,100]\n Dimensions of the simulation box in each dimension\n (unit: fish length)\n reflect_at_boundary list of bool, default : [True, True, True]\n for each spatial dimension decided whether boundaries should reflect.\n If they don't reflect they're considered to be periodic (not implemented yet)\n verbose : bool, default : False\n be chatty.\n show_progress : bool, default : False\n Show the progress of the simulation.\n\n \"\"\"\n\n def __init__(self, \n number_of_fish=20,\n repulsion_radius=1, \n orientation_width=10,\n attraction_width=10,\n angle_of_perception=340/360*np.pi, \n turning_rate=0.1,\n speed=0.1,\n noise_sigma=0.01,\n dt=0.1,\n box_lengths=[100,100,100],\n reflect_at_boundary = [True, True, True],\n verbose=False,\n show_progress=False,\n ):\n \"\"\"\n Setup a simulation with parameters as defined in the paper.\n https://www.sciencedirect.com/science/article/pii/S0022519302930651\n\n Fish will be created at random positions with random directions.\n\n Parameters\n ----------\n number_of_fish : int, default : 20\n The number of fish to be simulated\n repulsion_radius : float, default : 1.0\n Fish within this radius will repel each other\n (unit: length of a single fish).\n orientation_width : float, default : 10.0\n The width of the hollow ball in which fish adjust their\n orientation.\n (unit: length of a single fish).\n attraction_width : float, default : 10.0\n The width of the hollow ball in which fish attract\n each other\n (unit: length of a single fish).\n angle_of_perception : float, default : 340/360*pi\n angle in which a fish can see other fish\n (unit: radians, with a maximum value of :math:`\\pi`.\n turning_rate : float, default : 0.1\n Rate at which the new direction is approached.\n The maximum angle change per time step is hence ``turning_rate * dt``\n (unit: radians per unit time).\n speed : float, default : 0.1\n Speed of a fish.\n (unit: fish length 
per unit time).\n noise_sigma : float, default : 0.01\n Standard deviation of radial noise whith \n which each direction adjustment is shifted\n (unit: radians).\n dt : float, default : 0.1\n how much time passes per step\n (unit: unit time).\n box_lengths : list or numpy.ndarray of float, default : [100,100,100]\n Dimensions of the simulation box in each dimension\n (unit: fish length)\n reflect_at_boundary list of bool, default : [True, True, True]\n for each spatial dimension decided whether boundaries should reflect.\n If they don't reflect they're considered to be periodic\n verbose : bool, default : False\n be chatty.\n\n \"\"\"\n \n\n self.number_of_fish = number_of_fish\n self.repulsion_radius = repulsion_radius\n self.orientation_width = orientation_width\n self.attraction_width = attraction_width\n self.angle_of_perception = angle_of_perception\n self.turning_rate = turning_rate\n self.speed = speed\n self.noise_sigma = noise_sigma\n self.dt = dt\n self.box_lengths = np.array(box_lengths,dtype=float)\n self.reflect_at_boundary = reflect_at_boundary\n self.verbose = verbose\n self.show_progress = show_progress\n\n self.box_copies = [[0.],[0.],[0.]]\n\n for dim, reflect in enumerate(self.reflect_at_boundary):\n if not reflect:\n self.box_copies[dim].extend([-self.box_lengths[dim],+self.box_lengths[dim]])\n\n\n self.fish = []\n\n self.init_random()\n\n\n def init_random(self):\n \"\"\"\n Initialize the fish list\n \"\"\"\n\n self.fish = [ Fish(position=self.box_lengths*np.random.random((3,)),\n ID=i,\n verbose=self.verbose\n ) for i in range(self.number_of_fish) ]\n\n def simulate(self,N_time_steps):\n \"\"\"Simulate a swarm according to the rules.\n\n Parameters\n ----------\n N_time_steps : int\n Number of time steps to simulate.\n\n Returns\n -------\n positions : numpy.ndarray of shape ``(self.number_of_fish, N_time_steps+1, 3_)``\n Keeping track of the fish's positions for each time step.\n directions : numpy.ndarray of shape ``(self.number_of_fish, N_time_steps+1, 3_)``\n Keeping track of the fish's directions for each time step.\n \"\"\"\n\n\n # create result arrays and fill in initial positions\n positions = np.empty((self.number_of_fish,N_time_steps+1,3))\n directions = np.empty((self.number_of_fish,N_time_steps+1,3))\n for i in range(self.number_of_fish):\n positions[i,0,:] = self.fish[i].position\n directions[i,0,:] = self.fish[i].direction\n \n\n bar = PB(max_value=N_time_steps)\n # for each time step\n for t in range(1,N_time_steps+1):\n\n # iterate through fish pairs\n for i in range(self.number_of_fish-1):\n F_i = self.fish[i]\n r_i = F_i.position\n v_i = F_i.direction\n\n for j in range(i+1,self.number_of_fish):\n\n F_j = self.fish[j]\n relationship_counted = False\n\n for X in self.box_copies[0]:\n\n if relationship_counted:\n break\n\n for Y in self.box_copies[1]:\n for Z in self.box_copies[2]:\n\n\n r_j = F_j.position + np.array([X,Y,Z])\n v_j = F_j.direction\n\n # get their distance, and unit distance vector\n r_ij = (r_j - r_i) \n distance = np.linalg.norm(r_ij) \n r_ij /= distance\n r_ji = -r_ij\n\n # if their are within the repulsion zone, just add each other to\n # the repulsion events\n if distance < self.repulsion_radius:\n F_i.zor_update(r_ij)\n F_j.zor_update(r_ji)\n relationship_counted = True\n elif distance < self.repulsion_radius + self.orientation_width + self.attraction_width:\n\n # if they are within the hollow balls of orientation and attraction zone, \n # decide whether the fish can see each other\n angle_i = np.arccos(np.clip(np.dot(r_ij, v_i), 
-1.0, 1.0))\n angle_j = np.arccos(np.clip(np.dot(r_ji, v_j), -1.0, 1.0))\n\n if self.verbose:\n print(\"angle_i\", angle_i, self.angle_of_perception)\n print(\"angle_j\", angle_j, self.angle_of_perception)\n\n # if i can see j, add j's influence\n if angle_i < self.angle_of_perception:\n if distance < self.repulsion_radius + self.orientation_width:\n F_i.zoo_update(v_j)\n else:\n F_i.zoa_update(r_ij)\n\n # if j can see i, add i's influence\n if angle_j < self.angle_of_perception:\n if distance < self.repulsion_radius + self.orientation_width:\n F_j.zoo_update(v_i)\n else:\n F_j.zoa_update(r_ji)\n\n relationship_counted = True\n\n # for each fish\n for i in range(self.number_of_fish):\n\n F_i = self.fish[i]\n\n # evaluate the new demanded direction and reset the influence counters\n new_v = F_i.evaluate_direction(self.turning_rate*self.dt,self.noise_sigma)\n\n # evaluate the demanded positional change according to the direction\n dr = self.speed * new_v * self.dt\n\n # check for boundary conditions\n for dim in range(3):\n\n # if new position would be out of boundaries\n if dr[dim]+F_i.position[dim] > self.box_lengths[dim] or \\\n dr[dim]+F_i.position[dim] < 0.0:\n\n # if this boundary is periodic\n if not self.reflect_at_boundary[dim]:\n if dr[dim]+F_i.position[dim] > self.box_lengths[dim]:\n dr[dim] -= self.box_lengths[dim]\n else:\n dr[dim] += self.box_lengths[dim]\n else:\n # if this boundary is reflective\n dr[dim] *= -1\n new_v[dim] *= -1\n\n # update the position and direction\n F_i.position += dr\n F_i.direction = new_v\n\n # save position and direction\n positions[i,t,:] = F_i.position\n directions[i,t,:] = F_i.direction\n\n bar.update(t)\n\n return positions, directions\n\n\nif __name__ == \"__main__\":\n\n swarm = Swarm(number_of_fish=2,speed=0.01,noise_sigma=0,turning_rate=0.1)\n\n swarm.fish[0].position = np.array([47,50.,50.])\n swarm.fish[0].direction = np.array([0.,0.,1.])\n swarm.fish[1].position = np.array([58.,50.,50])\n swarm.fish[1].direction = np.array([1.,0.,0.])\n\n\n\n N_t = 1000\n\n t = np.arange(N_t+1)\n r, v = swarm.simulate(N_t)\n print(\n swarm.fish[0].direction,\n swarm.fish[1].direction,\n )\n from bfmplot import pl\n from mpl_toolkits.mplot3d import Axes3D\n fig = pl.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for i in range(swarm.number_of_fish):\n ax.plot(r[i,:,0], r[i,:,1], r[i,:,2])\n\n #ax.set_xlim([0,swarm.box_lengths[0]])\n #ax.set_ylim([0,swarm.box_lengths[1]])\n #ax.set_zlim([0,swarm.box_lengths[2]])\n\n pl.show()\n\n\n\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.empty", "numpy.dot", "numpy.arange", "numpy.random.random" ] ]
vndee/sentivi
[ "3df2c604d4f9934be9019ac3d6fdef48c6cc7c33" ]
[ "sentivi/classifier/lstm.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom typing import Optional\nfrom sentivi.classifier.nn_clf import NeuralNetworkClassifier\n\n\nclass LSTM(nn.Module):\n def __init__(self,\n num_labels: int,\n embedding_size: int,\n hidden_size: int,\n bidirectional: bool = False,\n attention: bool = False,\n hidden_layers: int = 1):\n \"\"\"\n Initialize LSTM instance\n\n :param num_labels:\n :param embedding_size:\n :param hidden_size:\n :param bidirectional:\n :param attention:\n :param hidden_layers:\n \"\"\"\n super(LSTM, self).__init__()\n\n self.num_labels = num_labels\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.attention = attention\n self.bidirectional = bidirectional\n self.hidden_layers = hidden_layers\n\n self.lstm = torch.nn.LSTM(self.embedding_size, self.hidden_size, bidirectional=self.bidirectional,\n batch_first=True, num_layers=self.hidden_layers)\n self.linear = nn.Linear(self.hidden_size * (2 if self.bidirectional is True else 1), self.num_labels)\n\n def attention_layer(self, lstm_output, final_state):\n \"\"\"\n Attention Layer\n :param lstm_output:\n :param final_state:\n :return:\n \"\"\"\n hidden = final_state.view(-1, self.hidden_size * (2 if self.bidirectional is True else 1), 1)\n attn_weights = torch.bmm(lstm_output, hidden).squeeze(2)\n soft_attn_weights = torch.nn.functional.softmax(attn_weights, 1)\n context = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n return context\n\n def forward(self, inputs):\n \"\"\"\n Forward method for torch.nn.Module\n :param inputs:\n :return:\n \"\"\"\n hidden_state = torch.autograd.Variable(\n torch.zeros(self.hidden_layers * (2 if self.bidirectional is True else 1), inputs.shape[0],\n self.hidden_size, device=inputs.device))\n cell_state = torch.autograd.Variable(\n torch.zeros(self.hidden_layers * (2 if self.bidirectional is True else 1), inputs.shape[0],\n self.hidden_size, device=inputs.device))\n\n output, (final_hidden_state, final_cell_state) = self.lstm(inputs, (hidden_state, cell_state))\n\n if self.attention is True:\n return self.linear(\n self.attention_layer(output, final_hidden_state[-2 if self.bidirectional is True else -1:]))\n else:\n final_hidden_state = final_hidden_state[-2 if self.bidirectional is True else -1:].permute(1, 0, 2)\n return self.linear(final_hidden_state.reshape(\n (final_hidden_state.shape[0], final_hidden_state.shape[1] * final_hidden_state.shape[2])))\n\n\nclass LSTMClassifier(NeuralNetworkClassifier):\n def __init__(self,\n num_labels: int,\n embedding_size: Optional[int] = None,\n max_length: Optional[int] = None,\n device: Optional[str] = 'cpu',\n num_epochs: Optional[int] = 10,\n learning_rate: Optional[float] = 1e-3,\n batch_size: Optional[int] = 2,\n shuffle: Optional[bool] = True,\n random_state: Optional[int] = 101,\n hidden_size: Optional[int] = 512,\n hidden_layers: Optional[int] = 2,\n bidirectional: Optional[bool] = False,\n attention: Optional[bool] = True,\n *args,\n **kwargs):\n \"\"\"\n Initialize LSTMClassifier\n\n :param num_labels: number of polarities\n :param embedding_size: input embeddings' size\n :param max_length: maximum length of input text\n :param device: training device\n :param num_epochs: maximum number of epochs\n :param learning_rate: model learning rate\n :param batch_size: training batch size\n :param shuffle: whether DataLoader is shuffle or not\n :param random_state: random.seed number\n :param hidden_size: Long Short Term Memory hidden size\n :param bidirectional: whether to use BiLSTM 
or not\n :param args: arbitrary arguments\n :param kwargs: arbitrary keyword arguments\n \"\"\"\n super(LSTMClassifier, self).__init__(num_labels, embedding_size, max_length, device, num_epochs, learning_rate,\n batch_size, shuffle, random_state, hidden_size, hidden_layers, attention,\n *args, **kwargs)\n\n self.bidirectional = bidirectional\n self.attention = attention\n self.hidden_layers = hidden_layers\n\n def forward(self, data, *args, **kwargs):\n \"\"\"\n Training and evaluating methods\n\n :param data: TextEncoder output\n :param args: arbitrary arguments\n :param kwargs: arbitrary keyword arguments\n :return: training results\n \"\"\"\n (train_X, train_y), (test_X, test_y) = data\n\n if 'embedding_size' in kwargs:\n self.embedding_size = kwargs['embedding_size']\n elif self.embedding_size is None:\n assert train_X[-1].shape == test_X[-1].shape, ValueError(\n 'Feature embedding size of train set and test set must be equal.')\n self.embedding_size = train_X.shape[-1]\n\n assert train_X.shape.__len__() == test_X.shape.__len__(), ValueError(\n 'Number of dimension in train set and test set must be equal.')\n assert train_X.shape.__len__() <= 3, ValueError(\n 'Expected array with number of dimension less or equal than 3.')\n if train_X.shape.__len__() == 3:\n self.max_length = train_X.shape[1]\n self.train_X, self.test_X = train_X, test_X\n else:\n self.max_length = 1\n self.train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[-1]))\n self.test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[-1]))\n print(f'Reshape input array into (n_samples, 1, feature_dim) for LSTM Network Classifier')\n\n self.train_y, self.test_y = train_y, test_y\n\n if 'device' in kwargs:\n self.device = kwargs['device']\n\n self.clf = LSTM(num_labels=self.num_labels, embedding_size=self.embedding_size, hidden_size=self.hidden_size,\n bidirectional=self.bidirectional, attention=self.attention, hidden_layers=self.hidden_layers)\n return self.fit(*args, **kwargs)\n\n def predict(self, X, *args, **kwargs):\n \"\"\"\n Predict polarity with given sentences\n\n :param X: TextEncoder.predict output\n :param args: arbitrary arguments\n :param kwargs: arbitrary keyword arguments\n :return: list of numeric polarities\n :rtype: list\n \"\"\"\n self.clf.eval()\n if X.shape.__len__() == 2:\n X = X.reshape((X.shape[0], 1, X.shape[-1]))\n\n return self._predict(X)\n\n __call__ = forward\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.LSTM", "torch.bmm", "torch.nn.functional.softmax" ] ]
pandegroup/keras
[ "804e5b3d36896da23f168415e5ca2864868c0ede" ]
[ "keras/preprocessing/image.py" ]
[ "from __future__ import absolute_import\n\nimport numpy as np\nimport re\nfrom scipy import ndimage\nfrom scipy import linalg\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport random\nimport math\nfrom six.moves import range\n\n'''\n Fairly basic set of tools for realtime data augmentation on image data.\n Can easily be extended to include new transforms, new preprocessing methods, etc...\n'''\n\ndef random_rotation(x, rg, fill_mode=\"nearest\", cval=0.):\n angle = random.uniform(-rg, rg)\n x = ndimage.interpolation.rotate(x, angle, axes=(1,2), reshape=False, mode=fill_mode, cval=cval)\n return x\n\ndef random_shift(x, wrg, hrg, fill_mode=\"nearest\", cval=0.):\n crop_left_pixels = 0\n crop_right_pixels = 0\n crop_top_pixels = 0\n crop_bottom_pixels = 0\n\n original_w = x.shape[1]\n original_h = x.shape[2]\n\n if wrg:\n crop = random.uniform(0., wrg)\n split = random.uniform(0, 1)\n crop_left_pixels = int(split*crop*x.shape[1])\n crop_right_pixels = int((1-split)*crop*x.shape[1])\n\n if hrg:\n crop = random.uniform(0., hrg)\n split = random.uniform(0, 1)\n crop_top_pixels = int(split*crop*x.shape[2])\n crop_bottom_pixels = int((1-split)*crop*x.shape[2])\n\n x = ndimage.interpolation.shift(x, (0, crop_left_pixels, crop_top_pixels), mode=fill_mode, cval=cval)\n return x\n\ndef horizontal_flip(x):\n for i in range(x.shape[0]):\n x[i] = np.fliplr(x[i])\n return x\n\ndef vertical_flip(x):\n for i in range(x.shape[0]):\n x[i] = np.flipud(x[i])\n return x\n\n\ndef random_barrel_transform(x, intensity):\n # TODO\n pass\n\ndef random_shear(x, intensity):\n # TODO\n pass\n\ndef random_channel_shift(x, rg):\n # TODO\n pass\n\ndef random_zoom(x, rg, fill_mode=\"nearest\", cval=0.):\n zoom_w = random.uniform(1.-rg, 1.)\n zoom_h = random.uniform(1.-rg, 1.)\n x = ndimage.interpolation.zoom(x, zoom=(1., zoom_w, zoom_h), mode=fill_mode, cval=cval)\n return x # shape of result will be different from shape of input!\n\n\ndef array_to_img(x, scale=True):\n from PIL import Image\n x = x.transpose(1, 2, 0) \n if scale:\n x += max(-np.min(x), 0)\n x /= np.max(x)\n x *= 255\n if x.shape[2] == 3:\n # RGB\n return Image.fromarray(x.astype(\"uint8\"), \"RGB\")\n else:\n # grayscale\n return Image.fromarray(x[:,:,0].astype(\"uint8\"), \"L\")\n\n\ndef img_to_array(img):\n x = np.asarray(img, dtype='float32')\n if len(x.shape)==3:\n # RGB: height, width, channel -> channel, height, width\n x = x.transpose(2, 0, 1)\n else:\n # grayscale: height, width -> channel, height, width\n x = x.reshape((1, x.shape[0], x.shape[1]))\n return x\n\n\ndef load_img(path, grayscale=False):\n from PIL import Image\n img = Image.open(path)\n if grayscale:\n img = img.convert('L')\n else: # Assure 3 channel even when loaded image is grayscale\n img = img.convert('RGB')\n return img\n\n\ndef list_pictures(directory, ext='jpg|jpeg|bmp|png'):\n return [join(directory,f) for f in listdir(directory)\n if isfile(join(directory,f)) and re.match('([\\w]+\\.(?:' + ext + '))', f)]\n\n\nclass ImageDataGenerator(object):\n '''\n Generate minibatches with \n realtime data augmentation.\n '''\n def __init__(self, \n featurewise_center=True, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=True, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0., # degrees (0 to 180)\n width_shift_range=0., # fraction of total width\n 
height_shift_range=0., # fraction of total height\n horizontal_flip=False,\n vertical_flip=False,\n ):\n self.__dict__.update(locals())\n self.mean = None\n self.std = None\n self.principal_components = None\n\n def flow(self, X, y, batch_size=32, shuffle=False, seed=None, save_to_dir=None, save_prefix=\"\", save_format=\"jpeg\"):\n if seed:\n random.seed(seed)\n\n if shuffle:\n seed = random.randint(1, 10e6)\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n\n nb_batch = int(math.ceil(float(X.shape[0])/batch_size))\n for b in range(nb_batch):\n batch_end = (b+1)*batch_size\n if batch_end > X.shape[0]:\n nb_samples = X.shape[0] - b*batch_size\n else:\n nb_samples = batch_size\n\n bX = np.zeros(tuple([nb_samples]+list(X.shape)[1:]))\n for i in range(nb_samples):\n x = X[b*batch_size+i]\n x = self.random_transform(x.astype(\"float32\"))\n x = self.standardize(x)\n bX[i] = x\n\n if save_to_dir:\n for i in range(nb_samples):\n img = array_to_img(bX[i], scale=True)\n img.save(save_to_dir + \"/\" + save_prefix + \"_\" + str(i) + \".\" + save_format)\n\n yield bX, y[b*batch_size:b*batch_size+nb_samples]\n\n def standardize(self, x):\n if self.featurewise_center:\n x -= self.mean\n if self.featurewise_std_normalization:\n x /= self.std\n\n if self.zca_whitening:\n flatx = np.reshape(x, (x.shape[0]*x.shape[1]*x.shape[2]))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))\n\n if self.samplewise_center:\n x -= np.mean(x)\n if self.samplewise_std_normalization:\n x /= np.std(x)\n\n return x\n\n def random_transform(self, x):\n if self.rotation_range:\n x = random_rotation(x, self.rotation_range)\n if self.width_shift_range or self.height_shift_range:\n x = random_shift(x, self.width_shift_range, self.height_shift_range)\n if self.horizontal_flip:\n if random.random() < 0.5:\n x = horizontal_flip(x)\n if self.vertical_flip:\n if random.random() < 0.5:\n x = vertical_flip(x)\n\n # TODO:\n # zoom\n # barrel/fisheye\n # shearing\n # channel shifting\n return x\n\n def fit(self, X,\n augment=False, # fit on randomly augmented samples\n rounds=1, # if augment, how many augmentation passes over the data do we use\n seed=None):\n '''\n Required for featurewise_center, featurewise_std_normalization and zca_whitening.\n '''\n X = np.copy(X)\n if augment:\n aX = np.zeros(tuple([rounds*X.shape[0]]+list(X.shape)[1:]))\n for r in range(rounds):\n for i in range(X.shape[0]):\n img = array_to_img(X[i])\n img = self.random_transform(img)\n aX[i+r*X.shape[0]] = img_to_array(img)\n X = aX\n\n if self.featurewise_center:\n self.mean = np.mean(X, axis=0)\n X -= self.mean\n if self.featurewise_std_normalization:\n self.std = np.std(X, axis=0)\n X /= self.std\n\n if self.zca_whitening:\n flatX = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]))\n fudge = 10e-6\n sigma = np.dot(flatX.T, flatX) / flatX.shape[1]\n U, S, V = linalg.svd(sigma)\n self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + fudge))), U.T)\n" ]
[ [ "numpy.max", "scipy.ndimage.interpolation.shift", "numpy.dot", "numpy.asarray", "numpy.reshape", "numpy.random.seed", "numpy.copy", "scipy.linalg.svd", "numpy.min", "numpy.random.shuffle", "numpy.mean", "numpy.flipud", "numpy.std", "scipy.ndimage.interpolation.rotate", "scipy.ndimage.interpolation.zoom", "numpy.sqrt", "numpy.fliplr" ] ]
giovannic/fastms
[ "b54be22227062c3e53b44e898a587b00cd45d3ef" ]
[ "src/fastms/preprocessing.py" ]
[ "import numpy as np\nfrom sklearn.base import TransformerMixin\n\ndef format_runs(runs):\n # Time invariant parameters\n X = np.stack([entry['parameters'] for entry in runs])\n\n # Outputs\n y = np.stack([entry['outputs'] for entry in runs])\n\n # Time varying parameters\n period = y.shape[1]\n X = np.repeat(X[:, None, :], period, axis=1)\n X = np.concatenate(\n [\n X,\n [entry['timed_parameters'] for entry in runs]\n ],\n axis = 2\n )\n\n return (X, y)\n\nclass GlobalScaler(TransformerMixin):\n def __init__(self, **kwargs):\n self._mean = None\n self._std = None\n\n def fit(self, X, **kwargs):\n self._mean = np.mean(X)\n self._std = np.std(X)\n if self._std == 0.:\n self._std = 1.\n return self\n \n def transform(self, X, **kwargs):\n return (X - self._mean) / self._std\n \n def inverse_transform(self, X, **kwargs):\n return X * self._std + self._mean\n\nclass SequenceScaler(TransformerMixin):\n def __init__(self, **kwargs):\n self._mean = None\n self._std = None\n\n def fit(self, X, **kwargs):\n self._mean = np.mean(X, axis=(0, 1))\n self._std = np.std(X, axis=(0, 1))\n self._std[self._std == 0.] = 1.\n return self\n \n def transform(self, X, **kwargs):\n return (X - self._mean) / self._std\n \n def inverse_transform(self, X, **kwargs):\n return X * self._std + self._mean\n" ]
[ [ "numpy.concatenate", "numpy.mean", "numpy.std", "numpy.stack", "numpy.repeat" ] ]
ashikari/tutorial
[ "cc696ee25d94476872244222062f2ff9366d12ae" ]
[ "plotting/matplotlib/plotter_tut.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef main():\n\t\n\tx = np.linspace(0, 2, 100)\n\n\tplt.close('all')\n\tfig, ax = plt.subplots(1,1)\n\n\tax.plot(x, x, label = \"linear\")\n\tax.plot(x, x**2, label = \"Quadratic\")\n\tax.plot(x, x**3, label = \"Cubic\")\n\n\tplt.title(\"Simple Plot\")\n\tplt.legend()\n\n\n\n\tplt.show()\n\t\n\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.linspace" ] ]
cerules/sense2vec
[ "34a3af375e2df8c553d358093bda01296ba5b87d" ]
[ "scripts/05_export.py" ]
[ "#!/usr/bin/env python\nfrom sense2vec import Sense2Vec\nfrom sense2vec.util import split_key\nfrom pathlib import Path\nimport plac\nfrom wasabi import msg\nimport numpy\n\n\ndef _get_shape(file_):\n \"\"\"Return a tuple with (number of entries, vector dimensions). Handle\n both word2vec/FastText format, which has a header with this, or GloVe's\n format, which doesn't.\"\"\"\n first_line = next(file_).split()\n if len(first_line) == 2:\n return tuple(int(size) for size in first_line), file_\n count = 1\n for line in file_:\n count += 1\n file_.seek(0)\n shape = (count, len(first_line) - 1)\n return shape, file_\n\n\n@plac.annotations(\n in_file=(\"Vectors file (text-based)\", \"positional\", None, str),\n vocab_file=(\"Vocabulary file\", \"positional\", None, str),\n out_dir=(\"Path to output directory\", \"positional\", None, str),\n)\ndef main(in_file, vocab_file, out_dir):\n \"\"\"\n Step 5: Export a sense2vec component\n\n Expects a vectors.txt and a vocab file trained with GloVe and exports\n a component that can be loaded with Sense2vec.from_disk.\n \"\"\"\n input_path = Path(in_file)\n vocab_path = Path(vocab_file)\n output_path = Path(out_dir)\n if not input_path.exists():\n msg.fail(\"Can't find input file\", in_file, exits=1)\n if input_path.suffix == \".bin\":\n msg.fail(\"Need text-based vectors file, not binary\", in_file, exits=1)\n if not vocab_path.exists():\n msg.fail(\"Can't find vocab file\", vocab_file, exits=1)\n if not output_path.exists():\n output_path.mkdir(parents=True)\n msg.good(f\"Created output directory {out_dir}\")\n with input_path.open(\"r\", encoding=\"utf8\") as f:\n (n_vectors, vector_size), f = _get_shape(f)\n vectors_data = f.readlines()\n with vocab_path.open(\"r\", encoding=\"utf8\") as f:\n vocab_data = f.readlines()\n data = []\n all_senses = set()\n for item in vectors_data:\n item = item.rstrip().rsplit(\" \", vector_size)\n key = item[0]\n try:\n _, sense = split_key(key)\n except ValueError:\n continue\n vec = item[1:]\n if len(vec) != vector_size:\n msg.fail(f\"Wrong vector size: {len(vec)} (expected {vector_size})\", exits=1)\n all_senses.add(sense)\n data.append((key, numpy.asarray(vec, dtype=numpy.float32)))\n s2v = Sense2Vec(shape=(len(data), vector_size), senses=all_senses)\n for key, vector in data:\n s2v.add(key, vector)\n for item in vocab_data:\n item = item.rstrip()\n if item.endswith(\" word\"): # for fastText vocabs\n item = item[:-5]\n try:\n key, freq = item.rsplit(\" \", 1)\n except ValueError:\n continue\n s2v.set_freq(key, int(freq))\n msg.good(\"Created the sense2vec model\")\n msg.info(f\"{len(data)} vectors, {len(all_senses)} total senses\")\n s2v.to_disk(output_path)\n msg.good(\"Saved model to directory\", out_dir)\n\n\nif __name__ == \"__main__\":\n plac.call(main)\n" ]
[ [ "numpy.asarray" ] ]
qua-platform/qua-libs
[ "805a3b1a69980b939b370b3ba09434bc26dc45ec", "805a3b1a69980b939b370b3ba09434bc26dc45ec" ]
[ "examples/advanced-topics/IIR-FIR/blackbox-filter-optimization.py", "examples/multi-qubit/multiplexed-readout/configuration.py" ]
[ "from typing import List\n\nimport matplotlib.pyplot as plt\nfrom qm.qua import *\nfrom qm.QuantumMachinesManager import (\n SimulationConfig,\n QuantumMachinesManager,\n LoopbackInterface,\n)\nimport numpy as np\nimport scipy.signal as signal\nimport cma\n\nqmm = QuantumMachinesManager()\n\nwith program() as filter_optimization:\n stream = declare_stream(adc_trace=True)\n measure(\"readoutOp\", \"flux1\", stream)\n with stream_processing():\n stream.input1().save(\"adc\")\n\npulse_len = 128\ntof = 248\n\nwaveform = [0.0] * 30 + [0.2] * (pulse_len - 60) + [0.0] * 30\n\n# We use an arbitrarily selected filter for distorting the signal\ndistorted_waveform = signal.lfilter(\n np.array([1]), np.array([0.95, -0.15, 0.1]), waveform\n)\n\nbPlot = False\n\n\ndef cost(params: List[float]):\n # This is the script which will be called by the optimizer.\n M = 0 # number of feedback taps 0, 1, 2.\n feedback_filter = np.array(params[:M])\n feedforward_filter = np.array(params[M:])\n print(\"feedback:\", feedback_filter)\n print(\"feedforward:\", feedforward_filter)\n config = {\n \"version\": 1,\n \"controllers\": {\n \"con1\": {\n \"type\": \"opx1\",\n \"analog_outputs\": {\n 1: {\n \"offset\": +0.0,\n \"filter\": {\n \"feedback\": feedback_filter,\n \"feedforward\": feedforward_filter,\n },\n },\n },\n \"analog_inputs\": {\n 1: {\"offset\": +0.0},\n },\n },\n },\n \"elements\": {\n \"flux1\": {\n \"singleInput\": {\"port\": (\"con1\", 1)},\n \"outputs\": {\"output1\": (\"con1\", 1)},\n \"intermediate_frequency\": 10,\n \"operations\": {\n \"readoutOp\": \"readoutPulse\",\n },\n \"time_of_flight\": tof,\n \"smearing\": 0,\n },\n },\n \"pulses\": {\n \"readoutPulse\": {\n \"operation\": \"measure\",\n \"length\": pulse_len,\n \"waveforms\": {\"single\": \"const_wf\"},\n \"digital_marker\": \"ON\",\n \"integration_weights\": {\"x\": \"xWeights\", \"y\": \"yWeights\"},\n },\n },\n \"waveforms\": {\n \"const_wf\": {\"type\": \"arbitrary\", \"samples\": distorted_waveform},\n },\n \"digital_waveforms\": {\n \"ON\": {\"samples\": [(1, 0)]},\n },\n \"integration_weights\": {\n \"xWeights\": {\n \"cosine\": [1.0] * (pulse_len // 4),\n \"sine\": [0.0] * (pulse_len // 4),\n },\n \"yWeights\": {\n \"cosine\": [0.0] * (pulse_len // 4),\n \"sine\": [1.0] * (pulse_len // 4),\n },\n },\n }\n\n job = qmm.simulate(\n config,\n filter_optimization,\n SimulationConfig(\n duration=150,\n simulation_interface=LoopbackInterface(\n [(\"con1\", 1, \"con1\", 1)], latency=200\n ),\n ),\n )\n job.result_handles.wait_for_all_values()\n corrected_signal = (\n -job.result_handles.adc.fetch_all() / 4096\n ) # This converts ADC units into volts\n\n if bPlot:\n plt.plot(waveform)\n plt.plot(distorted_waveform)\n plt.plot(corrected_signal * np.sum(waveform) / np.sum(corrected_signal), \"--\")\n plt.legend([\"Target waveform\", \"Distorted waveform\", \"Corrected signal\"])\n\n # The correlation is used to calculate the \"loss\": Check whether the resulting output matches the required waveform,\n # taking into account added delays. 
Check the readme for more information\n corr = np.correlate(corrected_signal, waveform, \"full\") / (\n np.sqrt(\n np.correlate(corrected_signal, corrected_signal)\n * np.correlate(waveform, waveform)\n )\n )\n loss = 1 - np.max(corr)\n\n print(\"loss:\", loss)\n print(f\"delay ~ {np.argmax(corr)-len(waveform)+1}ns\")\n return loss\n\n\nparam_number = 5\niterations = 15\n\nes = cma.CMAEvolutionStrategy(np.random.rand(param_number), 1, {\"bounds\": [-1, 1]})\nes.optimize(cost, iterations=iterations)\n\nbPlot = True\ncost(es.result_pretty().xbest)\nplt.show()\n", "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#############################\r\n# simulation helpers #\r\n#############################\r\n\r\n\r\ndef simulate_pulse(IF_freq, chi, k, Ts, Td, power):\r\n I = [0]\r\n Q = [0]\r\n # solve numerically a simplified version of the readout resonator\r\n for t in range(Ts):\r\n I.append(I[-1] + (power / 2 - k * I[-1] + Q[-1] * chi))\r\n Q.append(Q[-1] + (power / 2 - k * Q[-1] - I[-1] * chi))\r\n\r\n for t in range(Td - 1):\r\n I.append(I[-1] + (-k * I[-1] + Q[-1] * chi))\r\n Q.append(Q[-1] + (-k * Q[-1] - I[-1] * chi))\r\n\r\n I = np.array(I)\r\n Q = np.array(Q)\r\n t = np.arange(len(I))\r\n\r\n S = I * np.cos(2 * np.pi * IF_freq * t * 1e-9) + Q * np.sin(\r\n 2 * np.pi * IF_freq * t * 1e-9\r\n )\r\n\r\n return t, I, Q, S\r\n\r\n\r\nlo_freq = 7.1e9\r\nrr1a_res_IF = 50e6\r\nrr2a_res_IF = 150e6\r\nrr3a_res_IF = 250e6\r\n\r\nreadout_len = 480\r\nIF_freq = rr1a_res_IF\r\nTs = readout_len - 200\r\nTd = 200\r\npower = 0.2\r\nk = 0.04\r\nchi = 0.023\r\n\r\n# simulate the readout resonator response for different qubit states\r\n# and assign this as pulses for the loopback interface only for simulation purposes\r\n# Need to assign the chi parameter for each state, relative to the measurement frequency\r\n[tg_, Ig_, Qg_, Sg_] = simulate_pulse(IF_freq, -1 * chi, k, Ts, Td, power)\r\n[te_, Ie_, Qe_, Se_] = simulate_pulse(IF_freq, 1 * chi, k, Ts, Td, power)\r\n[tf_, If_, Qf_, Sf_] = simulate_pulse(IF_freq, 8 * chi, k, Ts, Td, power)\r\ndivide_signal_factor = 100\r\nconfig = {\r\n \"version\": 1,\r\n \"controllers\": {\r\n \"con1\": {\r\n \"type\": \"opx1\",\r\n \"analog_outputs\": {\r\n 1: {\"offset\": 0},\r\n 2: {\"offset\": 0},\r\n },\r\n \"digital_outputs\": {\r\n 1: {},\r\n },\r\n \"analog_inputs\": {\r\n 1: {\"offset\": 0},\r\n 2: {\"offset\": 0},\r\n },\r\n },\r\n },\r\n \"elements\": {\r\n # readout resonators:\r\n \"rr1\": {\r\n \"mixInputs\": {\r\n \"I\": (\"con1\", 1),\r\n \"Q\": (\"con1\", 2),\r\n \"lo_frequency\": lo_freq,\r\n \"mixer\": \"mixer_WG1\",\r\n },\r\n \"intermediate_frequency\": rr1a_res_IF,\r\n \"operations\": {\r\n \"readout_pulse\": \"readout_pulse_1\",\r\n },\r\n \"outputs\": {\r\n \"out1\": (\"con1\", 1),\r\n \"out2\": (\"con1\", 2),\r\n },\r\n \"time_of_flight\": 188,\r\n \"smearing\": 0,\r\n },\r\n \"rr2\": {\r\n \"mixInputs\": {\r\n \"I\": (\"con1\", 1),\r\n \"Q\": (\"con1\", 2),\r\n \"lo_frequency\": lo_freq,\r\n \"mixer\": \"mixer_WG2\",\r\n },\r\n \"intermediate_frequency\": rr2a_res_IF,\r\n \"operations\": {\r\n \"readout_pulse\": \"readout_pulse_2\",\r\n },\r\n \"outputs\": {\r\n \"out1\": (\"con1\", 1),\r\n \"out2\": (\"con1\", 2),\r\n },\r\n \"time_of_flight\": 188,\r\n \"smearing\": 0,\r\n },\r\n \"rr3\": {\r\n \"mixInputs\": {\r\n \"I\": (\"con1\", 1),\r\n \"Q\": (\"con1\", 2),\r\n \"lo_frequency\": lo_freq,\r\n \"mixer\": \"mixer_WG3\",\r\n },\r\n \"intermediate_frequency\": rr3a_res_IF,\r\n \"operations\": {\r\n \"readout_pulse\": 
\"readout_pulse_3\",\r\n },\r\n \"outputs\": {\r\n \"out1\": (\"con1\", 1),\r\n \"out2\": (\"con1\", 2),\r\n },\r\n \"time_of_flight\": 188,\r\n \"smearing\": 0,\r\n },\r\n },\r\n \"pulses\": {\r\n \"readout_pulse\": {\r\n \"operation\": \"measurement\",\r\n \"length\": readout_len,\r\n \"waveforms\": {\"I\": \"const_wf\", \"Q\": \"zero_wf\"},\r\n \"integration_weights\": {\r\n \"integW_cos\": \"integW_cos\",\r\n \"integW_sin\": \"integW_sin\",\r\n },\r\n \"digital_marker\": \"ON\",\r\n },\r\n \"readout_pulse_1\": {\r\n \"operation\": \"measurement\",\r\n \"length\": readout_len,\r\n \"waveforms\": {\"I\": \"Ig_wf\", \"Q\": \"Qg_wf\"},\r\n \"integration_weights\": {\r\n \"integW_cos\": \"integW_cos\",\r\n \"integW_sin\": \"integW_sin\",\r\n },\r\n \"digital_marker\": \"ON\",\r\n },\r\n \"readout_pulse_2\": {\r\n \"operation\": \"measurement\",\r\n \"length\": readout_len,\r\n \"waveforms\": {\"I\": \"Ie_wf\", \"Q\": \"Qe_wf\"},\r\n \"integration_weights\": {\r\n \"integW_cos\": \"integW_cos\",\r\n \"integW_sin\": \"integW_sin\",\r\n },\r\n \"digital_marker\": \"ON\",\r\n },\r\n \"readout_pulse_3\": {\r\n \"operation\": \"measurement\",\r\n \"length\": readout_len,\r\n \"waveforms\": {\"I\": \"If_wf\", \"Q\": \"Qf_wf\"},\r\n \"integration_weights\": {\r\n \"integW_cos\": \"integW_cos\",\r\n \"integW_sin\": \"integW_sin\",\r\n },\r\n \"digital_marker\": \"ON\",\r\n },\r\n },\r\n \"waveforms\": {\r\n \"zero_wf\": {\"type\": \"constant\", \"sample\": 0.0},\r\n \"const_wf\": {\"type\": \"constant\", \"sample\": 0.1},\r\n \"Ig_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in Ig_],\r\n },\r\n \"Qg_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in Qg_],\r\n },\r\n \"Ie_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in Ie_],\r\n },\r\n \"Qe_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in Qe_],\r\n },\r\n \"If_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in If_],\r\n },\r\n \"Qf_wf\": {\r\n \"type\": \"arbitrary\",\r\n \"samples\": [float(arg / divide_signal_factor) for arg in Qf_],\r\n },\r\n },\r\n \"digital_waveforms\": {\r\n \"ON\": {\"samples\": [(1, 0)]},\r\n },\r\n \"integration_weights\": {\r\n \"integW_cos\": {\r\n \"cosine\": [1.0] * 120,\r\n \"sine\": [0.0] * 120,\r\n },\r\n \"integW_sin\": {\r\n \"cosine\": [0.0] * 120,\r\n \"sine\": [1.0] * 120,\r\n },\r\n },\r\n \"mixers\": {\r\n \"mixer_WG1\": [\r\n {\r\n \"intermediate_frequency\": rr1a_res_IF,\r\n \"lo_frequency\": lo_freq,\r\n \"correction\": [1, 0, 0, 1],\r\n },\r\n ],\r\n \"mixer_WG2\": [\r\n {\r\n \"intermediate_frequency\": rr2a_res_IF,\r\n \"lo_frequency\": lo_freq,\r\n \"correction\": [1, 0, 0, 1],\r\n },\r\n ],\r\n \"mixer_WG3\": [\r\n {\r\n \"intermediate_frequency\": rr3a_res_IF,\r\n \"lo_frequency\": lo_freq,\r\n \"correction\": [1, 0, 0, 1],\r\n },\r\n ],\r\n },\r\n}\r\n" ]
[ [ "numpy.correlate", "numpy.max", "numpy.array", "numpy.random.rand", "numpy.sum", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.argmax", "matplotlib.pyplot.show" ], [ "numpy.array", "numpy.sin", "numpy.cos" ] ]
wxwoods/mctorch
[ "7cd6eb51fdd01fa75ed9245039a4f145ba342de2" ]
[ "test/common_nn.py" ]
[ "import sys\nimport tempfile\nimport unittest\nfrom copy import deepcopy\nfrom itertools import product\nfrom functools import reduce\nfrom operator import mul\n\n\nimport torch\nimport torch.cuda\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.functional import _Reduction\nfrom common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \\\n TEST_WITH_ROCM\nfrom common_cuda import TEST_CUDA\nfrom torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors\nfrom torch.autograd import Variable\nimport torch.backends.cudnn\n\n\n# tarfile module tries to obtain a file object name in python 3.3\nif sys.version_info[:2] == (3, 3):\n TemporaryFile = tempfile.NamedTemporaryFile\nelse:\n TemporaryFile = tempfile.TemporaryFile\nPRECISION = 1e-5\n\n\ndef get_reduction(m):\n result = getattr(m, 'reduction', None)\n if result is None:\n result = _Reduction.legacy_get_string(getattr(m, 'sizeAverage', None), True, emit_warning=False)\n assert result is not None\n return result\n\n\ndef get_weight(m):\n result = getattr(m, 'weight', None)\n if result is not None:\n return result\n return getattr(m, 'weights', None)\n\nmodule_tests = [\n dict(\n module_name='Linear',\n constructor_args=(10, 8),\n input_size=(4, 10),\n reference_fn=lambda i, p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8),\n ),\n dict(\n module_name='Linear',\n constructor_args=(10, 8, False),\n input_size=(4, 10),\n desc='no_bias',\n reference_fn=lambda i, p: torch.mm(i, p[0].t())\n ),\n dict(\n module_name='Threshold',\n constructor_args=(2., 1.),\n input_size=(2, 3, 4, 5),\n check_inplace=True,\n desc='threshold_value'\n ),\n dict(\n module_name='Threshold',\n constructor_args=(2., 10.),\n input_size=(2, 3, 4, 5),\n desc='large_value'\n ),\n dict(\n module_name='ReLU',\n input_size=(2, 3, 4, 5),\n check_inplace=True,\n ),\n dict(\n module_name='ReLU6',\n input_size=(2, 3, 4, 5),\n check_inplace=True,\n ),\n dict(\n module_name='RReLU',\n input_size=(1, 2, 2),\n test_cuda=False,\n ),\n dict(\n module_name='RReLU',\n constructor_args=(0.1, 0.9),\n input_size=(4, 4, 5),\n desc='with_up_down',\n test_cuda=False,\n ),\n dict(\n module_name='Hardtanh',\n input_size=(3, 2, 5),\n reference_fn=lambda i, _: i.clamp(-1, 1),\n ),\n dict(\n module_name='Sigmoid',\n input_size=(2, 3, 4, 5)\n ),\n dict(\n module_name='Tanh',\n input_size=(2, 3, 4, 5)\n ),\n dict(\n module_name='Softmax',\n constructor_args=(1,),\n input_size=(10, 20),\n reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),\n ),\n dict(\n module_name='Softmax2d',\n input_size=(1, 3, 10, 20),\n reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, False)),\n ),\n dict(\n module_name='LogSoftmax',\n constructor_args=(1,),\n input_size=(10, 20),\n reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),\n ),\n dict(\n module_name='LogSoftmax',\n constructor_args=(1,),\n input_size=(1, 3, 10, 20),\n reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),\n desc='multiparam',\n ),\n dict(\n module_name='ELU',\n constructor_args=(2.,),\n input_size=(3, 2, 5),\n reference_fn=lambda x, _: torch.where(x >= 0, x, 2 * (x.exp() - 1)),\n ),\n # TODO: reference function\n dict(\n module_name='Hardshrink',\n constructor_args=(2.,),\n input_size=(4, 3, 2, 4),\n ),\n dict(\n module_name='LeakyReLU',\n input_size=(3, 2, 5),\n check_inplace=True\n ),\n dict(\n module_name='LeakyReLU',\n constructor_args=(0.5,),\n input_size=(3, 2, 5),\n 
check_inplace=True,\n desc='with_negval'\n ),\n dict(\n module_name='LogSigmoid',\n input_size=(2, 3, 4),\n reference_fn=lambda i, _: i.sigmoid().log(),\n ),\n dict(\n module_name='Softplus',\n input_size=(10, 20),\n reference_fn=lambda i, _: torch.log(1 + torch.exp(i)),\n ),\n dict(\n module_name='Softplus',\n constructor_args=(2,),\n input_size=(10, 20),\n reference_fn=lambda i, _: 1. / 2. * torch.log(1 + torch.exp(2 * i)),\n desc='beta',\n ),\n dict(\n module_name='Softplus',\n constructor_args=(2, -100),\n input_size=(10, 20),\n reference_fn=(lambda i, _: ((i * 2) > -100).type_as(i) * i +\n ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))),\n desc='beta_threshold',\n ),\n dict(\n module_name='Softshrink',\n input_size=(3, 2, 5),\n ),\n dict(\n module_name='Softshrink',\n constructor_args=(1,),\n input_size=(3, 2, 5),\n desc='lambda',\n ),\n dict(\n module_name='CrossMapLRN2d',\n constructor_args=(5, 5e-3, 1e-3, 2),\n input_size=(2, 3, 6, 6),\n check_gradgrad=False,\n ),\n dict(\n module_name='PReLU',\n input_size=(2, 3, 4),\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n desc='1d',\n ),\n dict(\n module_name='PReLU',\n constructor_args=(3,),\n input_size=(2, 3, 4),\n desc='1d_multiparam',\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n ),\n dict(\n module_name='PReLU',\n input_size=(2, 3, 4, 5),\n desc='2d',\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n ),\n dict(\n module_name='PReLU',\n constructor_args=(3,),\n input_size=(2, 3, 4, 5),\n desc='2d_multiparam',\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n ),\n dict(\n module_name='PReLU',\n input_size=(2, 3, 4, 5, 6),\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n desc='3d',\n ),\n dict(\n module_name='PReLU',\n constructor_args=(3,),\n input_size=(2, 3, 4, 5, 6),\n desc='3d_multiparam',\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n ),\n dict(\n module_name='Softsign',\n input_size=(3, 2, 5),\n reference_fn=lambda i, _: i.div(1 + torch.abs(i)),\n ),\n dict(\n module_name='Softmin',\n constructor_args=(1,),\n input_size=(10, 20),\n ),\n dict(\n module_name='Softmin',\n constructor_args=(1,),\n input_size=(2, 3, 5, 10),\n desc='multidim',\n ),\n dict(\n module_name='Tanhshrink',\n input_size=(2, 3, 4, 5),\n ),\n]\n\n\n# Generates rand tensor with non-equal values. 
This ensures that duplicate\n# values won't be causing test failure for modules like MaxPooling.\n# size should be small, otherwise randperm fails / long overflows.\ndef _rand_tensor_non_equal(*size):\n total = reduce(mul, size, 1)\n return torch.randperm(total).view(*size).double()\n\n\ndef wrap_functional(fn, **kwargs):\n class FunctionalModule(nn.Module):\n def forward(self, *args):\n return fn(*args, **kwargs)\n return FunctionalModule\n\n\ndef poissonnllloss_no_reduce_test():\n t = torch.randn(10, 10)\n return dict(\n fullname='PoissonNLLLLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(10, 10),\n pickle=False)\n\n\ndef bceloss_no_reduce_test():\n t = Variable(torch.randn(15, 10).gt(0).double())\n return dict(\n fullname='BCELoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()),\n check_gradgrad=False,\n pickle=False)\n\n\ndef bceloss_no_reduce_scalar_test():\n t = torch.randn(()).gt(0).double()\n return dict(\n fullname='BCELoss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()),\n check_gradgrad=False,\n pickle=False)\n\n\ndef bceloss_weights_no_reduce_test():\n t = Variable(torch.randn(15, 10).gt(0).double())\n weights = torch.rand(10)\n return dict(\n fullname='BCELoss_weights_no_reduce',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy(i, t.type_as(i),\n weight=weights.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,\n check_gradgrad=False,\n pickle=False\n )\n\n\ndef bceloss_weights_no_reduce_scalar_test():\n t = torch.randn(()).double()\n weights = torch.rand(())\n return dict(\n fullname='BCELoss_weights_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy(i, t.type_as(i),\n weight=weights.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,\n check_gradgrad=False,\n pickle=False\n )\n\n\ndef bce_with_logistic_legacy_enum_test():\n t = Variable(torch.randn(15, 10).gt(0).double())\n sigmoid = nn.Sigmoid()\n return dict(\n fullname='BCEWithLogitsLoss_legacy_enum',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)),\n input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),\n check_gradgrad=False,\n pickle=False,\n )\n\n\ndef bce_with_logistic_no_reduce_test():\n t = Variable(torch.randn(15, 10).gt(0).double())\n sigmoid = nn.Sigmoid()\n return dict(\n fullname='BCEWithLogitsLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),\n check_gradgrad=False,\n pickle=False,\n )\n\n\ndef 
bce_with_logistic_no_reduce_scalar_test():\n t = torch.randn(()).gt(0).double()\n sigmoid = nn.Sigmoid()\n return dict(\n fullname='BCEWithLogitsLoss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),\n reference_fn=lambda i, m: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),\n check_gradgrad=False,\n pickle=False\n )\n\n\ndef kldivloss_with_target_no_reduce_test():\n i = torch.rand(10, 10).log()\n return dict(\n fullname='KLDivLoss_with_target_no_reduce',\n constructor=wrap_functional(\n lambda t: F.kl_div(i.type_as(t), t, reduction='none')),\n input_fn=lambda: torch.rand(10, 10),\n reference_fn=lambda t, _:\n loss_reference_fns['KLDivLoss'](i.type_as(t), t, reduction='none'),\n pickle=False)\n\n\ndef kldivloss_no_reduce_test():\n t = torch.randn(10, 10)\n return dict(\n fullname='KLDivLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.kl_div(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(10, 10).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),\n pickle=False,\n )\n\n\ndef kldivloss_no_reduce_scalar_test():\n t = torch.randn(())\n return dict(\n fullname='KLDivLoss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.kl_div(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.rand(()).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),\n pickle=False)\n\n\ndef l1loss_no_reduce_test():\n t = torch.randn(2, 3, 4)\n return dict(\n fullname='L1Loss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(2, 3, 4),\n reference_fn=lambda i, m: (i - t.type_as(i)).abs(),\n pickle=False)\n\n\ndef l1loss_no_reduce_scalar_test():\n t = torch.randn(())\n return dict(\n fullname='L1Loss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(()),\n reference_fn=lambda i, m: (i - t.type_as(i)).abs(),\n pickle=False)\n\n\ndef mseloss_no_reduce_test():\n input_size = (2, 3, 4, 5)\n target = torch.randn(*input_size)\n return dict(\n fullname='MSELoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),\n input_size=input_size,\n reference_fn=lambda i, m: (i - target).pow(2),\n pickle=False)\n\n\ndef mseloss_no_reduce_scalar_test():\n input_size = ()\n target = torch.randn(input_size)\n return dict(\n fullname='MSELoss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),\n input_size=input_size,\n reference_fn=lambda i, m: (i - target).pow(2),\n pickle=False)\n\n\ndef nllloss_no_reduce_test():\n t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())\n kwargs = {'reduction': 'none'}\n return dict(\n fullname='NLLLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(15, 10).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nllloss_no_reduce_ignore_index_test():\n t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())\n kwargs = {'ignore_index': 2, 'reduction': 'none'}\n return dict(\n fullname='NLLLoss_no_reduce_ignore_index',\n 
constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(15, 10).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nllloss_no_reduce_weights_test():\n t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())\n weight = torch.rand(10)\n\n def kwargs(i):\n return {'weight': weight.type_as(i), 'reduction': 'none'}\n\n return dict(\n fullname='NLLLoss_no_reduce_weights',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),\n input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),\n pickle=False)\n\n\ndef nllloss_no_reduce_weights_ignore_index_test():\n t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())\n weight = torch.rand(10)\n\n def kwargs(i):\n return {'weight': weight.type_as(i), 'reduction': 'none',\n 'ignore_index': 2}\n\n return dict(\n fullname='NLLLoss_no_reduce_weights_ignore_index',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i.data))),\n input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),\n pickle=False)\n\n\ndef nllloss_no_reduce_weights_ignore_index_neg_test():\n t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())\n weight = torch.rand(10)\n\n def kwargs(i):\n return {'weight': weight.type_as(i), 'reduction': 'none',\n 'ignore_index': -1}\n\n return dict(\n fullname='NLLLoss_no_reduce_weights_ignore_index_neg',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),\n input=torch.rand(15, 10).add(1e-2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),\n pickle=False)\n\n\ndef nllloss2d_no_reduce_test():\n t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())\n kwargs = {'reduction': 'none'}\n return dict(\n fullname='NLLLoss2d_no_reduce',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(2, 3, 5, 5).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nllloss2d_no_reduce_ignore_index_test():\n t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())\n kwargs = {'ignore_index': 1, 'reduction': 'none'}\n return dict(\n fullname='NLLLoss2d_no_reduce_ignore_index',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(2, 3, 5, 5).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nllloss2d_no_reduce_weights_test():\n t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())\n weight = torch.rand(3)\n\n def kwargs(i):\n return {'weight': weight.type_as(i), 'reduction': 'none'}\n\n return dict(\n fullname='NLLLoss2d_no_reduce_weights',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),\n input_fn=lambda: torch.rand(2, 3, 5, 5).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),\n pickle=False)\n\n\ndef nlllossNd_no_reduce_test():\n t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())\n kwargs = {'reduction': 'none'}\n return dict(\n fullname='NLLLossNd_no_reduce',\n 
constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nlllossNd_no_reduce_ignore_index_test():\n t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())\n kwargs = {'ignore_index': 1, 'reduction': 'none'}\n return dict(\n fullname='NLLLossNd_no_reduce_ignore_index',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),\n input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),\n pickle=False)\n\n\ndef nlllossNd_no_reduce_weights_test():\n t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())\n weight = torch.rand(3)\n\n def kwargs(i):\n return {'weight': weight.type_as(i), 'reduction': 'none'}\n\n return dict(\n fullname='NLLLossNd_no_reduce_weights',\n constructor=wrap_functional(\n lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),\n input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),\n reference_fn=lambda i, _:\n loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),\n pickle=False)\n\n\ndef smoothl1loss_no_reduce_test():\n t = torch.randn(2, 3, 4)\n return dict(\n fullname='SmoothL1Loss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(2, 3, 4),\n reference_fn=lambda i, _:\n loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),\n pickle=False)\n\n\ndef smoothl1loss_no_reduce_scalar_test():\n t = torch.randn(())\n return dict(\n fullname='SmoothL1Loss_no_reduce_scalar',\n constructor=wrap_functional(\n lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(()),\n reference_fn=lambda i, _:\n loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),\n pickle=False)\n\n\ndef multilabelmarginloss_1d_no_reduce_test():\n t = Variable(torch.rand(10).mul(10).floor().long())\n return dict(\n fullname='MultiLabelMarginLoss_1d_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),\n input_fn=lambda: torch.randn(10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multilabelmarginloss_index_neg_test():\n t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1))\n return dict(\n fullname='MultiLabelMarginLoss_index_neg',\n constructor=wrap_functional(\n lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multilabelmarginloss_no_reduce_test():\n t = Variable(torch.rand(5, 10).mul(10).floor().long())\n return dict(\n fullname='MultiLabelMarginLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),\n check_sum_reduction=True,\n 
check_gradgrad=False,\n pickle=False)\n\n\ndef hingeembeddingloss_no_reduce_test():\n t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))\n return dict(\n fullname='HingeEmbeddingLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(10),\n reference_fn=lambda i, _:\n loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'),\n check_sum_reduction=True,\n pickle=False)\n\n\ndef hingeembeddingloss_margin_no_reduce_test():\n t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))\n return dict(\n fullname='HingeEmbeddingLoss_margin_no_reduce',\n constructor=wrap_functional(\n lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')),\n input_fn=lambda: torch.randn(10),\n reference_fn=lambda i, _:\n loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'),\n check_sum_reduction=True,\n pickle=False)\n\n\ndef softmarginloss_no_reduce_test():\n t = torch.randn(5, 5)\n return dict(\n fullname='SoftMarginLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(5, 5),\n reference_fn=lambda i, _:\n loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'),\n pickle=False)\n\n\ndef multilabelsoftmarginloss_no_reduce_test():\n t = torch.rand(5, 10).mul(2).floor()\n return dict(\n fullname='MultiLabelSoftMarginLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, m:\n (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log())).sum(dim=1) / i.size(1),\n check_gradgrad=False,\n pickle=False)\n\n\ndef multilabelsoftmarginloss_weights_no_reduce_test():\n t = torch.rand(5, 10).mul(2).floor()\n weights = torch.rand(10)\n return dict(\n fullname='MultiLabelSoftMarginLoss_weights_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i),\n weight=weights.type_as(i), reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, m:\n (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multimarginloss_no_reduce_test():\n t = torch.rand(5).mul(8).floor().long()\n return dict(\n fullname='MultiMarginLoss_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multimarginloss_1d_no_reduce_test():\n t = torch.rand(1).mul(8).floor().long()\n return dict(\n fullname='MultiMarginLoss_1d_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),\n input_fn=lambda: torch.randn(10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multimarginloss_p_no_reduce_test():\n t = torch.rand(5).mul(8).floor().long()\n return dict(\n fullname='MultiMarginLoss_p_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multi_margin_loss(i, 
t.type_as(i).long(), p=2, reduction='none')),\n input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multimarginloss_margin_no_reduce_test():\n t = torch.rand(5).mul(8).floor().long()\n return dict(\n fullname='MultiMarginLoss_margin_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multi_margin_loss(i, t.type_as(i).long(), margin=0.5, reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(),\n margin=0.5, reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef multimarginloss_weights_no_reduce_test():\n t = torch.rand(5).mul(8).floor().long()\n weights = torch.rand(10)\n return dict(\n fullname='MultiMarginLoss_weights_no_reduce',\n constructor=wrap_functional(\n lambda i: F.multi_margin_loss(i, t.type_as(i).long(), weight=weights.type_as(i),\n reduction='none')),\n input_fn=lambda: torch.randn(5, 10),\n reference_fn=lambda i, _:\n loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(),\n weight=weights, reduction='none'),\n check_sum_reduction=True,\n check_gradgrad=False,\n pickle=False)\n\n\ndef fractional_max_pool2d_test(test_case):\n random_samples = torch.DoubleTensor(1, 3, 2).uniform_()\n if test_case == 'ratio':\n return dict(\n constructor=lambda: nn.FractionalMaxPool2d(\n 2, output_ratio=0.5, _random_samples=random_samples),\n input_size=(1, 3, 5, 7),\n fullname='FractionalMaxPool2d_ratio')\n elif test_case == 'size':\n return dict(\n constructor=lambda: nn.FractionalMaxPool2d((2, 3), output_size=(\n 4, 3), _random_samples=random_samples),\n input_size=(1, 3, 7, 6),\n fullname='FractionalMaxPool2d_size')\n\n\ndef fractional_max_pool3d_test(test_case):\n random_samples = torch.DoubleTensor(2, 4, 3).uniform_()\n if test_case == 'ratio':\n return dict(\n constructor=lambda: nn.FractionalMaxPool3d(\n 2, output_ratio=0.5, _random_samples=random_samples),\n input_size=(2, 4, 5, 5, 5),\n fullname='FractionalMaxPool3d_ratio')\n elif test_case == 'size':\n return dict(\n constructor=lambda: nn.FractionalMaxPool3d((2, 2, 2), output_size=(\n 4, 4, 4), _random_samples=random_samples),\n input_size=(2, 4, 7, 7, 7),\n fullname='FractionalMaxPool3d_size')\n elif test_case == 'asymsize':\n return dict(\n constructor=lambda: nn.FractionalMaxPool3d((4, 2, 3), output_size=(\n 10, 3, 2), _random_samples=random_samples),\n input_size=(2, 4, 16, 7, 5),\n fullname='FractionalMaxPool3d_asymsize')\n\n\nnew_module_tests = [\n poissonnllloss_no_reduce_test(),\n bceloss_no_reduce_test(),\n bceloss_weights_no_reduce_test(),\n bce_with_logistic_legacy_enum_test(),\n bce_with_logistic_no_reduce_test(),\n bceloss_no_reduce_scalar_test(),\n bceloss_weights_no_reduce_scalar_test(),\n bce_with_logistic_no_reduce_scalar_test(),\n kldivloss_with_target_no_reduce_test(),\n kldivloss_no_reduce_test(),\n kldivloss_no_reduce_scalar_test(),\n l1loss_no_reduce_test(),\n l1loss_no_reduce_scalar_test(),\n mseloss_no_reduce_test(),\n mseloss_no_reduce_scalar_test(),\n nllloss_no_reduce_test(),\n nllloss_no_reduce_ignore_index_test(),\n nllloss_no_reduce_weights_test(),\n nllloss_no_reduce_weights_ignore_index_test(),\n nllloss_no_reduce_weights_ignore_index_neg_test(),\n nllloss2d_no_reduce_test(),\n nllloss2d_no_reduce_weights_test(),\n 
nllloss2d_no_reduce_ignore_index_test(),\n nlllossNd_no_reduce_test(),\n nlllossNd_no_reduce_weights_test(),\n nlllossNd_no_reduce_ignore_index_test(),\n smoothl1loss_no_reduce_test(),\n smoothl1loss_no_reduce_scalar_test(),\n multilabelmarginloss_1d_no_reduce_test(),\n multilabelmarginloss_index_neg_test(),\n multilabelmarginloss_no_reduce_test(),\n hingeembeddingloss_no_reduce_test(),\n hingeembeddingloss_margin_no_reduce_test(),\n softmarginloss_no_reduce_test(),\n multilabelsoftmarginloss_no_reduce_test(),\n multilabelsoftmarginloss_weights_no_reduce_test(),\n multimarginloss_no_reduce_test(),\n multimarginloss_1d_no_reduce_test(),\n multimarginloss_p_no_reduce_test(),\n multimarginloss_margin_no_reduce_test(),\n multimarginloss_weights_no_reduce_test(),\n fractional_max_pool2d_test('ratio'),\n fractional_max_pool2d_test('size'),\n fractional_max_pool3d_test('ratio'),\n fractional_max_pool3d_test('size'),\n fractional_max_pool3d_test('asymsize'),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(10,),\n input_size=(4, 10),\n cudnn=True,\n check_eval=True,\n desc='affine',\n test_cuda=(not TEST_WITH_ROCM),\n ),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(5,),\n input_size=(4, 5, 3),\n cudnn=True,\n check_eval=True,\n desc='3d_input',\n ),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(10, 1e-3, None),\n input_size=(4, 10),\n cudnn=True,\n check_eval=True,\n desc='affine_simple_average',\n test_cuda=(not TEST_WITH_ROCM),\n ),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(10, 1e-3, 0.3, False),\n input_size=(4, 10),\n cudnn=True,\n check_eval=True,\n desc='not_affine',\n ),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(10, 1e-3, 0.3, True, False),\n input_size=(4, 10),\n cudnn=True,\n check_eval=True,\n desc='not_tracking_stats',\n test_cuda=(not TEST_WITH_ROCM),\n ),\n dict(\n module_name='BatchNorm1d',\n constructor_args=(5, 1e-3, 0.3, False),\n input_size=(4, 5, 3),\n cudnn=True,\n check_eval=True,\n desc='3d_input_not_affine',\n ),\n dict(\n module_name='BatchNorm2d',\n constructor_args=(3,),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n ),\n dict(\n module_name='BatchNorm2d',\n constructor_args=(3, 1e-3, None),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n desc='2d_simple_average',\n ),\n dict(\n module_name='BatchNorm2d',\n constructor_args=(3, 1e-3, 0.8),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n desc='momentum',\n ),\n dict(\n module_name='BatchNorm2d',\n constructor_args=(3, 1e-3, 0.8, False),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n desc='not_affine',\n ),\n dict(\n module_name='BatchNorm2d',\n constructor_args=(3, 1e-3, 0.8, True, False),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n desc='not_tracking_stats',\n ),\n dict(\n module_name='BatchNorm3d',\n constructor_args=(3,),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n ),\n dict(\n module_name='BatchNorm3d',\n constructor_args=(3, 1e-3, None),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n desc='3d_simple_average',\n ),\n dict(\n module_name='BatchNorm3d',\n constructor_args=(3, 1e-3, 0.7),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n desc='momentum',\n ),\n dict(\n module_name='BatchNorm3d',\n constructor_args=(3, 1e-3, 0.7, False),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n desc='not_affine',\n ),\n dict(\n module_name='BatchNorm3d',\n constructor_args=(3, 1e-3, 0.7, True, False),\n input_size=(2, 3, 4, 4, 
4),\n cudnn=True,\n check_eval=True,\n desc='not_tracking_stats',\n ),\n dict(\n module_name='InstanceNorm1d',\n constructor_args=(3, 1e-3, 0.3),\n input_size=(4, 3, 15),\n cudnn=True,\n check_eval=True,\n ),\n dict(\n module_name='InstanceNorm1d',\n constructor_args=(3, 1e-3, 0.3, False, True),\n input_size=(4, 3, 15),\n cudnn=True,\n check_eval=True,\n desc='tracking_stats',\n ),\n dict(\n module_name='InstanceNorm2d',\n constructor_args=(3, 1e-3, 0.3),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n ),\n dict(\n module_name='InstanceNorm2d',\n constructor_args=(3, 1e-3, 0.3, False, True),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n check_eval=True,\n desc='tracking_stats',\n ),\n dict(\n module_name='InstanceNorm3d',\n constructor_args=(3, 1e-3, 0.3),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n ),\n dict(\n module_name='InstanceNorm3d',\n constructor_args=(3, 1e-3, 0.3, False, True),\n input_size=(2, 3, 4, 4, 4),\n cudnn=True,\n check_eval=True,\n desc='tracking_stats',\n ),\n dict(\n module_name='LayerNorm',\n constructor_args=([5], 1e-3),\n input_size=(4, 5, 5),\n cudnn=True,\n check_eval=True,\n desc='1d_elementwise_affine',\n ),\n dict(\n module_name='LayerNorm',\n constructor_args=([5], 1e-3, False),\n input_size=(4, 5, 5),\n cudnn=True,\n check_eval=True,\n desc='1d_no_elementwise_affine',\n ),\n dict(\n module_name='LayerNorm',\n constructor_args=([2, 2, 5], 1e-3),\n input_size=(4, 2, 2, 5),\n cudnn=True,\n check_eval=True,\n desc='3d_elementwise_affine',\n ),\n dict(\n module_name='LayerNorm',\n constructor_args=([2, 2, 5], 1e-3, False),\n input_size=(4, 2, 2, 5),\n cudnn=True,\n check_eval=True,\n desc='3d_no_elementwise_affine',\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(3, 6, 1e-3),\n input_size=(4, 6, 5),\n cudnn=True,\n check_eval=True,\n desc='1d_affine',\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(5, 5, 1e-3, False),\n input_size=(4, 5, 5),\n cudnn=True,\n check_eval=True,\n desc='1d_no_affine_IN', # this setting is equivalent with InstanceNormi\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(1, 5, 1e-3, False),\n input_size=(4, 5, 5),\n cudnn=True,\n check_eval=True,\n desc='1d_no_affine_LN', # this setting is equivalent with LayerNorm\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(3, 6, 1e-3),\n input_size=(4, 6, 2, 3),\n cudnn=True,\n check_eval=True,\n desc='2d_affine',\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(3, 3, 1e-3, False),\n input_size=(4, 3, 2, 3),\n cudnn=True,\n check_eval=True,\n desc='2d_no_affine_IN', # this setting is equivalent with InstanceNorm\n ),\n dict(\n module_name='GroupNorm',\n constructor_args=(1, 3, 1e-3, False),\n input_size=(4, 3, 2, 3),\n cudnn=True,\n check_eval=True,\n desc='2d_no_affine_LN', # this setting is equivalent with LayerNorm\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 5, 3),\n input_size=(2, 4, 10),\n cudnn=True,\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 5, 3, 2),\n input_size=(2, 4, 10),\n cudnn=True,\n desc='stride',\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 5, 3, 1, 1),\n input_size=(2, 4, 10),\n cudnn=True,\n desc='pad1',\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 5, 5, 1, 2),\n input_size=(2, 4, 10),\n cudnn=True,\n desc='pad2',\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 4, 3, 1, 1),\n input_size=(1, 4, 1),\n cudnn=True,\n desc='pad1size1',\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(4, 4, 5, 1, 2),\n input_size=(1, 
4, 1),\n cudnn=True,\n desc='pad2size1',\n ),\n dict(\n fullname='Conv1d_dilated',\n constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2),\n input_size=(2, 4, 10),\n ),\n dict(\n fullname='Conv1d_groups',\n constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2),\n input_size=(2, 4, 6),\n cudnn=True,\n ),\n dict(\n fullname='ConvTranspose1d',\n constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)),\n cudnn=True,\n input_size=(1, 3, 7),\n ),\n dict(\n module_name='ConvTranspose1d',\n constructor_args=(3, 4, 3, 2, 1, 1, 1, False),\n input_size=(1, 3, 6),\n cudnn=True,\n desc='no_bias',\n ),\n dict(\n module_name='ConvTranspose1d',\n constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2),\n input_size=(1, 3, 6),\n cudnn=True,\n desc='dilated',\n ),\n dict(\n fullname='ConvTranspose1d_groups',\n constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2),\n cudnn=True,\n input_size=(2, 4, 7),\n ),\n dict(\n module_name='MaxPool1d',\n constructor_args=(4,),\n input_size=(2, 10, 4),\n ),\n dict(\n module_name='MaxPool1d',\n constructor_args=(4, 4),\n input_size=(2, 10, 4),\n desc='stride',\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 4, (3, 2)),\n input_size=(2, 3, 7, 5),\n cudnn=True,\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 4, (3, 3), (2, 2)),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n desc='strided',\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)),\n input_size=(2, 3, 6, 6),\n cudnn=True,\n desc='padding',\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)),\n input_size=(2, 3, 8, 8),\n cudnn=True,\n desc='dilated',\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False),\n input_size=(2, 3, 6, 5),\n cudnn=True,\n desc='no_bias',\n ),\n dict(\n fullname='Conv2d_groups',\n constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),\n input_size=(2, 4, 6, 5),\n cudnn=True,\n ),\n dict(\n fullname='Conv2d_groups_thnn',\n constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),\n input_size=(2, 4, 6, 5),\n ),\n dict(\n module_name='ConvTranspose2d',\n constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)),\n cudnn=True,\n input_size=(1, 3, 7, 6),\n ),\n dict(\n module_name='ConvTranspose2d',\n constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)),\n input_size=(1, 3, 6, 7),\n cudnn=True,\n desc='dilated',\n ),\n dict(\n module_name='ConvTranspose2d',\n constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False),\n input_size=(1, 3, 6, 7),\n cudnn=True,\n desc='no_bias',\n ),\n dict(\n fullname='ConvTranspose2d_groups',\n constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2),\n input_size=(1, 2, 4, 5),\n cudnn=True,\n ),\n dict(\n fullname='Conv2d_depthwise',\n constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4),\n input_size=(2, 4, 6, 6),\n ),\n dict(\n fullname='Conv2d_depthwise_with_multiplier',\n constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4),\n input_size=(2, 4, 6, 6),\n ),\n dict(\n fullname='Conv2d_depthwise_strided',\n constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4),\n input_size=(2, 4, 6, 6),\n ),\n dict(\n fullname='Conv2d_depthwise_padded',\n constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4),\n input_size=(2, 4, 6, 6),\n ),\n dict(\n fullname='Conv2d_depthwise_dilated',\n constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4),\n input_size=(2, 4, 5, 5),\n ),\n dict(\n 
module_name='MaxPool2d',\n constructor_args=((3, 3), (2, 2), (1, 1)),\n input_size=(1, 3, 7, 7),\n ),\n dict(\n module_name='AvgPool1d',\n constructor_args=(2,),\n input_size=(2, 3, 6),\n ),\n dict(\n module_name='AvgPool1d',\n constructor_args=((2,), (2,)),\n input_size=(2, 3, 6),\n desc='stride',\n ),\n dict(\n module_name='AvgPool1d',\n constructor_args=(2, 2, 1),\n input_size=(2, 3, 6),\n desc='stride_pad',\n ),\n dict(\n module_name='AvgPool2d',\n constructor_args=((2, 2),),\n input_size=(2, 3, 6, 6),\n ),\n dict(\n module_name='AvgPool2d',\n constructor_args=((2, 2), (2, 2)),\n input_size=(2, 3, 6, 6),\n desc='stride',\n ),\n dict(\n module_name='AvgPool2d',\n constructor_args=((2, 2), (2, 2), (1, 1)),\n input_size=(2, 3, 6, 6),\n desc='stride_pad',\n ),\n dict(\n module_name='LPPool2d',\n constructor_args=(2, 2, 2),\n input_size=(1, 3, 7, 7),\n ),\n dict(\n module_name='LPPool2d',\n constructor_args=(1.5, 2),\n input_fn=lambda: torch.rand(1, 3, 7, 7),\n desc='norm',\n ),\n dict(\n module_name='LPPool1d',\n constructor_args=(1.5, 2),\n input_fn=lambda: torch.rand(1, 3, 7),\n desc='norm',\n ),\n dict(\n module_name='LPPool1d',\n constructor_args=(2, 2, 3),\n input_size=(1, 3, 7),\n ),\n dict(\n module_name='LocalResponseNorm',\n constructor_args=(3, ),\n input_size=(1, 5, 7),\n desc='1d',\n ),\n dict(\n module_name='LocalResponseNorm',\n constructor_args=(2, ),\n input_size=(1, 5, 7, 7),\n desc='2d_uneven_pad',\n ),\n dict(\n module_name='LocalResponseNorm',\n constructor_args=(1, 1., 0.5, 2.),\n input_size=(1, 5, 7, 7, 7),\n desc='3d_custom_params',\n ),\n dict(\n module_name='ReflectionPad1d',\n constructor_args=((1, 2),),\n input_size=(2, 3, 8),\n ),\n dict(\n module_name='ReflectionPad2d',\n constructor_args=((1, 2, 3, 4),),\n input_size=(2, 3, 8, 8),\n ),\n dict(\n module_name='ReplicationPad1d',\n constructor_args=((1, 2),),\n input_size=(2, 3, 4),\n ),\n dict(\n module_name='ReplicationPad2d',\n constructor_args=((1, 2, 3, 4),),\n input_size=(2, 3, 4, 4),\n ),\n dict(\n module_name='ZeroPad2d',\n constructor_args=((1, 2, 3, 4),),\n input_size=(2, 3, 4, 4)\n ),\n dict(\n module_name='ZeroPad2d',\n constructor_args=((-1, -1, -1, -2),),\n input_size=(2, 3, 4, 4),\n desc='negative_dims'\n ),\n dict(\n module_name='ConstantPad1d',\n constructor_args=((1, 2), 2.),\n input_size=(2, 3, 4)\n ),\n dict(\n module_name='ConstantPad2d',\n constructor_args=((1, 2, 3, 4), 2.),\n input_size=(2, 3, 4, 4)\n ),\n dict(\n module_name='ConstantPad3d',\n constructor_args=((1, 2, 3, 4, 1, 0), 2.),\n input_size=(2, 3, 4, 4, 5)\n ),\n dict(\n module_name='Conv3d',\n constructor_args=(3, 4, (2, 3, 4)),\n input_size=(2, 3, 3, 4, 5),\n cudnn=True,\n ),\n dict(\n module_name='Conv3d',\n constructor_args=(3, 4, (2, 3, 4), 1, 0, 1, 1, False),\n input_size=(2, 3, 3, 4, 5),\n cudnn=True,\n desc='no_bias',\n ),\n dict(\n module_name='Conv3d',\n constructor_args=(3, 4, 2, 2),\n input_size=(2, 3, 5, 5, 5),\n cudnn=True,\n desc='stride',\n ),\n dict(\n module_name='Conv3d',\n constructor_args=(3, 4, 2, 2, 1),\n input_size=(2, 3, 5, 5, 5),\n cudnn=True,\n desc='stride_padding',\n ),\n dict(\n fullname='Conv3d_groups',\n constructor=lambda: nn.Conv3d(4, 6, kernel_size=3, groups=2),\n input_size=(2, 4, 4, 5, 4),\n cudnn=True,\n ),\n dict(\n fullname='Conv3d_dilated',\n constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2),\n input_size=(2, 3, 5, 5, 5),\n ),\n dict(\n fullname='Conv3d_dilated_strided',\n constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2),\n input_size=(2, 3, 5, 5, 5),\n 
),\n dict(\n module_name='ConvTranspose3d',\n constructor_args=(2, 3, (2, 3, 2)),\n cudnn=True,\n input_size=(1, 2, 4, 5, 4),\n ),\n dict(\n module_name='ConvTranspose3d',\n constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)),\n cudnn=True,\n input_size=(1, 2, 4, 5, 4),\n desc='dilated',\n ),\n dict(\n module_name='MaxPool3d',\n constructor_args=((2, 2, 2),),\n input_size=(2, 3, 5, 5, 5),\n ),\n dict(\n module_name='MaxPool3d',\n constructor_args=(2, (2, 2, 2)),\n input_size=(2, 3, 5, 5, 5),\n desc='stride',\n ),\n dict(\n module_name='MaxPool3d',\n constructor_args=(2, 2, (1, 1, 1)),\n input_size=(2, 3, 5, 5, 5),\n desc='stride_padding',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=((2, 2, 2),),\n input_size=(2, 3, 4, 4, 4),\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=(2, (2, 2, 2)),\n input_size=(2, 3, 5, 5, 5),\n desc='stride',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=(2, 2, (1, 1, 1)),\n input_size=(2, 3, 5, 5, 5),\n desc='stride_pad',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=(4, 2, (1, 2, 1)),\n input_size=(2, 3, 5, 5, 5),\n desc='stride_pad_gpu_fixedkw_output',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=((2, 4, 8), 1, (1, 1, 2)),\n input_size=(2, 3, 2, 4, 8),\n desc='stride_pad_gpu_general_output',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=(3, 1, 0),\n input_size=(2, 3, 4, 4, 4),\n desc='stride1_pad0_gpu_input',\n ),\n dict(\n module_name='AvgPool3d',\n constructor_args=(2, 2, (1, 1, 1)),\n input_size=(2, 3, 4, 4, 4),\n desc='stride_pad_gpu_input_nooverlap',\n ),\n dict(\n module_name='ReplicationPad3d',\n constructor_args=((1, 2, 3, 4, 5, 6),),\n input_size=(2, 3, 5, 5, 5),\n ),\n dict(\n module_name='Embedding',\n constructor_args=(4, 3),\n input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),\n jacobian_input=False,\n check_gradgrad=False,\n ),\n dict(\n module_name='EmbeddingBag',\n constructor_args=(4, 3),\n input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),\n jacobian_input=False,\n check_gradgrad=False,\n desc='mean',\n ),\n dict(\n module_name='EmbeddingBag',\n constructor_args=(4, 3, None, 2., False, 'sum'),\n input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),\n jacobian_input=False,\n check_gradgrad=False,\n desc='sum',\n ),\n dict(\n module_name='EmbeddingBag',\n constructor_args=(4, 3, None, 2., False, 'max'),\n input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),\n jacobian_input=False,\n check_gradgrad=False,\n desc='max',\n ),\n dict(\n fullname='EmbeddingBag_sparse',\n constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True),\n input_fn=lambda: torch.randperm(2).repeat(1, 2),\n jacobian_input=False,\n check_gradgrad=False,\n ),\n dict(\n constructor=lambda: nn.Embedding(4, 3, sparse=True),\n input_fn=lambda: torch.randperm(2).repeat(1, 2),\n jacobian_input=False,\n fullname='Embedding_sparse',\n check_gradgrad=False,\n ),\n dict(\n module_name='PixelShuffle',\n constructor_args=(3,),\n input_size=(1, 9, 4, 4),\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),\n input_size=(1, 2, 4),\n fullname='interpolate_nearest_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(12, ), scale_factor=None, mode='nearest'),\n input_size=(1, 2, 3),\n fullname='interpolate_nearest_tuple_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),\n input_size=(1, 2, 4),\n 
fullname='interpolate_nearest_scale_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False),\n input_size=(1, 2, 4),\n fullname='interpolate_linear_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, ), scale_factor=None, mode='linear', align_corners=False),\n input_size=(1, 2, 3),\n fullname='interpolate_linear_tuple_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=False),\n input_size=(1, 2, 4),\n fullname='interpolate_linear_scale_1d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=True),\n input_size=(1, 2, 4),\n fullname='interpolate_linear_1d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=True),\n input_size=(1, 2, 4),\n fullname='interpolate_linear_scale_1d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_nearest_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(12, 16), scale_factor=None, mode='nearest'),\n input_size=(1, 2, 3, 4),\n fullname='interpolate_nearest_tuple_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_nearest_scale_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None,\n mode='bilinear', align_corners=False),\n input_size=(1, 2, 2, 3),\n fullname='interpolate_bilinear_tuple_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4.,\n mode='bilinear', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_scale_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.),\n mode='bilinear', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_scale_tuple_shared_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),\n mode='bilinear', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_scale_tuple_skewed_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=True),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_tuple_2d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),\n mode='bilinear', align_corners=True),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bilinear_scale_tuple_skewed_2d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None,\n 
mode='bicubic', align_corners=False),\n input_size=(1, 2, 2, 3),\n fullname='interpolate_bicubic_tuple_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bicubic', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_scale_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.),\n mode='bicubic', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_scale_tuple_shared_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),\n mode='bicubic', align_corners=False),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_scale_tuple_skewed_2d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=True),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_tuple_2d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),\n mode='bicubic', align_corners=True),\n input_size=(1, 2, 4, 4),\n fullname='interpolate_bicubic_scale_tuple_skewed_2d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),\n input_size=(1, 2, 4, 4, 4),\n fullname='interpolate_nearest_3d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(12, 16, 16), scale_factor=None, mode='nearest'),\n input_size=(1, 2, 3, 4, 4),\n fullname='interpolate_nearest_tuple_3d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),\n input_size=(1, 2, 4, 4, 4),\n fullname='interpolate_nearest_scale_3d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False),\n input_size=(1, 2, 4, 4, 4),\n fullname='interpolate_trilinear_3d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6, 6),\n scale_factor=None, mode='trilinear', align_corners=False),\n input_size=(1, 2, 2, 3, 3),\n fullname='interpolate_trilinear_tuple_3d',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=False),\n input_size=(1, 2, 3, 4, 4),\n fullname='interpolate_trilinear_scale_3d',\n # See https://github.com/pytorch/pytorch/issues/5006\n precision=3e-4,\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None,\n mode='trilinear', align_corners=True),\n input_size=(1, 2, 2, 3, 3),\n fullname='interpolate_trilinear_tuple_3d_align_corners',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=True),\n input_size=(1, 2, 3, 4, 4),\n fullname='interpolate_trilinear_scale_3d_align_corners',\n # See https://github.com/pytorch/pytorch/issues/5006\n precision=3e-4,\n pickle=False,\n ),\n dict(\n module_name='AdaptiveMaxPool1d',\n constructor_args=(3,),\n input_fn=lambda: _rand_tensor_non_equal(1, 3, 5),\n ),\n dict(\n module_name='AdaptiveMaxPool2d',\n constructor_args=(3,),\n input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),\n desc='single',\n ),\n dict(\n module_name='AdaptiveMaxPool2d',\n constructor_args=((3, 4),),\n input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),\n desc='tuple',\n ),\n dict(\n 
module_name='AdaptiveMaxPool2d',\n constructor_args=((3, None),),\n input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),\n desc='tuple_none',\n ),\n dict(\n module_name='AdaptiveMaxPool3d',\n constructor_args=(3,),\n input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),\n desc='single',\n ),\n dict(\n module_name='AdaptiveMaxPool3d',\n constructor_args=((3, 4, 5),),\n input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),\n desc='tuple',\n ),\n dict(\n module_name='AdaptiveMaxPool3d',\n constructor_args=((3, None, 5),),\n input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),\n desc='tuple_none',\n ),\n dict(\n module_name='AdaptiveMaxPool3d',\n constructor_args=(3,),\n input_fn=lambda: _rand_tensor_non_equal(2, 3, 12, 9, 3),\n desc='single_nonatomic',\n ),\n dict(\n module_name='AdaptiveMaxPool3d',\n constructor_args=((3, 4, 5),),\n input_fn=lambda: _rand_tensor_non_equal(2, 3, 6, 4, 10),\n desc='tuple_nonatomic',\n ),\n dict(\n module_name='AdaptiveAvgPool1d',\n constructor_args=(3,),\n input_fn=lambda: torch.rand(1, 3, 5),\n ),\n dict(\n module_name='AdaptiveAvgPool1d',\n constructor_args=(1,),\n input_fn=lambda: torch.rand(1, 3, 5),\n desc='one_output',\n ),\n dict(\n module_name='AdaptiveAvgPool2d',\n constructor_args=(3,),\n input_fn=lambda: torch.rand(1, 3, 5, 6),\n desc='single',\n ),\n dict(\n module_name='AdaptiveAvgPool2d',\n constructor_args=(1,),\n input_fn=lambda: torch.rand(1, 3, 5, 6),\n desc='single_1x1output',\n ),\n dict(\n module_name='AdaptiveAvgPool2d',\n constructor_args=((3, 4),),\n input_fn=lambda: torch.rand(1, 3, 5, 6),\n desc='tuple',\n ),\n dict(\n module_name='AdaptiveAvgPool2d',\n constructor_args=((3, None),),\n input_fn=lambda: torch.rand(1, 3, 5, 6),\n desc='tuple_none',\n ),\n dict(\n module_name='AdaptiveAvgPool3d',\n constructor_args=(3,),\n input_fn=lambda: torch.rand(2, 3, 5, 2, 7),\n desc='single',\n ),\n dict(\n module_name='AdaptiveAvgPool3d',\n constructor_args=((3, 4, 5),),\n input_fn=lambda: torch.rand(2, 3, 5, 3, 7),\n desc='tuple',\n ),\n dict(\n module_name='AdaptiveAvgPool3d',\n constructor_args=((None, 4, 5),),\n input_fn=lambda: torch.rand(2, 3, 5, 3, 7),\n desc='tuple_none',\n ),\n dict(\n module_name='SELU',\n input_size=(3, 2, 5),\n check_inplace=True\n ),\n dict(\n module_name='SELU',\n input_size=(),\n check_inplace=True,\n desc='scalar'\n ),\n dict(\n module_name='CELU',\n input_size=(3, 2, 5),\n constructor_args=(2.,),\n check_inplace=True,\n reference_fn=lambda x, _: torch.where(x >= 0, x, 2. * ((.5 * x).exp() - 1)),\n ),\n dict(\n module_name='CELU',\n input_size=(),\n constructor_args=(2.,),\n check_inplace=True,\n reference_fn=lambda x, _: torch.where(x >= 0, x, 2. 
* ((.5 * x).exp() - 1)),\n desc='scalar'\n ),\n dict(\n module_name='GLU',\n input_size=(5, 6),\n ),\n dict(\n module_name='GLU',\n constructor_args=(1,),\n input_size=(5, 6, 7),\n desc='dim',\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=-1),\n input_size=(2, 128), # trigger the last-dim algo in CUDA\n fullname='softmax_lastdim',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),\n input_size=(2, 128),\n fullname='softmax_lastdim_dtype',\n pickle=False,\n test_cuda=False\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=1),\n input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo\n fullname='softmax_spatial_special',\n pickle=False,\n test_cuda=(not TEST_WITH_ROCM)\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=1),\n input_size=(2, 2, 4, 4), # regular spatial algorithm\n fullname='softmax_spatial',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),\n input_size=(2, 2, 4, 4), # regular spatial algorithm\n fullname='softmax_spatial_dtype',\n pickle=False,\n test_cuda=False\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=0),\n input_size=(2, 3, 4, 5),\n fullname='softmax_functional_dim0',\n test_cuda=False,\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=3),\n input_size=(2, 3, 4, 5),\n fullname='softmax_functional_dim3',\n test_cuda=False,\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.softmax, dim=-1),\n input_size=(),\n fullname='softmax_functional_scalar',\n test_cuda=False,\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=-1),\n input_size=(2, 128), # trigger the last-dim algo in CUDA\n fullname='log_softmax_lastdim',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=1),\n input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo\n fullname='log_softmax_spatial_special',\n pickle=False,\n test_cuda=(not TEST_WITH_ROCM)\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=1),\n input_size=(2, 2, 4, 4), # regular spatial algorithm\n fullname='log_softmax_spatial',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=0),\n input_size=(2, 3, 4, 5),\n fullname='log_softmax_dim0',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=3),\n input_size=(2, 3, 4, 5),\n fullname='log_softmax_dim3',\n pickle=False,\n ),\n dict(\n constructor=wrap_functional(F.log_softmax, dim=0),\n input_size=(),\n fullname='log_softmax_scalar',\n pickle=False,\n ),\n dict(\n fullname='Unfold',\n constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)),\n input_size=(2, 4, 3, 3),\n check_gradgrad=False,\n test_cuda=True,\n ),\n dict(\n fullname='Fold',\n constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)),\n input_size=(2, 16, 4),\n check_gradgrad=False,\n test_cuda=True,\n ),\n dict(\n fullname='Unfold_int_input',\n constructor=lambda: nn.Unfold(2, 1, 0, 1),\n input_size=(2, 4, 3, 3),\n check_gradgrad=False,\n test_cuda=True,\n ),\n dict(\n fullname='Fold_int_input',\n constructor=lambda: nn.Fold(3, 2, 1, 0, 1),\n input_size=(2, 16, 4),\n check_gradgrad=False,\n test_cuda=True,\n ),\n dict(\n module_name='Threshold',\n constructor_args=(2., 1.),\n input_size=(),\n check_inplace=True,\n desc='threshold_value_scalar'\n ),\n\n dict(\n module_name='ReLU',\n input_size=(),\n check_inplace=True,\n desc='scalar'\n ),\n dict(\n module_name='ReLU6',\n input_size=(),\n 
check_inplace=True,\n desc='scalar'\n ),\n dict(\n module_name='RReLU',\n constructor_args=(0.1, 0.9),\n input_size=(),\n desc='with_up_down_scalar',\n test_cuda=False,\n ),\n dict(\n module_name='Hardtanh',\n input_size=(),\n reference_fn=lambda i, _: i.clamp(-1, 1),\n desc='scalar'\n ),\n dict(\n module_name='Sigmoid',\n input_size=(),\n desc='scalar',\n ),\n dict(\n module_name='Tanh',\n input_size=(),\n desc='scalar',\n ),\n dict(\n module_name='Softmax',\n constructor_args=(0,),\n input_size=(),\n reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(0, True)),\n desc='scalar',\n ),\n dict(\n module_name='LogSoftmax',\n constructor_args=(0,),\n input_size=(),\n reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),\n desc='multiparam_scalar',\n ),\n dict(\n module_name='ELU',\n constructor_args=(2.,),\n input_size=(),\n desc='scalar',\n ),\n dict(\n module_name='Hardshrink',\n constructor_args=(2.,),\n input_size=(),\n desc='scalar',\n ),\n dict(\n module_name='LeakyReLU',\n constructor_args=(0.5,),\n input_size=(),\n check_inplace=True,\n desc='with_negval_scalar'\n ),\n dict(\n module_name='LogSigmoid',\n input_size=(),\n reference_fn=lambda i, _: i.sigmoid().log(),\n desc='scalar'\n ),\n dict(\n module_name='Softplus',\n constructor_args=(2, -100),\n input_size=(),\n reference_fn=(lambda i, _: ((i * 2) > -100).type_as(i) * i +\n ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))),\n desc='beta_threshold_scalar',\n ),\n dict(\n module_name='Softshrink',\n constructor_args=(1,),\n input_size=(),\n desc='lambda_scalar',\n ),\n dict(\n module_name='PReLU',\n input_size=(),\n reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],\n desc='scalar',\n ),\n dict(\n module_name='Softsign',\n input_size=(),\n reference_fn=lambda i, _: i.div(1 + torch.abs(i)),\n desc='scalar',\n ),\n dict(\n module_name='Softmin',\n constructor_args=(0,),\n input_size=(),\n desc='scalar',\n ),\n dict(\n module_name='Tanhshrink',\n input_size=(),\n desc='scalar',\n ),\n dict(\n fullname='Padding12_1dcircular',\n constructor=wrap_functional(F.pad, pad=(1, 2), mode='circular'),\n input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),\n reference_fn=lambda i, _: padding1d_circular(i, (1, 2)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding31_1dcircular',\n constructor=wrap_functional(F.pad, pad=(3, 1), mode='circular'),\n input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),\n reference_fn=lambda i, _: padding1d_circular(i, (3, 1)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding33_1dcircular',\n constructor=wrap_functional(F.pad, pad=(3, 3), mode='circular'),\n input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),\n reference_fn=lambda i, _: padding1d_circular(i, (3, 3)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding1221_2dcircular',\n constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1), mode='circular'),\n input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 1, 2, 3]),\n reference_fn=lambda i, _: padding2d_circular(i, (1, 2, 2, 1)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding2322_2dcircular',\n constructor=wrap_functional(F.pad, pad=(2, 3, 2, 2), mode='circular'),\n input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 1, 2, 3]),\n reference_fn=lambda i, _: padding2d_circular(i, (2, 3, 2, 2)),\n 
skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding3331_2dcircular',\n constructor=wrap_functional(F.pad, pad=(3, 3, 3, 1), mode='circular'),\n input_fn=lambda: torch.arange(9, out=torch.DoubleTensor()).reshape([1, 1, 3, 3]),\n reference_fn=lambda i, _: padding2d_circular(i, (3, 3, 3, 1)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding122112_3dcircular',\n constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1, 1, 2), mode='circular'),\n input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),\n reference_fn=lambda i, _: padding3d_circular(i, (1, 2, 2, 1, 1, 2)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding322112_3dcircular',\n constructor=wrap_functional(F.pad, pad=(3, 2, 2, 1, 1, 2), mode='circular'),\n input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),\n reference_fn=lambda i, _: padding3d_circular(i, (3, 2, 2, 1, 1, 2)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n dict(\n fullname='Padding332122_3dcircular',\n constructor=wrap_functional(F.pad, pad=(3, 3, 2, 1, 2, 2), mode='circular'),\n input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),\n reference_fn=lambda i, _: padding3d_circular(i, (3, 3, 2, 1, 2, 2)),\n skip_double=TEST_WITH_ROCM,\n pickle=False,\n ),\n\n dict(\n module_name='Conv1d',\n constructor_args=(3, 4, 2, 2, (1,), 1, 1, True, 'circular'),\n input_size=(2, 3, 5,),\n cudnn=True,\n desc='stride1_pad1circular',\n ),\n dict(\n module_name='Conv1d',\n constructor_args=(3, 4, 2, 2, (2,), 1, 1, True, 'circular'),\n input_size=(2, 3, 5,),\n cudnn=True,\n desc='stride1_pad2circular',\n ),\n dict(\n module_name='Conv2d',\n constructor_args=(3, 4, (3, 3), (2, 2), (1, 2), 1, 1, True, 'circular'),\n input_size=(2, 3, 3, 3),\n cudnn=True,\n desc='pad2circular'\n ),\n dict(\n module_name='Conv3d',\n constructor_args=(3, 4, 2, 2, (1, 2, 3), 1, 1, True, 'circular'),\n input_size=(2, 3, 3, 3, 3),\n cudnn=True,\n desc='stride_pad1circular',\n ),\n]\n\n\ndef kldivloss_reference(input, target, reduction='mean'):\n safe_target = target * (target > 0).type_as(target)\n safe_target_log = (safe_target + (target <= 0).type_as(target)).log()\n result = safe_target * (safe_target_log - input)\n if reduction == 'mean':\n return result.mean()\n elif reduction == 'sum':\n return result.sum()\n elif reduction == 'batchmean' and results.dim() != 0:\n return result.sum() / result.size(0)\n return result\n\n\ndef nlllossNd_reference(input, target, weight=None, ignore_index=-100,\n reduction='mean'):\n assert input.dim() >= 3\n N = input.size(0)\n C = input.size(1)\n out_size = (N,) + input.size()[2:]\n output = torch.zeros(out_size).type_as(input)\n\n if weight is None:\n weight = torch.ones(C).type_as(input)\n total_weight = 0\n for tup in product(*[range(size) for size in out_size]):\n t_nx = target[tup]\n norm = 0. 
if ignore_index == t_nx else weight[t_nx].item()\n input_index = list(tup)\n input_index.insert(1, t_nx)\n output[tup] = -input[tuple(input_index)] * norm\n total_weight += norm\n\n if reduction == 'mean':\n return output.sum() / total_weight\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef nllloss_reference(input, target, weight=None, ignore_index=-100,\n reduction='mean'):\n\n def nll_loss_helper(input, target, weight, ignore_index):\n if target == ignore_index:\n return (0, 0)\n norm = 1 if weight is None else weight[target]\n result = -input[target] * norm\n return (result, norm)\n\n losses_and_weights = [nll_loss_helper(i, t, weight, ignore_index)\n for i, t in zip(input, target)]\n losses, weights = zip(*losses_and_weights)\n losses_tensor = input.new_tensor(losses)\n if reduction == 'mean':\n return sum(losses_tensor) / sum(weights)\n elif reduction == 'sum':\n return sum(losses_tensor)\n else:\n return losses_tensor\n\n\ndef smoothl1loss_reference(input, target, reduction='mean'):\n abs_diff = (input - target).abs()\n ge_one_mask = (abs_diff >= 1).type_as(abs_diff)\n lt_one_mask = (abs_diff < 1).type_as(abs_diff)\n output = ge_one_mask * (abs_diff - 0.5) + lt_one_mask * 0.5 * (abs_diff ** 2)\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef _multilabelmarginloss_reference(input, target):\n targets = []\n for target_index in target:\n if target_index < 0:\n break\n targets.append(target_index)\n\n sum = 0\n for target_index in targets:\n for i in range(0, len(input)):\n if i not in targets:\n sum += max(0, 1 - input[target_index] + input[i])\n\n return sum\n\n\ndef multilabelmarginloss_reference(input, target, reduction='mean'):\n if input.dim() == 1:\n n = 1\n dim = input.size(0)\n output = input.new(n).zero_()\n output[0] = _multilabelmarginloss_reference(input, target)\n else:\n n = input.size(0)\n dim = input.size(1)\n output = input.new(n).zero_()\n for i in range(0, n):\n output[i] = _multilabelmarginloss_reference(input[i], target[i])\n\n if reduction == 'mean':\n return output.mean() / dim\n elif reduction == 'sum':\n return output.sum() / dim\n return output / dim\n\n\ndef hingeembeddingloss_reference(input, target, margin=1.0, reduction='mean'):\n margin_clamp = (margin - input).clamp(min=0).type_as(input)\n output = torch.where(target == 1, input, margin_clamp)\n\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef softmarginloss_reference(input, target, reduction='mean'):\n output = (1 + (-input * target).exp()).log()\n\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef _multimarginloss_reference(input, target_idx, p, margin, weight):\n if weight is None:\n weight = input.new(len(input)).fill_(1)\n\n output = 0\n for i in range(0, len(input)):\n if i != target_idx:\n output += max(0, weight[target_idx] * (margin - input[target_idx] + input[i]) ** p)\n return output\n\n\ndef multimarginloss_reference(input, target, p=1, margin=1, weight=None, reduction='mean'):\n if input.dim() == 1:\n n = 1\n dim = input.size(0)\n return _multimarginloss_reference(input, target[0], p, margin, weight) / dim\n else:\n n = input.size(0)\n dim = input.size(1)\n output = input.new(n)\n for x in range(0, n):\n output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight)\n\n if reduction == 'mean':\n return output.mean() / dim\n elif 
reduction == 'sum':\n return output.sum() / dim\n return output / dim\n\n\ndef cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'):\n def _cos(a, b):\n cos = a.new(a.size(0))\n for i in range(0, a.size(0)):\n cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) * ((b[i] * b[i]).sum() + 1e-12)) ** 0.5)\n return cos\n\n output = torch.where(target == 1, 1 - _cos(input1, input2), (_cos(input1, input2) - margin).clamp(min=0))\n\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef tripletmarginloss_reference(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False,\n reduction='mean'):\n d_p = torch.pairwise_distance(anchor, positive, p, eps)\n d_n = torch.pairwise_distance(anchor, negative, p, eps)\n if swap:\n d_s = torch.pairwise_distance(positive, negative, p, eps)\n d_n = torch.min(d_n, d_s)\n\n output = torch.clamp(margin + d_p - d_n, min=0.0)\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\ndef marginrankingloss_reference(input1, input2, target, margin=0, reduction='mean'):\n output = (-target * (input1 - input2) + margin).clamp(min=0)\n if reduction == 'mean':\n return output.mean()\n elif reduction == 'sum':\n return output.sum()\n return output\n\n\n# this directly follows Graves et al's paper, in contrast to the production implementation, it does not use log-space\ndef ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'):\n input_lengths = torch.as_tensor(input_lengths, dtype=torch.long)\n target_lengths = torch.as_tensor(target_lengths, dtype=torch.long)\n dt = log_probs.dtype\n log_probs = log_probs.double() # we need the accuracy as we are not in logspace\n targets = targets.long()\n cum_target_lengths = target_lengths.cumsum(0)\n losses = []\n for i in range(log_probs.size(1)):\n input_length = input_lengths[i].item()\n target_length = target_lengths[i].item()\n cum_target_length = cum_target_lengths[i].item()\n targets_prime = targets.new_full((2 * target_length + 1,), blank)\n if targets.dim() == 2:\n targets_prime[1::2] = targets[i, :target_length]\n else:\n targets_prime[1::2] = targets[cum_target_length - target_length:cum_target_length]\n probs = log_probs[:input_length, i].exp()\n alpha = log_probs.new_zeros((target_length * 2 + 1,))\n alpha[0] = probs[0, blank]\n alpha[1] = probs[0, targets_prime[1]]\n mask_third = (targets_prime[:-2] != targets_prime[2:])\n for t in range(1, input_length):\n alpha_next = alpha.clone()\n alpha_next[1:] += alpha[:-1]\n alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1))\n alpha = probs[t, targets_prime] * alpha_next\n losses.append(-alpha[-2:].sum().log()[None])\n output = torch.cat(losses, 0)\n if reduction == 'mean':\n return (output / target_lengths.to(dtype=output.dtype, device=output.device)).mean()\n elif reduction == 'sum':\n return output.sum()\n output = output.to(dt)\n return output\n\n\ndef padding1d_circular(input, pad):\n r\"\"\" input:\n [[[0., 1., 2.],\n [3., 4., 5.]]]\n pad: (1, 2)\n output:\n [[[2., 0., 1., 2., 0., 1.],\n [5., 3., 4., 5., 3., 4.]]]\n \"\"\"\n return torch.cat([input[:, :, -pad[0]:], input,\n input[:, :, 0:pad[1]]], dim=2)\n\n\ndef padding2d_circular(input, pad):\n r\"\"\"input:\n [[[[0., 1., 2],\n [3., 4., 5.]]]]\n pad: (1, 2, 2, 1)\n output:\n [[[[2., 0., 1., 2., 0., 1.],\n [5., 3., 4., 5., 3., 4.],\n [2., 0., 1., 2., 0., 1.],\n [5., 3., 4., 5., 3., 4.],\n 
[2., 0., 1., 2., 0., 1.]]]]\n \"\"\"\n input = torch.cat([input[:, :, -pad[2]:], input, input[:, :, 0:pad[3]]], dim=2)\n return torch.cat([input[:, :, :, -pad[0]:], input, input[:, :, :, 0:pad[1]]], dim=3)\n\n\ndef padding3d_circular(input, pad):\n r\"\"\"input:\n [[[[[ 0., 1., 2.],\n [ 3., 4., 5.]],\n [[ 6., 7., 8.],\n [ 9., 10., 11.]]]]]\n pad: (1, 2, 2, 1, 1, 2)\n output: [[[[[ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.]],\n\n [[ 2., 0., 1., 2., 0., 1.],\n [ 5., 3., 4., 5., 3., 4.],\n [ 2., 0., 1., 2., 0., 1.],\n [ 5., 3., 4., 5., 3., 4.],\n [ 2., 0., 1., 2., 0., 1.]],\n\n [[ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.]],\n\n [[ 2., 0., 1., 2., 0., 1.],\n [ 5., 3., 4., 5., 3., 4.],\n [ 2., 0., 1., 2., 0., 1.],\n [ 5., 3., 4., 5., 3., 4.],\n [ 2., 0., 1., 2., 0., 1.]],\n\n [[ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.],\n [11., 9., 10., 11., 9., 10.],\n [ 8., 6., 7., 8., 6., 7.]]]]]\n \"\"\"\n input = torch.cat([input[:, :, -pad[4]:], input, input[:, :, 0:pad[5]]], dim=2)\n input = torch.cat([input[:, :, :, -pad[2]:], input, input[:, :, :, 0:pad[3]]], dim=3)\n return torch.cat([input[:, :, :, :, -pad[0]:], input, input[:, :, :, :, 0:pad[1]]], dim=4)\n\n\nloss_reference_fns = {\n 'KLDivLoss': kldivloss_reference,\n 'NLLLoss': nllloss_reference,\n 'NLLLossNd': nlllossNd_reference,\n 'SmoothL1Loss': smoothl1loss_reference,\n 'MultiLabelMarginLoss': multilabelmarginloss_reference,\n 'HingeEmbeddingLoss': hingeembeddingloss_reference,\n 'SoftMarginLoss': softmarginloss_reference,\n 'MultiMarginLoss': multimarginloss_reference,\n 'CosineEmbeddingLoss': cosineembeddingloss_reference,\n 'TripletMarginLoss': tripletmarginloss_reference,\n 'MarginRankingLoss': marginrankingloss_reference,\n 'CTCLoss': ctcloss_reference,\n}\n\n\ncriterion_tests = [\n dict(\n module_name='L1Loss',\n input_size=(2, 3, 4),\n target_size=(2, 3, 4),\n reference_fn=lambda i, t, _: 1. 
/ i.numel() *\n sum((a - b).abs().sum() for a, b in zip(i, t)),\n ),\n dict(\n module_name='NLLLoss',\n input_fn=lambda: torch.rand(15, 10).log(),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n reference_fn=lambda i, t, m:\n nllloss_reference(i, t, reduction=get_reduction(m)),\n check_sum_reduction=True\n ),\n dict(\n module_name='NLLLoss',\n constructor_args=(None, None, 2),\n input_fn=lambda: torch.rand(15, 10).log(),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n reference_fn=lambda i, t, _: nllloss_reference(i, t, ignore_index=2),\n desc='ignore_index'\n ),\n dict(\n module_name='NLLLoss',\n constructor_args_fn=lambda: (torch.rand(10),),\n input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n reference_fn=lambda i, t, m:\n nllloss_reference(i, t, weight=get_weight(m)),\n desc='weights',\n ),\n dict(\n module_name='NLLLoss',\n constructor_args_fn=lambda: (torch.rand(10), None, 2),\n input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n reference_fn=lambda i, t, m:\n nllloss_reference(i, t, weight=get_weight(m), ignore_index=2),\n desc='weights_ignore_index'\n ),\n dict(\n module_name='NLLLoss',\n constructor_args_fn=lambda: (torch.rand(10), None, -1),\n input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10 + 1).floor().long() - 1,\n reference_fn=lambda i, t, m:\n nllloss_reference(i, t, weight=get_weight(m), ignore_index=-1),\n desc='weights_ignore_index_neg'\n ),\n dict(\n module_name='KLDivLoss',\n input_fn=lambda: torch.rand(10, 10).log(),\n target_fn=lambda: torch.rand(10, 10),\n reference_fn=lambda i, t, m:\n kldivloss_reference(i, t, get_reduction(m)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='MSELoss',\n input_size=(2, 3, 4, 5),\n target_size=(2, 3, 4, 5),\n reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() / (i.numel()\n if get_reduction(m) == 'mean' else 1)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='BCELoss',\n input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),\n target_fn=lambda: torch.randn(15, 10).gt(0).double(),\n reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum() /\n (i.numel() if get_reduction(m) else 1),\n check_gradgrad=False,\n ),\n dict(\n module_name='BCELoss',\n constructor_args_fn=lambda: (torch.rand(10),),\n input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),\n target_fn=lambda: torch.randn(15, 10).gt(0).double(),\n reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() /\n (i.numel() if get_reduction(m) else 1),\n desc='weights',\n check_gradgrad=False,\n ),\n dict(\n module_name='CrossEntropyLoss',\n input_size=(15, 10),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n ),\n dict(\n module_name='CrossEntropyLoss',\n constructor_args_fn=lambda: (torch.rand(10),),\n input_size=(15, 10),\n target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),\n desc='weights',\n ),\n dict(\n module_name='HingeEmbeddingLoss',\n input_size=(10,),\n target_fn=lambda: torch.randn(10).gt(0).double().mul_(2).sub(1),\n reference_fn=lambda i, t, m:\n hingeembeddingloss_reference(i, t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='HingeEmbeddingLoss',\n constructor_args=(0.5,),\n input_size=(10,),\n target_fn=lambda: 
torch.randn(10).gt(0).double().mul_(2).sub(1),\n reference_fn=lambda i, t, m:\n hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),\n desc='margin',\n check_sum_reduction=True,\n ),\n dict(\n module_name='MultiLabelMarginLoss',\n input_size=(10,),\n target_fn=lambda: torch.rand(10).mul(10).floor().long(),\n reference_fn=lambda i, t, m:\n multilabelmarginloss_reference(i, t, reduction=get_reduction(m)),\n desc=\"1d\",\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiLabelMarginLoss',\n input_size=(5, 10),\n target_fn=lambda: torch.rand(5, 10).mul(10).floor().long(),\n reference_fn=lambda i, t, m:\n multilabelmarginloss_reference(i, t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiLabelSoftMarginLoss',\n input_size=(5, 10),\n target_fn=lambda: torch.rand(5, 10).mul(2).floor(),\n reference_fn=lambda i, t, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()).sum() / i.numel(),\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiMarginLoss',\n input_size=(5, 10),\n target_fn=lambda: torch.rand(5).mul(8).floor().long(),\n reference_fn=lambda i, t, m:\n multimarginloss_reference(i, t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiMarginLoss',\n input_size=(10,),\n target_fn=lambda: torch.rand(1).mul(8).floor().long(),\n reference_fn=lambda i, t, m:\n multimarginloss_reference(i, t, reduction=get_reduction(m)),\n desc='1d',\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiMarginLoss',\n constructor_args=(2,),\n input_fn=lambda: torch.rand(5, 10).clamp_(1e-2, 1 - 1e-2),\n target_fn=lambda: torch.rand(5).mul(8).floor().long(),\n reference_fn=lambda i, t, m:\n multimarginloss_reference(i, t, p=2, reduction=get_reduction(m)),\n desc='p',\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiMarginLoss',\n constructor_args=(1, 0.5),\n legacy_constructor_args=(1, None, 0.5),\n input_size=(5, 10),\n target_fn=lambda: torch.rand(5).mul(8).floor().long(),\n reference_fn=lambda i, t, m:\n multimarginloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),\n desc='margin',\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='MultiMarginLoss',\n constructor_args=(1, 1., torch.rand(10)),\n legacy_constructor_args=(1, torch.rand(10)),\n input_size=(5, 10),\n target_fn=lambda: torch.rand(5).mul(8).floor().long(),\n reference_fn=lambda i, t, m:\n multimarginloss_reference(i, t, weight=get_weight(m), reduction=get_reduction(m)),\n desc='weights',\n check_sum_reduction=True,\n check_gradgrad=False,\n ),\n dict(\n module_name='SmoothL1Loss',\n input_size=(5, 10),\n target_size=(5, 10),\n check_sum_reduction=True,\n reference_fn=lambda i, t, m:\n smoothl1loss_reference(i, t, reduction=get_reduction(m)),\n ),\n dict(\n module_name='SoftMarginLoss',\n input_size=(5, 5),\n target_fn=lambda: torch.randn(5, 5).sign(),\n reference_fn=lambda i, t, m:\n softmarginloss_reference(i, t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='CosineEmbeddingLoss',\n input_fn=lambda: (torch.rand(15, 10), torch.rand(15, 10)),\n target_fn=lambda: torch.randn(15).sign(),\n reference_fn=lambda i, t, m:\n cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='CosineEmbeddingLoss',\n constructor_args=(0.7,),\n input_fn=lambda: 
(torch.rand(15, 10), torch.rand(15, 10)),\n target_fn=lambda: torch.randn(15).sign(),\n reference_fn=lambda i, t, m:\n cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)),\n desc='margin',\n check_sum_reduction=True,\n ),\n dict(\n module_name='MarginRankingLoss',\n input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),\n target_fn=lambda: torch.randn(50).sign(),\n reference_fn=lambda i, t, m:\n marginrankingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),\n check_sum_reduction=True,\n ),\n dict(\n module_name='MarginRankingLoss',\n constructor_args=(0.5,),\n input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),\n target_fn=lambda: torch.randn(50).sign(),\n reference_fn=lambda i, t, m:\n marginrankingloss_reference(i[0], i[1], t, margin=0.5, reduction=get_reduction(m)),\n desc='margin',\n check_sum_reduction=True,\n ),\n]\n\n\nclass NNTestCase(TestCase):\n\n def _jacobian(self, input, num_out):\n if isinstance(input, tuple):\n return tuple(self._jacobian(elem, num_out) for elem in input)\n elif isinstance(input, list):\n return [self._jacobian(elem, num_out) for elem in input]\n else:\n return torch.zeros(input.nelement(), num_out)\n\n def _flatten_tensors(self, x):\n if isinstance(x, torch.Tensor):\n if x.is_sparse:\n return x.to_dense().view(-1)\n else:\n return x.view(-1)\n else:\n return tuple(self._flatten_tensors(a) for a in x)\n\n def _zero_grad_input(self, input):\n if isinstance(input, torch.Tensor):\n if input.requires_grad and input.grad is not None:\n input.grad.zero_()\n input.grad.detach_()\n else:\n for i in input:\n self._zero_grad_input(i)\n\n def _analytical_jacobian(self, module, input, jacobian_input=True, jacobian_parameters=True):\n output = self._forward(module, input)\n output_size = output.nelement()\n\n if jacobian_input:\n jacobian_inp = self._jacobian(input, output_size)\n flat_jacobian_input = list(iter_tensors(jacobian_inp))\n\n if jacobian_parameters:\n num_param = sum(p.numel() for p in self._get_parameters(module)[0])\n jacobian_param = torch.zeros(num_param, output_size)\n\n for i in range(output_size):\n param, d_param = self._get_parameters(module)\n # make non grad zeros\n d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param)]\n\n d_out = torch.zeros_like(output)\n flat_d_out = d_out.view(-1)\n flat_d_out[i] = 1\n\n if jacobian_parameters:\n self._zero_grad_parameters(module)\n # Tensors will accumulate gradient from multiple steps\n if jacobian_input:\n self._zero_grad_input(input)\n d_input = self._backward(module, input, output, d_out)\n\n if jacobian_input:\n for jacobian_x, d_x in zip(flat_jacobian_input, iter_tensors(d_input)):\n jacobian_x[:, i] = d_x.contiguous().view(-1)\n if jacobian_parameters:\n jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)\n\n res = tuple()\n if jacobian_input:\n res += jacobian_inp,\n if jacobian_parameters:\n res += jacobian_param,\n\n return res\n\n def _numerical_jacobian(self, module, input, jacobian_input=True, jacobian_parameters=True):\n def fw(input):\n return self._forward(module, input).detach()\n\n res = tuple()\n if jacobian_input:\n res += get_numerical_jacobian(fw, input, eps=1e-6),\n if jacobian_parameters:\n param, _ = self._get_parameters(module)\n res += torch.cat([get_numerical_jacobian(fw, input, p, eps=1e-6) for p in param], 0),\n return res\n\n def check_jacobian(self, module, input, jacobian_input=True):\n jacobian_parameters = bool(self._get_parameters(module)[0])\n analytical 
= self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters)\n numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters)\n analytical_t = list(iter_tensors(analytical))\n numerical_t = list(iter_tensors(numerical))\n\n # TODO: compare structure\n self.assertLessEqual(\n max(a.add(-1, n).abs().max() for a, n in zip(analytical_t, numerical_t)),\n PRECISION\n )\n\n def check_criterion_jacobian(self, criterion, input, target):\n eps = 1e-6\n self._forward_criterion(criterion, input, target)\n analytical_d_x = self._backward_criterion(criterion, input, target)\n numerical_d_x = deepcopy(analytical_d_x)\n\n input_t = iter_tensors(input)\n numerical_t = iter_tensors(numerical_d_x)\n for x, d_x in zip(input_t, numerical_t):\n x = x.view(-1).data\n d_x = d_x.view(-1).data\n for i in range(x.nelement()):\n original = x[i].item()\n x[i] = original + eps\n fx1 = self._forward_criterion(criterion, input, target)\n x[i] = original - eps\n fx2 = self._forward_criterion(criterion, input, target)\n deriv = (fx1 - fx2) / (2. * eps)\n d_x[i] = float(deriv)\n x[i] = original\n\n # TODO: check structure\n analytical_t = list(iter_tensors(analytical_d_x))\n numerical_t = list(iter_tensors(numerical_d_x))\n\n self.assertLessEqual(\n max(a.add(-1, n).abs().max() for a, n in zip(analytical_t, numerical_t)),\n PRECISION\n )\n\n\nclass TestBase(object):\n\n _required_arg_names = {'constructor_args', 'input', 'extra_args'}\n\n def __init__(self, constructor, desc='', reference_fn=None, fullname=None, **kwargs):\n self.desc = desc\n self.fullname = fullname\n self.constructor = constructor\n self.reference_fn = reference_fn\n for name in self._required_arg_names:\n if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs:\n if name in {'constructor_args', 'extra_args'}:\n kwargs[name] = tuple()\n else:\n raise ValueError(\"{}: Specify {} by a value, a function to generate it, or it's size!\"\n .format(self.get_name(), name))\n self._extra_kwargs = kwargs\n self._arg_cache = {}\n\n def get_name(self):\n if self.fullname is not None:\n return 'test_' + self.fullname\n\n test_name = 'test_' + self.constructor.__name__\n if self.desc:\n test_name += '_' + self.desc\n return test_name\n\n def _unpack(self, value):\n if isinstance(value, torch.Tensor):\n return value\n elif is_iterable(value):\n return type(value)(self._unpack(v) for v in value)\n else:\n return value\n\n @property\n def constructor_args(self):\n return self._get_arg('constructor_args', True)\n\n @property\n def extra_args(self):\n return self._get_arg('extra_args', True)\n\n def _get_arg(self, name, unpack):\n assert name in self._required_arg_names\n\n if name not in self._arg_cache:\n fn_name = name + '_fn'\n size_name = name + '_size'\n\n if name in self._extra_kwargs:\n self._arg_cache[name] = self._extra_kwargs[name]\n elif fn_name in self._extra_kwargs:\n self._arg_cache[name] = self._extra_kwargs[fn_name]()\n else:\n assert size_name in self._extra_kwargs\n\n def map_tensor_sizes(sizes):\n if isinstance(sizes, list):\n return [map_tensor_sizes(s) for s in sizes]\n elif isinstance(sizes, torch.Tensor):\n return sizes.double()\n else:\n return torch.randn(sizes)\n\n self._arg_cache[name] = map_tensor_sizes(self._extra_kwargs[size_name])\n\n return self._unpack(self._arg_cache[name]) if unpack else self._arg_cache[name]\n\n def _get_input(self, unpack=True):\n return self._get_arg('input', unpack)\n\n def __call__(self, test_case):\n raise NotImplementedError\n\n\nclass 
ModuleTest(TestBase):\n\n def __init__(self, *args, **kwargs):\n super(ModuleTest, self).__init__(*args, **kwargs)\n self.jacobian_input = kwargs.get('jacobian_input', True)\n self.should_test_cuda = kwargs.get('test_cuda', True)\n self.should_test_pickle = kwargs.get('pickle', True)\n self.check_gradgrad = kwargs.get('check_gradgrad', True)\n self.FIXME_no_cuda_gradgrad_comparison = \\\n kwargs.get('FIXME_no_cuda_gradgrad_comparison', False)\n self.precision = kwargs.get('precision', 2e-4)\n\n def __call__(self, test_case):\n module = self.constructor(*self.constructor_args)\n input = self._get_input()\n\n if self.reference_fn is not None:\n out = test_case._forward(module, input)\n ref_input = deepcopy(input)\n expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0])\n test_case.assertEqual(out, expected_out)\n self.test_noncontig(test_case, module, input)\n\n if self.should_test_pickle:\n # TODO: do this with in-memory files as soon as torch.save will support it\n with TemporaryFile() as f:\n test_case._forward(module, input)\n torch.save(module, f)\n f.seek(0)\n module_copy = torch.load(f)\n test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input))\n\n self._do_test(test_case, module, input)\n\n def noncontiguize(self, obj):\n if isinstance(obj, list):\n return [self.noncontiguize(o) for o in obj]\n tensor = obj\n ndim = tensor.dim()\n # Always making only the last dimension noncontiguous is easy to hide\n # bugs because .view(-1) will still work. So try to find a dim with size\n # > 1 and make that non-contiguous, i.e., stack + select on the\n # dimension directly after that.\n dim = ndim\n for d in range(ndim):\n if tensor.size(d) > 1:\n dim = d + 1\n break\n noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach()\n assert noncontig.numel() == 1 or not noncontig.is_contiguous()\n noncontig.requires_grad = tensor.requires_grad\n return noncontig\n\n def test_noncontig(self, test_case, module, input):\n # check no scalars, can't make non-contig\n if isinstance(input, torch.Tensor) and input.dim() == 0:\n return\n if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):\n return\n\n test_case._zero_grad_parameters(module)\n test_case._zero_grad_input(input)\n with freeze_rng_state():\n output = test_case._forward(module, input)\n grad_output = output.new(output.shape).normal_()\n output = output.clone()\n d_input = deepcopy(test_case._backward(module, input, output, grad_output))\n d_param = deepcopy(test_case._get_parameters(module)[1])\n\n nc_input = self.noncontiguize(input)\n nc_grad_output = self.noncontiguize(grad_output)\n for contig_i, contig_g in product((True, False), repeat=2):\n i = input if contig_i else nc_input\n go = grad_output if contig_g else nc_grad_output\n test_case._zero_grad_parameters(module)\n test_case._zero_grad_input(i)\n with freeze_rng_state():\n out = test_case._forward(module, i)\n grad = test_case._backward(module, i, out, go)\n\n test_case.assertEqual(out, output)\n test_case.assertEqual(grad, d_input, 1e-4)\n test_case.assertEqual(test_case._get_parameters(module)[1], d_param)\n\n def test_cuda(self, test_case):\n if not TEST_CUDA or not self.should_test_cuda:\n raise unittest.SkipTest('Excluded from CUDA tests')\n try:\n cpu_input = self._get_input()\n type_map = {'torch.DoubleTensor': torch.cuda.FloatTensor}\n gpu_input = to_gpu(cpu_input, type_map=type_map)\n\n cpu_module = self.constructor(*self.constructor_args)\n gpu_module = 
self.constructor(*self.constructor_args).float().cuda()\n cpu_param = test_case._get_parameters(cpu_module)\n gpu_param = test_case._get_parameters(gpu_module)\n for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]):\n gpu_p.data.copy_(cpu_p)\n\n test_case._zero_grad_input(cpu_input)\n test_case._zero_grad_input(gpu_input)\n test_case._zero_grad_parameters(cpu_module)\n test_case._zero_grad_parameters(gpu_module)\n cpu_output = test_case._forward(cpu_module, cpu_input)\n gpu_output = test_case._forward(gpu_module, gpu_input)\n test_case.assertEqual(cpu_output, gpu_output, self.precision)\n\n # Run backwards on CPU and GPU and compare results\n for _ in range(5):\n cpu_gradOutput = cpu_output.clone().normal_()\n gpu_gradOutput = cpu_gradOutput.type('torch.cuda.FloatTensor')\n cpu_gradInput = test_case._backward(cpu_module, cpu_input, cpu_output, cpu_gradOutput)\n gpu_gradInput = test_case._backward(gpu_module, gpu_input, gpu_output, gpu_gradOutput)\n test_case.assertEqual(cpu_gradInput, gpu_gradInput, self.precision)\n for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]):\n test_case.assertEqual(cpu_d_p, gpu_d_p, self.precision)\n\n # Run double-backwards on CPU and GPU and compare results\n if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison:\n cpu_output = cpu_module(cpu_input)\n gpu_output = gpu_module(gpu_input)\n\n cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True)\n gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach()\n gpu_gradOutput.requires_grad = True\n\n cpu_gradInputs = torch.autograd.grad(\n cpu_output,\n (cpu_input,) + tuple(cpu_module.parameters()),\n cpu_gradOutput,\n create_graph=True)\n gpu_gradInputs = torch.autograd.grad(\n gpu_output,\n (gpu_input,) + tuple(gpu_module.parameters()),\n gpu_gradOutput,\n create_graph=True)\n\n for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs):\n test_case.assertEqual(cpu_d_i, gpu_d_i, self.precision)\n\n # We mix output into the second backwards computation so that\n # torch.autograd.grad doesn't complain that some inputs\n # are unreachable (which can happen if you differentiate\n # only on the gradient.\n cpu_gg = torch.autograd.grad(\n cpu_output.sum() + sum(map(lambda x: x.sum(), cpu_gradInputs)),\n (cpu_input, cpu_gradOutput) + tuple(cpu_module.parameters()),\n retain_graph=True)\n gpu_gg = torch.autograd.grad(\n gpu_output.sum() + sum(map(lambda x: x.sum(), gpu_gradInputs)),\n (gpu_input, gpu_gradOutput) + tuple(gpu_module.parameters()),\n retain_graph=True)\n\n test_case.assertEqual(cpu_gradInput, gpu_gradInput, self.precision)\n for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg):\n test_case.assertEqual(cpu_d_p, gpu_d_p, self.precision)\n\n self.test_noncontig(test_case, gpu_module, gpu_input)\n except NotImplementedError:\n pass\n # TODO: remove this after CUDA scatter_ is implemented\n except AttributeError as e:\n if len(e.args) == 1 and \"'FloatTensor' object has no attribute 'scatter_'\" in e.args[0]:\n pass\n else:\n raise\n\n\nclass CriterionTest(TestBase):\n\n _required_arg_names = TestBase._required_arg_names.union({'target'})\n\n def __init__(self, *args, **kwargs):\n super(CriterionTest, self).__init__(*args, **kwargs)\n self.should_test_cuda = kwargs.get('test_cuda', True)\n self.check_forward_only = kwargs.get('check_forward_only', True)\n\n def _get_target(self):\n return self._get_arg('target', True)\n\n def __call__(self, test_case):\n module = self.constructor(*self.constructor_args)\n input = self._get_input()\n\n # Check that these methods don't raise errors\n 
module.__repr__()\n str(module)\n\n target = self._get_target()\n\n if self.reference_fn is not None:\n out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args)\n ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,)\n expected_out = self.reference_fn(*ref_args)\n test_case.assertEqual(out, expected_out)\n\n if self.check_forward_only:\n return\n\n test_case.check_criterion_jacobian(module, input, target)\n self._do_extra_tests(test_case, module, input, target)\n\n def test_cuda(self, test_case):\n if not TEST_CUDA or not self.should_test_cuda:\n raise unittest.SkipTest('Excluded from CUDA tests')\n try:\n cpu_input = self._get_input()\n type_map = {\n 'torch.DoubleTensor': torch.cuda.FloatTensor,\n }\n gpu_input = to_gpu(cpu_input, type_map=type_map)\n\n cpu_target = self._get_target()\n gpu_target = to_gpu(cpu_target, type_map=type_map)\n\n cpu_module = self.constructor(*self.constructor_args)\n gpu_module = self.constructor(*self.constructor_args).float().cuda()\n\n cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target)\n gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target)\n test_case.assertEqual(cpu_output, gpu_output, 4e-4)\n\n gradOutput = torch.randn(())\n cpu_gradInput = test_case._backward_criterion(cpu_module, cpu_input, cpu_target, gradOutput)\n gpu_gradInput = test_case._backward_criterion(gpu_module, gpu_input, gpu_target, gradOutput)\n test_case.assertEqual(cpu_gradInput, gpu_gradInput, 4e-4)\n except NotImplementedError:\n pass\n\n def _do_extra_tests(self, test_case, module, input, target):\n pass\n" ]
[ [ "torch.autograd.gradcheck.iter_tensors", "torch.cat", "torch.nn.EmbeddingBag", "torch.nn.Unfold", "torch.randperm", "torch.ones", "torch.autograd.gradcheck.get_numerical_jacobian", "torch.load", "torch.nn.FractionalMaxPool3d", "torch.exp", "torch.where", "torch.nn.ConvTranspose1d", "torch.nn.Conv1d", "torch.nn.ConvTranspose2d", "torch.abs", "torch.DoubleTensor", "torch.randn_like", "torch.nn.Conv3d", "torch.zeros_like", "torch.as_tensor", "torch.empty", "torch.Tensor", "torch.pairwise_distance", "torch.nn.Embedding", "torch.zeros", "torch.nn.FractionalMaxPool2d", "torch.min", "torch.save", "torch.clamp", "torch.nn.Conv2d", "torch.rand", "torch.nn.Sigmoid", "torch.nn.Fold", "torch.randn", "torch.empty_like" ] ]
dodohow1011/waveglow
[ "53d9883a73f7f0569b25eb665788ca59368f9413" ]
[ "train.py" ]
[ "# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\nimport argparse\nimport json\nimport os\nimport sys\nimport torch\n\n#=====START: ADDED FOR DISTRIBUTED======\nfrom distributed import init_distributed, apply_gradient_allreduce, reduce_tensor\nfrom torch.utils.data.distributed import DistributedSampler\n#=====END: ADDED FOR DISTRIBUTED======\n\nfrom torch.utils.data import DataLoader\nfrom glow import WaveGlow, WaveGlowLoss\n# from mel2samp import Mel2Samp\nfrom data_utils import TextMelLoader, TextMelCollate\nfrom hparams import create_hparams\nfrom utils import to_gpu\nfrom logger import waveglowLogger\n\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n\ndef load_checkpoint(checkpoint_path, model, optimizer):\n assert os.path.isfile(checkpoint_path)\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n iteration = checkpoint_dict['iteration']\n optimizer.load_state_dict(checkpoint_dict['optimizer'])\n model_for_loading = checkpoint_dict['model']\n model.load_state_dict(model_for_loading.state_dict())\n print(\"Loaded checkpoint '{}' (iteration {})\" .format(\n checkpoint_path, iteration))\n return model, optimizer, iteration\n\ndef save_checkpoint(model, optimizer, learning_rate, iteration, filepath):\n print(\"Saving model and optimizer state at iteration {} to {}\".format(\n iteration, filepath))\n model_for_saving = WaveGlow(hparams).cuda()\n model_for_saving.load_state_dict(model.state_dict())\n torch.save({'model': model_for_saving,\n 'iteration': iteration,\n 'optimizer': optimizer.state_dict(),\n 'learning_rate': learning_rate}, filepath)\n\ndef parse_batch(batch):\n text_padded, input_lengths, mel_padded, gate_padded, output_lengths = batch\n text_padded = to_gpu(text_padded).long()\n input_lengths = to_gpu(input_lengths).long()\n max_len = torch.max(input_lengths.data).item()\n mel_padded = to_gpu(mel_padded).float()\n output_lengths = 
to_gpu(output_lengths).long()\n\n return text_padded, input_lengths, mel_padded, max_len, output_lengths\n\ndef prepare_directories_and_logger(output_directory, log_directory):\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n os.chmod(output_directory, 0o775)\n logger = waveglowLogger(os.path.join(output_directory, log_directory))\n \n return logger\n\ndef load_pretrained_taco(taco2_path, hparams):\n assert os.path.isfile(taco2_path)\n print(\"Loading pretrain2 tacotron2 model, '{}'\".format(taco2_path))\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n Taco2 = Tacotron2(hparams).cuda()\n Taco2.load_state_dict(checkpoint_dict['state_dict'])\n print(\"Loaded pretrain2 tacotron2 model, '{}'\".format(taco2_path))\n return Taco2\n\n\ndef train(num_gpus, rank, group_name, output_directory, log_directory, checkpoint_path, hparams):\n torch.manual_seed(hparams.seed)\n torch.cuda.manual_seed(hparams.seed)\n\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n init_distributed(rank, num_gpus, group_name, **dist_config)\n #=====END: ADDED FOR DISTRIBUTED======\n\n criterion = WaveGlowLoss(hparams.sigma)\n model = WaveGlow(hparams).cuda()\n\n\n #=====START: ADDED FOR DISTRIBUTED======\n if num_gpus > 1:\n model = apply_gradient_allreduce(model)\n #=====END: ADDED FOR DISTRIBUTED======\n\n learning_rate = hparams.learning_rate\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n if hparams.fp16_run:\n from apex import amp\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1')\n\n # Load checkpoint if one exists\n iteration = 0\n if checkpoint_path:\n model, optimizer, iteration = load_checkpoint(checkpoint_path, model, optimizer)\n iteration += 1 # next iteration is iteration + 1\n\n trainset = TextMelLoader(hparams.training_files, hparams)\n collate_fn = TextMelCollate()\n # =====START: ADDED FOR DISTRIBUTED======\n train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None\n # =====END: ADDED FOR DISTRIBUTED======\n batch_size = hparams.batch_size\n train_loader = DataLoader(trainset, num_workers=0, shuffle=False,\n sampler=train_sampler,\n batch_size=batch_size,\n pin_memory=False,\n drop_last=True, collate_fn=collate_fn)\n\n # Get shared output_directory readya\n\n if rank == 0:\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n os.chmod(output_directory, 0o775)\n print(\"output directory\", output_directory)\n\n if hparams.with_tensorboard and rank == 0:\n logger = prepare_directories_and_logger(output_directory, log_directory)\n\n model.train()\n epoch_offset = max(0, int(iteration / len(train_loader)))\n print (\"Total Epochs: {}\".format(hparams.epochs))\n print (\"Batch Size: {}\".format(hparams.batch_size))\n # ================ MAIN TRAINNIG LOOP! 
===================\n for epoch in range(epoch_offset, hparams.epochs):\n print(\"Epoch: {}\".format(epoch))\n for i, batch in enumerate(train_loader):\n model.zero_grad()\n\n text_padded, input_lengths, mel_padded, max_len, output_lengths = parse_batch(batch)\n # mel_padded = mel_padded.transpose(1, 2)\n src_pos = torch.arange(hparams.n_position)\n src_pos = to_gpu(src_pos).long().unsqueeze(0)\n src_pos = src_pos.expand(hparams.batch_size, -1)\n \n\n z, log_s_list, log_det_w_list, enc_slf_attn, dec_enc_attn, out_mel = model(mel_padded, text_padded, src_pos)\n outputs = (z, log_s_list, log_det_w_list, out_mel)\n loss = criterion(outputs, mel_padded, iteration)\n if num_gpus > 1:\n reduced_loss = reduce_tensor(loss.data, num_gpus).item()\n else:\n reduced_loss = loss.item()\n\n if hparams.fp16_run:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)\n\n optimizer.step()\n\n print(\"{}:\\t{:.9f}\".format(iteration, reduced_loss))\n if hparams.with_tensorboard and rank == 0:\n logger.log_training(reduced_loss, grad_norm, learning_rate, iteration)\n\n if (iteration % hparams.iters_per_checkpoint == 0):\n if rank == 0:\n logger.log_alignment(model, enc_slf_attn, dec_enc_attn, out_mel, mel_padded, iteration)\n checkpoint_path = \"{}/waveglow_{}\".format(\n output_directory, iteration)\n save_checkpoint(model, optimizer, learning_rate, iteration,\n checkpoint_path)\n\n iteration += 1\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output_directory', type=str, help='directory to save checkpoints')\n parser.add_argument('-l', '--log_directory', type=str,\n help='directory to save tensorboard logs')\n '''parser.add_argument('-c', '--config', type=str,\n help='JSON file for configuration')'''\n parser.add_argument('-p', '--checkpoint_path', type=str, default=None,\n required=False, help='checkpoint path')\n parser.add_argument('-r', '--rank', type=int, default=0,\n help='rank of process for distributed')\n parser.add_argument('-g', '--group_name', type=str, default='',\n help='name of group for distributed')\n parser.add_argument('--hparams', type=str,\n required=False, help='comma separated name=value pairs')\n \n args = parser.parse_args()\n hparams = create_hparams(args.hparams)\n\n num_gpus = 1\n if num_gpus > 1:\n if args.group_name == '':\n print(\"WARNING: Multiple GPUs detected but no distributed group set\")\n print(\"Only running 1 GPU. Use distributed.py for multiple GPUs\")\n num_gpus = 1\n\n if num_gpus == 1 and args.rank != 0:\n raise Exception(\"Doing single GPU training on rank > 0\")\n\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = False\n \n train(num_gpus, args.rank, args.group_name, args.output_directory, args.log_directory, args.checkpoint_path, hparams)\n" ]
[ [ "torch.cuda.manual_seed", "torch.arange", "torch.max", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler", "torch.load" ] ]
cctrunz/landlab
[ "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939", "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939", "4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939" ]
[ "landlab/utils/fault_facet_finder.py", "examples/flow_routing/simple_sp_driver_wlakes.py", "landlab/grid/tests/test_raster_grid/test_BC_updates.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"\nThis class is designed to provide functions to allow the automated\nidentification of planar facet surfaces above fault traces.\nModule is SLOW (e.g., minutes+ per full analysis of a \"large\" data set). It is\nonly intended for model post-analysis or DEM analysis. Do not loop this class!!\nThis is part of the NSF funded project investigating fault scarp degradation,\nTucker, Hobley, McCoy.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nimport numpy as np\nimport six\nfrom pylab import figure, plot, show\n\nfrom landlab.plot import imshow as gridshow\n\nif six.PY3:\n\n def cmp(a, b):\n return (a > b) - (a < b)\n\n\nclass find_facets(object):\n \"\"\"\n Note that this class assumes the grid does not change during the model\n run. Changes to data stored in the grid should (?) update automatically.\n\n If *fault_azimuth* is supplied, it should be -pi/2 < az <= pi/2 (i.e.,\n we don't consider fault dip, even if it's known).\n \"\"\"\n\n def __init__(self, grid, elev_field=\"topographic__elevation\", fault_azimuth=None):\n \"\"\"\n Note that this class assumes the grid does not change during the model\n run. Changes to data stored in the grid should (?) update automatically.\n\n If *fault_azimuth* is supplied, it should be -pi/2 < az <= pi/2 (i.e.,\n we don't consider fault dip, even if it's known).\n \"\"\"\n if not np.isclose(grid.dx, grid.dy):\n raise ValueError(\"row and column spacing must be the same\")\n\n self.grid = grid\n self.elevs = self.grid.at_node[elev_field]\n self.az = fault_azimuth\n\n def analyse_fault_trace(self, fault_trace_node_ids):\n \"\"\"\n This method takes the grid and an array listing the (contiguous) node\n ids of cells that contain a single fault segment trace of interest.\n\n It sets and returns the azimuth of the fault trace, az,\n -pi/2 < az <= pi/2.\n (i.e., smallest angle between north and the trace).\n \"\"\"\n self.ft_trace_node_ids = fault_trace_node_ids\n x = self.grid.node_x[fault_trace_node_ids]\n y = self.grid.node_y[fault_trace_node_ids]\n (grad, offset) = np.polyfit(x, y, 1)\n angle = np.arctan(grad)\n if grad >= 0.0:\n self.az = np.pi / 2.0 - angle\n else:\n self.az = -np.pi / 2.0 - angle\n\n return self.az\n\n def set_slopes_aspects(self):\n self.slopes = self.grid.calc_slopes_of_nodes(elevs=self.elevs)\n self.aspect = self.grid.calc_aspect_of_node(elevs=self.elevs)\n print(\"Calculated and stored slopes and aspects...\")\n\n def define_aspect_node_subset(self, angle_tolerance=5.0):\n \"\"\"\n This method sets and returns a list of all nodes in the landscape which\n have an\n aspect within 5 degrees of perpendicular to the fault trace.\n\n It assumes self.az, the angle between north and the fault trace, has\n already been set, and also that self.slopes and self.aspect are also\n set.\n The returned boolean array is num_core_nodes long.\n *angle_tolerance* is the angle in degrees that the aspect must be within\n from the fault trace angle.\n NB: this version is too discriminating on the aspect restriction,\n presumably because we use only a single ft angle for what's really\n a 2d trace. 
Need to work with local aspect.\n \"\"\"\n perp_to_az = np.pi / 2.0 + self.az\n five_degrees = np.pi / 180.0 * angle_tolerance # note the silly naming\n perp_plus_five = (perp_to_az + five_degrees) % (2.0 * np.pi)\n perp_minus_five = (perp_to_az - five_degrees) % (2.0 * np.pi)\n other_dip_plus_five = (perp_to_az + np.pi + five_degrees) % (2.0 * np.pi)\n other_dip_minus_five = (perp_to_az + np.pi - five_degrees) % (2.0 * np.pi)\n\n # need to be careful near the 2pi->0 discontinuity...\n greater_condition = np.greater(self.aspect, perp_minus_five)\n lesser_condition = np.less(self.aspect, perp_plus_five)\n if (perp_to_az - five_degrees) < 0.0:\n condition_first_dip = np.logical_or(greater_condition, lesser_condition)\n else:\n condition_first_dip = np.logical_and(greater_condition, lesser_condition)\n greater_condition_2 = np.greater(self.aspect, other_dip_minus_five)\n lesser_condition_2 = np.less(self.aspect, other_dip_plus_five)\n if (perp_to_az + np.pi + five_degrees) // (\n 2.0 * np.pi\n ): # top condition exceeds 2pi\n condition_opposite_dip = np.logical_or(\n greater_condition_2, lesser_condition_2\n )\n else:\n condition_opposite_dip = np.logical_and(\n greater_condition_2, lesser_condition_2\n )\n\n self.aspect_close_nodes = np.logical_or(\n condition_first_dip, condition_opposite_dip\n )\n print(\"Calculated and stored nodes with aspects compatible with fault trace...\")\n return self.aspect_close_nodes\n\n def define_aspect_node_subset_local(\n self, dist_tolerance=4.0, angle_tolerance=15.0, dip_dir=\"E\"\n ):\n \"\"\"\n \"\"\"\n grid = self.grid\n try:\n print(\"using subset\")\n # remember, steep_nodes is already core_nodes.size long\n subset = np.where(self.steep_nodes)[0]\n except NameError:\n print(\"using all nodes\")\n subset = np.arange(grid.core_nodes.size)\n closest_ft_node = np.empty(subset.size, dtype=int)\n angle_to_ft = np.empty_like(closest_ft_node, dtype=float)\n new_angle_to_ft = np.empty_like(closest_ft_node, dtype=float)\n distance_to_ft = np.empty_like(closest_ft_node, dtype=float)\n distance_to_ft.fill(sys.float_info.max)\n new_distance_to_ft = np.empty_like(closest_ft_node, dtype=float)\n for i in self.ft_trace_node_ids:\n grid.calc_distances_of_nodes_to_point(\n (grid.node_x[i], grid.node_y[i]),\n node_subset=grid.core_nodes[subset],\n get_az=\"angles\",\n out_distance=new_distance_to_ft,\n out_azimuth=new_angle_to_ft,\n )\n closer_nodes = new_distance_to_ft < distance_to_ft\n distance_to_ft[closer_nodes] = new_distance_to_ft[closer_nodes]\n angle_to_ft[closer_nodes] = new_angle_to_ft[closer_nodes]\n closest_ft_node[closer_nodes] = i\n self.closest_ft_node = -np.ones(grid.core_nodes.size)\n self.distance_to_ft = -np.ones(grid.core_nodes.size)\n self.angle_to_ft = -np.ones(grid.core_nodes.size)\n self.closest_ft_node[subset] = closest_ft_node\n self.distance_to_ft[subset] = distance_to_ft\n # angle_to_ft is actually the angle_from_ft! So we need to adjust.\n # a second problem is that pts downslope (opposite az) can also be on the line.\n # solution - take a dip_dir input...\n angle_to_ft = (angle_to_ft + np.pi) % (2.0 * np.pi)\n self.angle_to_ft[subset] = angle_to_ft\n # gridshow.imshow_grid_at_node(self.grid, self.distance_to_ft)\n # show()\n # gridshow.imshow_grid_at_node(self.grid, self.angle_to_ft)\n # show()\n # the relevant condition is now that the local aspect and angle to fault\n # are the same...\n # We need to bias the five degrees against distant points, as it's easier\n # to have similar angles in the far field. 
Rule should be in px - the\n # two angles should be within *angle_tol* px of each other at the ft\n # trace.\n divergence_at_ft = distance_to_ft * np.tan(\n (angle_to_ft - self.aspect[subset]) % np.pi\n )\n # might be *too* forgiving for close-in nodes\n condition = np.less(np.fabs(divergence_at_ft), grid.dx * dist_tolerance)\n # ...so add another tester; must be w/i 15 degrees of each other:\n diff_angles = np.min(\n [\n np.fabs(angle_to_ft - self.aspect[subset]),\n np.fabs(np.fabs(angle_to_ft - self.aspect[subset]) - 2.0 * np.pi),\n ],\n axis=0,\n )\n self.diff_angles = np.empty(grid.core_nodes.size, dtype=float)\n self.diff_angles.fill(sys.float_info.max)\n self.diff_angles[subset] = diff_angles\n # gridshow.imshow_grid_at_node(self.grid, self.angle_to_ft)\n # show()\n figure(6)\n gridshow.imshow_grid_at_node(\n self.grid, np.where(self.diff_angles < 100000.0, self.diff_angles, -1.0)\n )\n condition2 = np.less(diff_angles, angle_tolerance * np.pi / 180.0)\n condition = np.logical_and(condition, condition2)\n core_nodes_size_condition = np.zeros(grid.core_nodes.size, dtype=bool)\n core_nodes_size_condition[subset] = condition\n # gridshow.imshow_grid_at_node(self.grid, core_nodes_size_condition)\n # show()\n # core_nodes_size_condition = np.zeros(grid.core_nodes.size, dtype=bool)\n # core_nodes_size_condition[subset] = condition2\n # gridshow.imshow_grid_at_node(self.grid, core_nodes_size_condition)\n # show()\n self.aspect_close_nodes = core_nodes_size_condition\n print(\"Calculated and stored nodes with aspects compatible with fault trace...\")\n return self.aspect_close_nodes\n\n def define_steep_nodes(self, threshold_in_degrees=5.0):\n \"\"\"\n This method sets and returns a list of all nodes in the landscape which\n are \"steep\" and could be part of a facet.\n The critical hillslope angle is set by *threshold_in_degrees*, and\n defaults to 5.\n\n This assumes you have already called define_aspect_node_subset, in\n which self.slope is set.\n The returned boolean array is num_core_nodes long.\n \"\"\"\n threshold_in_rads = threshold_in_degrees * np.pi / 180.0\n self.steep_nodes = np.greater(self.slopes, threshold_in_rads)\n print(\"Calculated and stored nodes with slopes exceeding slope threshold...\")\n # gridshow.imshow_grid_at_node(self.grid, self.steep_nodes)\n # show()\n return self.steep_nodes\n\n def show_possible_nodes(self):\n \"\"\"\n Once the subsets by aspect and slope have been set, call this function\n to see both the whole elevation map, and the subset of nodes that\n will be searched.\n \"\"\"\n possible_core_nodes = np.logical_and(self.steep_nodes, self.aspect_close_nodes)\n figure(1)\n gridshow.imshow_grid_at_node(self.grid, self.elevs)\n figure(2)\n gridshow.imshow_grid_at_node(self.grid, self.slopes)\n figure(3)\n gridshow.imshow_grid_at_node(self.grid, self.aspect)\n figure(4)\n gridshow.imshow_grid_at_node(self.grid, possible_core_nodes)\n show()\n\n def find_coherent_facet_patches(self, tolerance=3.0, threshold_num_px=12):\n \"\"\"\n This method searches the (already determined) possible pixels for\n patches with coherent slope angles, within a *tolerance* (in degrees).\n A patch is only recorded if it consists of at least *threshold_num_px*.\n\n The method records and returns:\n\n * a ragged array of lists, where each list is the pixels comprising\n each facet patch, and\n * a (num_patches, 2) array recording the mean slope and and its stdev\n for each patch.\n \"\"\"\n self.possible_core_nodes = np.where(\n np.logical_and(self.steep_nodes, 
self.aspect_close_nodes)\n )[0]\n consistent_slope_patches = []\n for i in self.possible_core_nodes:\n nodes_in_patch = np.array([i])\n patch_size = 1\n mean_slope = self.slopes[nodes_in_patch]\n while 1:\n possible_neighbors = np.union1d(\n self.grid.active_adjacent_nodes_at_node[nodes_in_patch].flat,\n self.possible_core_nodes,\n )\n neighbor_slopes = self.slopes[possible_neighbors]\n low_tol_condition = np.greater(neighbor_slopes, mean_slope - tolerance)\n high_tol_condition = np.less(neighbor_slopes, mean_slope + tolerance)\n total_condition = np.logical_and(low_tol_condition, high_tol_condition)\n nodes_in_patch = possible_neighbors[total_condition]\n new_patch_size = nodes_in_patch.size\n if patch_size == new_patch_size:\n break\n else:\n patch_size = new_patch_size\n mean_slope = np.mean(neighbor_slopes[total_condition])\n if new_patch_size > threshold_num_px:\n consistent_slope_patches.append(nodes_in_patch)\n # May also need a uniqueness test: a px should only appear in one list.\n # (i.e., all patches containing a given px should all be identical)\n self.consistent_slope_patches = consistent_slope_patches\n return consistent_slope_patches\n\n def find_slope_lines(self, tolerance=1.0):\n \"\"\"\n This method attempts to find slope-consistent line profiles up facets,\n perpendicular to the fault.\n Assumes you used define_aspect_node_subset_local().\n \"\"\"\n grid = self.grid\n self.possible_core_nodes = np.where(\n np.logical_and(self.steep_nodes, self.aspect_close_nodes)\n )[0]\n pcn = self.possible_core_nodes\n unique_starting_pts = np.unique(self.closest_ft_node[pcn]) # in real node nos\n # set up places to store the profile data:\n profile_ft_node_id = []\n profile_ft_node_x = []\n profile_ft_node_y = []\n profile_ft_node_z = []\n profile_ft_node_dist = []\n profile_x_facet_pts = []\n profile_z_facet_pts = []\n profile_S_facet_pts = []\n count = 0\n for i in unique_starting_pts:\n count += 1\n print(\"Running \", count, \" of \", unique_starting_pts.size)\n # set the local angle of the ft trace:\n ft_pt_distances_to_node = self.grid.calc_distances_of_nodes_to_point(\n (grid.node_x[i], grid.node_y[i]), node_subset=self.ft_trace_node_ids\n )\n close_ft_nodes = np.less(ft_pt_distances_to_node, 5.0 * grid.dx)\n x = grid.node_x[self.ft_trace_node_ids[close_ft_nodes]]\n y = grid.node_y[self.ft_trace_node_ids[close_ft_nodes]]\n (grad, offset) = np.polyfit(x, y, 1)\n condition = np.equal(self.closest_ft_node[pcn], i)\n nodes_possible = pcn[condition]\n print(nodes_possible.size, \" nodes\")\n if nodes_possible.size > 10.0:\n # their_az = self.angle_to_ft[nodes_possible]\n # their_diff_angles = self.diff_angles[nodes_possible]\n their_elevs = self.elevs[grid.core_nodes][nodes_possible]\n # their_distances = self.distance_to_ft[nodes_possible]\n # need to make new distances so we remove the ambiguity of angle around the ft point (i.e., dists from a far-field pt on the ft normal)\n # now make a multiplier to make sure the reference point for\n # new distances is far from the actual pts:\n multiplier = 10.0 * np.ptp(grid.node_y[grid.core_nodes[nodes_possible]])\n # derive the position:\n x_ref = grid.node_x[i] + cmp(\n grid.node_x[i],\n np.mean(grid.node_x[grid.core_nodes[nodes_possible]]),\n ) * multiplier * abs(grad)\n y_ref = (\n grid.node_y[i]\n + cmp(\n grid.node_y[i],\n np.mean(grid.node_y[grid.core_nodes[nodes_possible]]),\n )\n * multiplier\n )\n # get new absolute distances\n dist_to_ft = self.grid.calc_distances_of_nodes_to_point(\n (x_ref, y_ref), node_subset=np.array([i])\n 
)\n dists_along_profile = (\n self.grid.calc_distances_of_nodes_to_point(\n (x_ref, y_ref), node_subset=grid.core_nodes[nodes_possible]\n )\n - dist_to_ft\n )\n # note the ft is now the origin, but pts might be back-to-front (consistently, though)\n # sort the distances. Remove any pts that aren't in a \"cluster\".\n # We assume there will be one big \"bunched\" plane, then a load\n # of outliers\n dist_order = np.argsort(dists_along_profile)\n dist_diffs = np.diff(dists_along_profile[dist_order])\n print(\"dists along profile sorted: \", dists_along_profile[dist_order])\n print(\"dist diffs: \", dist_diffs)\n # max_diff = 3.*np.median(dist_diffs) #######this might need\n # attention if there's a heavy tail on the distances\n if grad < 1:\n mod = np.sqrt(1.0 + grad ** 2.0)\n else:\n mod = np.sqrt(1.0 + (1.0 / grad) ** 2.0)\n max_diff = 1.9 * mod * grid.dx\n locs_of_large_diffs = np.where(dist_diffs > max_diff)[0]\n # there should only be 1 place on the line where there's a cluster, i.e., a large pts_betw_of_max_diffs.\n # This is what we're seeking.\n # ...this can be empty quite easily\n pts_betw_large_diffs = np.diff(locs_of_large_diffs)\n # need to be careful here in case the where call gives an empty\n # array\n if locs_of_large_diffs.size > 1:\n biggest_interval_loc = np.argmax(pts_betw_large_diffs)\n elif locs_of_large_diffs.size == 1:\n # one side or the other must be bigger:\n if 2.0 * locs_of_large_diffs[0] < dists_along_profile.size - 1:\n locs_of_large_diffs = np.array(\n [locs_of_large_diffs[0], (dists_along_profile.size - 1)]\n )\n else:\n locs_of_large_diffs = np.array([0, locs_of_large_diffs[0]])\n biggest_interval_loc = np.array([0])\n # here we assume that the single large diff must be further\n # from the ft than the plane\n else:\n locs_of_large_diffs = np.array([0, (dists_along_profile.size - 1)])\n biggest_interval_loc = np.array([0])\n # ...all the pts in the line are one cluster\n # apply a test to ensure we only save \"big\" patches; a\n # threshold of 10 pts on the line\n try:\n patch_size = pts_betw_large_diffs[biggest_interval_loc]\n except IndexError: # pts_betw_large_diffs is empty\n patch_size = locs_of_large_diffs[1] - locs_of_large_diffs[0]\n if patch_size > 10.0:\n start_pt_of_cluster = locs_of_large_diffs[biggest_interval_loc] + 1\n end_pt_of_cluster = (\n locs_of_large_diffs[biggest_interval_loc + 1] + 1\n ) # both referring to the sorted list\n # both +1s are to account for required frame of ref changes - indices refer to where the big gaps start, not where they ends\n # so:\n dists_to_sorted_pts = dists_along_profile[dist_order][\n start_pt_of_cluster:end_pt_of_cluster\n ]\n elevs_of_sorted_pts = their_elevs[dist_order][\n start_pt_of_cluster:end_pt_of_cluster\n ]\n slopes_of_sorted_pts = self.slopes[nodes_possible][dist_order][\n start_pt_of_cluster:end_pt_of_cluster\n ]\n profile_ft_node_id.append(i.copy())\n profile_ft_node_x.append(grid.node_x[i].copy())\n profile_ft_node_y.append(grid.node_y[i].copy())\n profile_ft_node_z.append(self.elevs[i].copy())\n profile_ft_node_dist.append(dist_to_ft.copy())\n profile_x_facet_pts.append(dists_to_sorted_pts.copy())\n profile_z_facet_pts.append(elevs_of_sorted_pts.copy())\n profile_S_facet_pts.append(slopes_of_sorted_pts.copy())\n figure(5)\n plot(dists_to_sorted_pts, elevs_of_sorted_pts)\n # dirty, but effective code!\n\n self.profile_ft_node_id = profile_ft_node_id\n self.profile_ft_node_x = profile_ft_node_x\n self.profile_ft_node_y = profile_ft_node_y\n self.profile_ft_node_z = profile_ft_node_z\n 
self.profile_ft_node_dist = profile_ft_node_dist\n self.profile_x_facet_pts = profile_x_facet_pts\n self.profile_z_facet_pts = profile_z_facet_pts\n self.profile_S_facet_pts = profile_S_facet_pts\n\n def fit_slopes_to_facet_lines(\n self, polynomial_degree=4, curvature_threshold=0.0004\n ):\n \"\"\"\n Fits (linear) lines of best fit to extracted profiles, already stored as\n class properties.\n \"\"\"\n avg_slopes_linear = []\n avg_slopes_poly = []\n curv_of_flattest_part_list = []\n slope_min_curv = []\n rsqd_list = []\n big_slope_small_curv = []\n elev_at_bssc = []\n for i in six.range(len(self.profile_x_facet_pts)):\n x = self.profile_x_facet_pts[i]\n z = self.profile_z_facet_pts[i]\n (grad, offset) = np.polyfit(x, z, 1)\n coeffs, residuals = np.polyfit(x, z, polynomial_degree, full=True)[:2]\n rsqd = 1.0 - residuals / (z.size * z.var())\n # differentiate the coeffs to get slope:\n diff_multiplier = np.arange(polynomial_degree + 1)[::-1]\n curv_multiplier = np.arange(polynomial_degree)[::-1]\n z_equ = np.poly1d(coeffs)\n S_equ = np.poly1d((coeffs * diff_multiplier)[:-1])\n curv_equ = np.poly1d(\n ((coeffs * diff_multiplier)[:-1] * curv_multiplier)[:-1]\n )\n S_at_each_pt = S_equ(x)\n curv_at_each_pt = curv_equ(x)\n avg_slopes_linear.append(abs(grad))\n avg_slopes_poly.append(np.amax(np.fabs(S_at_each_pt)))\n loc_of_flattest_part = np.argmin(np.fabs(curv_at_each_pt[2:-2])) + 2\n curv_of_flattest_part = curv_at_each_pt[loc_of_flattest_part]\n S_at_min_curve_untested = abs(S_at_each_pt[loc_of_flattest_part])\n small_curves = np.less(np.fabs(curv_at_each_pt[2:-2]), curvature_threshold)\n try:\n big_slope_small_curv.append(np.amax(S_at_each_pt[small_curves]))\n elev_at_bssc.append(z[np.argmax(S_at_each_pt[small_curves])])\n except ValueError:\n big_slope_small_curv.append(np.nan)\n elev_at_bssc.append(np.nan)\n slope_min_curv.append(S_at_min_curve_untested)\n curv_of_flattest_part_list.append(curv_of_flattest_part)\n rsqd_list.append(rsqd)\n # figure(8)\n # synthetic_z = grad*x + offset\n synthetic_z = z_equ(x)\n plot(x, z, \"x\")\n plot(x, synthetic_z, \"-\")\n self.avg_slopes_linear = np.array(avg_slopes_linear)\n self.avg_slopes_poly = np.array(avg_slopes_poly)\n self.curv_of_flattest_part = np.array(curv_of_flattest_part_list)\n self.slope_min_curv = np.array(slope_min_curv)\n self.big_slope_small_curv = np.array(big_slope_small_curv)\n self.elev_at_bssc = np.array(elev_at_bssc)\n self.rsqd = np.array(rsqd_list)\n", "\"\"\"\nsimple_sp_driver.py\n\nA simple driver implementing Braun-Willett flow routing and then a\n(non-fastscape) stream power component.\nDEJH, 09/15/14\n\"\"\"\nfrom __future__ import print_function\n\nimport time\n\nimport numpy\nimport pylab\n\nfrom landlab import ModelParameterDictionary, RasterModelGrid\nfrom landlab.components.flow_routing import DepressionFinderAndRouter, FlowAccumulator\nfrom landlab.components.stream_power import FastscapeEroder, StreamPowerEroder\nfrom landlab.plot.imshow import imshow_grid\n\ninputs = ModelParameterDictionary(\"./drive_sp_params.txt\")\nnrows = inputs.read_int(\"nrows\")\nncols = inputs.read_int(\"ncols\")\ndx = inputs.read_float(\"dx\")\ndt = inputs.read_float(\"dt\")\ntime_to_run = inputs.read_float(\"run_time\")\n# nt needs defining\nuplift = 10. 
* inputs.read_float(\"uplift_rate\")\ninit_elev = inputs.read_float(\"init_elev\")\n\nmg = RasterModelGrid((nrows, ncols), xy_spacing=dx)\n\n# create the fields in the grid\nmg.add_zeros(\"topographic__elevation\", at=\"node\")\nz = mg.zeros(at=\"node\") + init_elev\n# z += mg.node_x*0.001\nmg[\"node\"][\"topographic__elevation\"] = z + numpy.random.rand(len(z)) / 1000.\n\n# make some K values in a field to test\nmg.at_node[\"K_values\"] = 0.1 + numpy.random.rand(nrows * ncols) / 10.\n\nprint(\"Running ...\")\ntime_on = time.time()\n\n# instantiate the components:\nfr = FlowAccumulator(mg, flow_director=\"D8\")\nsp = StreamPowerEroder(mg, \"./drive_sp_params.txt\")\nlf = DepressionFinderAndRouter(mg)\n# load the Fastscape module too, to allow direct comparison\nfsp = FastscapeEroder(mg, \"./drive_sp_params.txt\")\n\n# perform the loop:\nelapsed_time = 0. # total time in simulation\nwhile elapsed_time < time_to_run:\n # for i in range(10):\n print(elapsed_time)\n if elapsed_time + dt > time_to_run:\n print(\"Short step!\")\n dt = time_to_run - elapsed_time\n mg = fr.route_flow(method=\"D8\")\n lf.map_depressions()\n # print 'Area: ', numpy.max(mg.at_node['drainage_area'])\n # mg = fsp.erode(mg)\n mg, _, _ = sp.erode(\n mg,\n dt,\n node_drainage_areas=\"drainage_area\",\n slopes_at_nodes=\"topographic__steepest_slope\",\n K_if_used=\"K_values\",\n )\n # add uplift\n mg.at_node[\"topographic__elevation\"][mg.core_nodes] += uplift * dt\n elapsed_time += dt\n\ntime_off = time.time()\nprint(\"Elapsed time: \", time_off - time_on)\n\n# Finalize and plot\nelev = mg[\"node\"][\"topographic__elevation\"]\nelev_r = mg.node_vector_to_raster(elev)\n\n# Clear previous plots\npylab.figure(1)\npylab.close()\n\n# Plot topography\npylab.figure(1)\nim = imshow_grid(mg, \"topographic__elevation\") # display a colored image\nprint(elev_r)\n\npylab.figure(2)\nim = pylab.plot(\n dx * numpy.arange(nrows), elev_r[:, int(ncols // 2)]\n) # display a colored image\npylab.title(\"Vertical cross section\")\n\npylab.show()\n\nprint(\"Done.\")\n", "import numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom landlab import (\n CLOSED_BOUNDARY,\n FIXED_GRADIENT_BOUNDARY,\n FIXED_LINK,\n INACTIVE_LINK,\n RasterModelGrid,\n)\n\n\ndef test_issue_428_a():\n \"\"\"Issue #428\"\"\"\n grid = RasterModelGrid((4, 4))\n grid.set_closed_boundaries_at_grid_edges(True, True, True, True)\n\n assert grid.status_at_node[1] == 4\n assert grid.status_at_link[4] == 4\n assert_array_equal(grid.active_link_dirs_at_node[1], [0, 0, 0, 0])\n\n grid.status_at_node[1] = 1\n assert grid.status_at_link[4] == 0\n assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])\n\n\ndef test_issue_428_b():\n \"\"\"Issue #428\"\"\"\n grid = RasterModelGrid((4, 4))\n\n z = np.ones(grid.number_of_nodes)\n z[grid.nodes_at_bottom_edge] = -9999.0\n z[grid.nodes_at_left_edge] = -9999.0\n z[grid.nodes_at_top_edge] = -9999.0\n z[grid.nodes_at_right_edge] = -9999.0\n z[1] = 0.5\n\n assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])\n\n grid.set_watershed_boundary_condition(z)\n assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])\n\n\ndef test_link_update_with_nodes_closed():\n rmg = RasterModelGrid((4, 5))\n rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY\n inactive_array = np.array([INACTIVE_LINK] * 5)\n assert_array_equal(rmg.status_at_link[4:9], inactive_array)\n\n\ndef test_link_update_with_nodes_fixed_grad():\n rmg = RasterModelGrid((4, 5))\n rmg.status_at_node[rmg.nodes_at_bottom_edge] = 
FIXED_GRADIENT_BOUNDARY\n fixed_array = np.array([FIXED_LINK] * 3)\n assert_array_equal(rmg.status_at_link[5:8], fixed_array)\n\n\ndef test_bc_set_code_init():\n rmg = RasterModelGrid((4, 5))\n assert rmg.bc_set_code == 0\n\n\ndef test_bc_set_code_change():\n rmg = RasterModelGrid((4, 5))\n rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY\n assert rmg.bc_set_code != 0\n" ]
[ [ "numpy.isclose", "numpy.tan", "numpy.mean", "numpy.where", "numpy.less", "numpy.empty", "numpy.logical_and", "numpy.fabs", "numpy.arange", "numpy.polyfit", "numpy.sqrt", "numpy.poly1d", "numpy.greater", "numpy.argmax", "numpy.empty_like", "numpy.logical_or", "numpy.array", "numpy.equal", "numpy.zeros", "numpy.diff", "numpy.arctan", "numpy.amax", "numpy.argsort", "numpy.union1d", "numpy.ones", "numpy.ptp", "numpy.unique" ], [ "numpy.arange", "numpy.random.rand" ], [ "numpy.array", "numpy.testing.assert_array_equal", "numpy.ones" ] ]
DSCI-310/Group-10-Project
[ "cfc50ebcbbf160e0a72a1144e6f7ae8c345db4aa" ]
[ "tests/test_alpha_tuning.py" ]
[ "from pandas import DataFrame\nfrom sklearn.model_selection import train_test_split\nimport pytest\nfrom src.analysis.alpha_tuning import ridge_alpha_tuning\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.linear_model import RidgeCV\n\n@pytest.fixture\ndef toy_dataset():\n return DataFrame({'x1': [1, 2, 3, 4, 6, 7, 8, 9, 0],\n 'x2': [1, 2, 3, 4, 5, 6, 7, 8, 10],\n 'y': [2, 3, 4, 5, 6, 7, 7, 8, 9]\n })\n\n@pytest.fixture\ndef toy_dataset_2():\n return DataFrame({\n 'x1': [1, 2, 3, 4, 6, 7, 8, 9, 0,1, 2, 3,\n 4, 6, 7, 8, 9, 0,1, 2, 3, 4, 6,\n 7, 8, 9, 0,1, 2, 3, 4, 6, 7, 8, 9, 0],\n 'x2': [1, 2, 3, 4, 5, 6, 7, 8, 10,1,\n 2, 3, 4, 6, 7, 8, 9, 0,1, 2, 3,\n 4, 6, 7, 8, 9, 0,1, 2, 3, 4, 6,\n 7, 8, 9, 0],\n 'y': [2, 3, 4, 5, 6, 7, 7, 8, 9,\n 2, 3, 4, 5, 6, 7, 7, 8, 9,\n 2, 3, 4, 5, 6, 7, 7, 8, 9,\n 2, 3, 4, 5, 6, 7, 7, 8, 9]\n })\n\ndef test_ridgealphatuning_fullfunc(toy_dataset):\n alpha = [1, 5, 12]\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n cv_pipe = make_pipeline(StandardScaler(), RidgeCV(alphas=alpha, cv=2))\n cv_pipe.fit(trainx, trainy)\n best_a = cv_pipe.named_steps['ridgecv'].alpha_\n print(best_a)\n assert ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2) == best_a\n\ndef test_ridgealphatuning_alpha(toy_dataset):\n alpha = 1\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n with pytest.raises(TypeError) as e_info:\n ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2)\n assert \"alpha is not a list\" in str(e_info.value)\n\ndef test_ridgealphatuning_trainx(toy_dataset):\n alpha = [1, 10, 100]\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n trainx = 1\n with pytest.raises(TypeError) as e_info:\n ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2)\n assert \"train_x should be data frame\" in str(e_info.value)\n\ndef test_ridgealphatuning_trainy(toy_dataset):\n alpha = [1, 10, 100]\n\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n trainy = 1213\n with pytest.raises(TypeError) as e_info:\n ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2)\n assert \"train_y should be data frame\" in str(e_info.value)\n\ndef test_ridgealphatuning_cv(toy_dataset):\n alpha = [1, 10, 100]\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n with pytest.raises(TypeError) as e_info:\n ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=\"two\")\n assert \"cv should be an integer\" in str(e_info.value)\n\ndef test_ridgealphatuning_smallalpha(toy_dataset):\n alpha = [1]\n train, test = train_test_split(toy_dataset, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), train['y']\n cv_pipe = make_pipeline(StandardScaler(), RidgeCV(alphas=alpha, cv=2))\n cv_pipe.fit(trainx, trainy)\n best_a = cv_pipe.named_steps['ridgecv'].alpha_\n print(best_a)\n assert ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2) == best_a\n\ndef test_ridgealphatuning_largedat(toy_dataset_2):\n alpha = [1, 10, 100]\n train, test = train_test_split(toy_dataset_2, test_size=.4, random_state=123)\n trainx, trainy = train.drop(columns='y'), 
train['y']\n cv_pipe = make_pipeline(StandardScaler(), RidgeCV(alphas=alpha, cv=2))\n cv_pipe.fit(trainx, trainy)\n best_a = cv_pipe.named_steps['ridgecv'].alpha_\n print(best_a)\n assert ridge_alpha_tuning(alpha, StandardScaler(), trainx, trainy, cv=2) == best_a" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.linear_model.RidgeCV", "sklearn.preprocessing.StandardScaler" ] ]
amerkel2/kartothek
[ "a555a7706a065c8f2ff1760c108c3be2e5be8b3a" ]
[ "kartothek/serialization/_generic.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains functionality for persisting/serialising DataFrames.\n\"\"\"\n\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom kartothek.serialization._util import _check_contains_null\n\nfrom ._util import ensure_unicode_string_type\n\n\nclass DataFrameSerializer:\n \"\"\"\n Abstract class that supports serializing DataFrames to/from\n simplekv stores.\n \"\"\"\n\n _serializers: Dict[str, \"DataFrameSerializer\"] = {}\n type_stable = False\n\n def __ne__(self, other):\n return not (self == other)\n\n @classmethod\n def register_serializer(cls, suffix, serializer):\n cls._serializers[suffix] = serializer\n\n @classmethod\n def restore_dataframe(\n cls,\n store,\n key,\n filter_query=None,\n columns=None,\n predicate_pushdown_to_io=True,\n categories=None,\n predicates=None,\n date_as_object=False,\n ):\n \"\"\"\n Load a DataFrame from the specified store. The key is also used to\n detect the used format.\n\n Parameters\n ----------\n store: simplekv.KeyValueStore\n store engine\n key: str\n Key that specifies a path where object should be\n retrieved from the store resource.\n filter_query: str\n Optional query to filter the DataFrame. Must adhere to the specification\n of pandas.DataFrame.query.\n columns : str or None\n Only read in listed columns. When set to None, the full file\n will be read in.\n predicate_pushdown_to_io: bool\n Push predicates through to the I/O layer, default True. Disable\n this if you see problems with predicate pushdown for the given\n file even if the file format supports it. Note that this option\n only hides problems in the store layer that need to be addressed\n there.\n categories: list of str (optional)\n Columns that should be loaded as categoricals.\n predicates: list of list of tuple[str, str, Any]\n Optional list of predicates, like [[('x', '>', 0), ...], that are used\n to filter the resulting DataFrame, possibly using predicate pushdown,\n if supported by the file format.\n This parameter is not compatible with filter_query.\n\n Predicates are expressed in disjunctive normal form (DNF). This means\n that the innermost tuple describe a single column predicate. These\n inner predicate make are all combined with a conjunction (AND) into a\n larger predicate. The most outer list then combines all predicates\n with a disjunction (OR). By this, we should be able to express all\n kinds of predicates that are possible using boolean logic.\n date_as_object: bool\n Retrieve all date columns as an object column holding datetime.date objects\n instead of pd.Timestamp. Note that this option only works for type-stable\n serializers, e.g. 
``ParquetSerializer``.\n Returns\n -------\n Data in pandas dataframe format.\n \"\"\"\n if filter_query and predicates:\n raise ValueError(\"Can only specify one of filter_query and predicates\")\n\n for suffix, serializer in cls._serializers.items():\n if key.endswith(suffix):\n df = serializer.restore_dataframe(\n store,\n key,\n filter_query,\n columns,\n predicate_pushdown_to_io=predicate_pushdown_to_io,\n categories=categories,\n predicates=predicates,\n date_as_object=date_as_object,\n )\n df.columns = df.columns.map(ensure_unicode_string_type)\n return df\n\n # No serialiser matched\n raise ValueError(\n \"The specified file format for '{}' is not supported\".format(key)\n )\n\n def store(self, store, key_prefix, df):\n \"\"\"\n Persist a DataFrame to the specified store.\n\n The used store format (e.g. Parquet) will be appended to the key.\n\n Parameters\n ----------\n store: simplekv.KeyValueStore\n store engine\n key_prefix: str\n Key prefix that specifies a path where object should be\n stored on the store resource. The used file format will be\n appended to the key.\n df: pandas.DataFrame or pyarrow.Table\n DataFrame that shall be persisted\n\n Returns\n -------\n str\n The actual key where the DataFrame is stored.\n \"\"\"\n raise NotImplementedError(\"Abstract method called.\")\n\n\ndef filter_df(df, filter_query=None):\n \"\"\"\n General implementation of query filtering.\n\n Serialisation formats such as Parquet that support predicate push-down\n may pre-filter in their own implementations.\n \"\"\"\n if df.shape[0] > 0 and filter_query is not None:\n df = df.query(filter_query)\n return df\n\n\ndef check_predicates(predicates):\n \"\"\"\n Check if predicates are well-formed.\n \"\"\"\n if predicates is not None:\n if len(predicates) == 0 or any(len(p) == 0 for p in predicates):\n raise ValueError(\"Malformed predicates\")\n for conjunction in predicates:\n for col, op, val in conjunction:\n if (\n isinstance(val, list)\n and any(_check_contains_null(v) for v in val)\n or _check_contains_null(val)\n ):\n raise NotImplementedError(\n \"Null-terminated binary strings are not supported as predicate values.\"\n )\n\n\ndef filter_df_from_predicates(df, predicates, strict_date_types=False):\n \"\"\"\n Filter a `pandas.DataFrame` based on predicates in disjunctive normal form.\n\n Parameters\n ----------\n df: pd.DataFrame\n The pandas DataFrame to be filtered\n predicates: list of lists\n Predicates in disjunctive normal form (DNF). 
For a thorough documentation, see\n :class:`DataFrameSerializer.restore_dataframe`\n strict_date_types: bool\n If False (default), cast all datelike values to datetime64 for comparison.\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n indexer = np.zeros(len(df), dtype=bool)\n for conjunction in predicates:\n inner_indexer = np.ones(len(df), dtype=bool)\n for column, op, value in conjunction:\n filter_array_like(\n df[ensure_unicode_string_type(column)].values,\n op,\n value,\n inner_indexer,\n inner_indexer,\n strict_date_types=strict_date_types,\n )\n indexer = inner_indexer | indexer\n return df[indexer]\n\n\ndef _handle_categorical_data(array_like, require_ordered):\n if require_ordered and pd.api.types.is_categorical(array_like):\n if isinstance(array_like, pd.Categorical):\n categorical = array_like\n else:\n categorical = array_like.cat\n array_value_type = categorical.categories.dtype\n if categorical.categories.is_monotonic:\n array_like = categorical.as_ordered()\n else:\n array_like = categorical.reorder_categories(\n categorical.categories.sort_values(), ordered=True\n )\n else:\n array_value_type = array_like.dtype\n return array_like, array_value_type\n\n\ndef _handle_null_arrays(array_like, value_dtype):\n # NULL types might not be preserved well, so try to cast floats (pandas default type) to the value type\n # Determine the type using the `kind` interface since this is common for a numpy array, pandas series and pandas extension arrays\n if array_like.dtype.kind == \"f\" and np.isnan(array_like).all():\n if array_like.dtype.kind != value_dtype.kind:\n array_like = array_like.astype(value_dtype)\n return array_like, array_like.dtype\n\n\ndef _handle_timelike_values(array_value_type, value, value_dtype, strict_date_types):\n if is_list_like(value):\n value = [pd.Timestamp(val).to_datetime64() for val in value]\n else:\n value = pd.Timestamp(value).to_datetime64()\n value_dtype = pd.Series(value).dtype\n return value, value_dtype\n\n\ndef _ensure_type_stability(array_like, value, strict_date_types, require_ordered):\n \"\"\"\n Ensure that the provided value and the provided array will have compatible\n types, such that comparisons are unambiguous.\n\n The type check is based on the numpy type system and accesses the arrays\n `kind` attribute and asserts equality. The provided value will be\n interpreted as a scalar in this case. For scalars which do not have a proper\n python representation, we will relax the strictness as long as there is a\n valid and unambiguous interpretation of a comparison operation. In\n particular we consider the following combinations valid:\n\n * unsigned integer (u) <> integer (i)\n * zero-terminated bytes (S) <> Python Object (O)\n * Unicode string (U) <> Python Object (O)\n\n Parameters\n ----------\n strict_date_types: bool\n If False, assume that datetime.date and datetime.datetime are\n compatible types. In this case, the value is cast appropriately\n require_ordered: bool\n Indicate if the operator to be evaluated will require a notion of\n ordering. 
In the case of pd.Categorical we will then assume a\n lexicographical ordering and cast the pd.CategoricalDtype accordingly\n \"\"\"\n\n value_dtype = pd.Series(value).dtype\n array_like, array_value_type = _handle_categorical_data(array_like, require_ordered)\n array_like, array_value_type = _handle_null_arrays(array_like, value_dtype)\n\n type_comp = (value_dtype.kind, array_value_type.kind)\n\n compatible_types = [\n # UINT and INT\n (\"u\", \"i\"),\n (\"i\", \"u\"),\n # various string kinds\n (\"O\", \"S\"),\n (\"O\", \"U\"),\n # bool w/ Nones\n (\"b\", \"O\"),\n ]\n\n if not strict_date_types:\n # objects (datetime.date) and datetime64\n compatible_types.append((\"O\", \"M\"))\n\n type_comp = (value_dtype.kind, array_value_type.kind)\n\n if len(set(type_comp)) > 1 and type_comp not in compatible_types:\n raise TypeError(\n f\"Unexpected type encountered. Expected {array_value_type.kind} but got {value_dtype.kind}.\"\n )\n if \"M\" in type_comp:\n value, value_dtype = _handle_timelike_values(\n array_value_type, value, value_dtype, strict_date_types\n )\n return array_like, value\n\n\ndef filter_array_like(\n array_like, op, value, mask=None, out=None, strict_date_types=False\n):\n \"\"\"\n Filter an array-like object using operations defined in the predicates\n\n Parameters\n ----------\n array_like: array-like, c.f. pd.api.types.is_array_like\n The array like object to be filtered\n op: string\n value: object\n mask: boolean array-like, optional\n A boolean array like object which will be combined with the result\n of this evaluation using a logical AND. If an array with all True is\n given, it will be the same result as if left empty\n out: array-like\n An array into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated\n array is returned.\n strict_date_types: bool\n If False (default), cast all datelike values to datetime64 for comparison.\n \"\"\"\n if mask is None:\n mask = np.ones(len(array_like), dtype=bool)\n\n if out is None:\n out = np.zeros(len(array_like), dtype=bool)\n\n # In the case of an empty list, don't bother with evaluating types, etc.\n if is_list_like(value) and len(value) == 0:\n false_arr = np.zeros(len(array_like), dtype=bool)\n np.logical_and(false_arr, mask, out=out)\n return out\n\n require_ordered = \"<\" in op or \">\" in op\n array_like, value = _ensure_type_stability(\n array_like, value, strict_date_types, require_ordered\n )\n\n with np.errstate(invalid=\"ignore\"):\n if op == \"==\":\n np.logical_and(array_like == value, mask, out=out)\n elif op == \"!=\":\n np.logical_and(array_like != value, mask, out=out)\n elif op == \"<=\":\n np.logical_and(array_like <= value, mask, out=out)\n elif op == \">=\":\n np.logical_and(array_like >= value, mask, out=out)\n elif op == \"<\":\n np.logical_and(array_like < value, mask, out=out)\n elif op == \">\":\n np.logical_and(array_like > value, mask, out=out)\n elif op == \"in\":\n value = np.asarray(value)\n np.logical_and(\n np.isin(array_like, value)\n if len(value) > 0\n else np.zeros(len(array_like), dtype=bool),\n mask,\n out=out,\n )\n else:\n raise NotImplementedError(\"op not supported\")\n\n return out\n" ]
[ [ "numpy.isnan", "pandas.api.types.is_list_like", "numpy.asarray", "numpy.errstate", "numpy.logical_and", "pandas.Timestamp", "pandas.Series", "pandas.api.types.is_categorical", "numpy.isin" ] ]
alexgallego1997/GamestonkTerminal
[ "1c6ce5c99111aa7195c51f6930fcdbb9dadd2f00", "eb2b0d766bf1b6bb8656d6733083962efb152fe2" ]
[ "gamestonk_terminal/fundamental_analysis/yahoo_finance_api.py", "gamestonk_terminal/technical_analysis/volatility.py" ]
[ "import argparse\nfrom datetime import datetime\nimport yfinance as yf\nimport pandas as pd\nfrom gamestonk_terminal.dataframe_helpers import clean_df_index\nfrom gamestonk_terminal.helper_funcs import (\n long_number_format,\n parse_known_args_and_warn,\n)\n\n\ndef info(l_args, s_ticker):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"info\",\n description=\"\"\"\n Print information about the company. The following fields are expected:\n Zip, Sector, Full time employees, Long business summary, City, Phone, State, Country,\n Website, Max age, Address, Industry, Previous close, Regular market open, Two hundred\n day average, Payout ratio, Regular market day high, Average daily volume 10 day,\n Regular market previous close, Fifty day average, Open, Average volume 10 days, Beta,\n Regular market day low, Price hint, Currency, Trailing PE, Regular market volume,\n Market cap, Average volume, Price to sales trailing 12 months, Day low, Ask, Ask size,\n Volume, Fifty two week high, Forward PE, Fifty two week low, Bid, Tradeable, Bid size,\n Day high, Exchange, Short name, Long name, Exchange timezone name, Exchange timezone\n short name, Is esg populated, Gmt off set milliseconds, Quote type, Symbol, Message\n board id, Market, Enterprise to revenue, Profit margins, Enterprise to ebitda, 52 week\n change, Forward EPS, Shares outstanding, Book value, Shares short, Shares percent\n shares out, Last fiscal year end, Held percent institutions, Net income to common,\n Trailing EPS, Sand p52 week change, Price to book, Held percent insiders, Next fiscal\n year end, Most recent quarter, Short ratio, Shares short previous month date, Float\n shares, Enterprise value, Last split date, Last split factor, Earnings quarterly growth,\n Date short interest, PEG ratio, Short percent of float, Shares short prior month,\n Regular market price, Logo_url. 
[Source: Yahoo Finance]\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n stock = yf.Ticker(s_ticker)\n df_info = pd.DataFrame(stock.info.items(), columns=[\"Metric\", \"Value\"])\n df_info = df_info.set_index(\"Metric\")\n\n clean_df_index(df_info)\n\n if (\n \"Last split date\" in df_info.index\n and df_info.loc[\"Last split date\"].values[0]\n ):\n df_info.loc[\"Last split date\"].values[0] = datetime.fromtimestamp(\n df_info.loc[\"Last split date\"].values[0]\n ).strftime(\"%d/%m/%Y\")\n\n df_info = df_info.mask(df_info[\"Value\"].astype(str).eq(\"[]\")).dropna()\n df_info = df_info.applymap(lambda x: long_number_format(x))\n\n df_info = df_info.rename(\n index={\n \"Address1\": \"Address\",\n \"Average daily volume10 day\": \"Average daily volume 10 day\",\n \"Average volume10days\": \"Average volume 10 days\",\n \"Price to sales trailing12 months\": \"Price to sales trailing 12 months\",\n }\n )\n df_info.index = df_info.index.str.replace(\"eps\", \"EPS\")\n df_info.index = df_info.index.str.replace(\"p e\", \"PE\")\n df_info.index = df_info.index.str.replace(\"Peg\", \"PEG\")\n\n pd.set_option(\"display.max_colwidth\", None)\n\n if \"Long business summary\" in df_info.index:\n print(df_info.drop(index=[\"Long business summary\"]).to_string(header=False))\n print(\"\")\n print(df_info.loc[\"Long business summary\"].values[0])\n print(\"\")\n else:\n print(df_info.to_string(header=False))\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n return\n\n\ndef shareholders(l_args, s_ticker):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"shrs\",\n description=\"\"\"Print Major, institutional and mutualfunds shareholders.\n [Source: Yahoo Finance]\"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n stock = yf.Ticker(s_ticker)\n pd.set_option(\"display.max_colwidth\", None)\n\n # Major holders\n print(\"Major holders\")\n df_major_holders = stock.major_holders\n df_major_holders[1] = df_major_holders[1].apply(\n lambda x: x.replace(\"%\", \"Percentage\")\n )\n print(df_major_holders.to_string(index=False, header=False))\n print(\"\")\n\n # Institutional holders\n print(\"Institutional holders\")\n df_institutional_shareholders = stock.institutional_holders\n df_institutional_shareholders.columns = (\n df_institutional_shareholders.columns.str.replace(\"% Out\", \"Stake\")\n )\n df_institutional_shareholders[\"Shares\"] = df_institutional_shareholders[\n \"Shares\"\n ].apply(lambda x: long_number_format(x))\n df_institutional_shareholders[\"Value\"] = df_institutional_shareholders[\n \"Value\"\n ].apply(lambda x: long_number_format(x))\n df_institutional_shareholders[\"Stake\"] = df_institutional_shareholders[\n \"Stake\"\n ].apply(lambda x: str(f\"{100 * x:.2f}\") + \" %\")\n print(df_institutional_shareholders.to_string(index=False))\n print(\"\")\n\n # Mutualfunds holders\n print(\"Mutualfunds holders\")\n df_mutualfund_shareholders = stock.mutualfund_holders\n df_mutualfund_shareholders.columns = (\n df_mutualfund_shareholders.columns.str.replace(\"% Out\", \"Stake\")\n )\n df_mutualfund_shareholders[\"Shares\"] = df_mutualfund_shareholders[\n \"Shares\"\n ].apply(lambda x: long_number_format(x))\n df_mutualfund_shareholders[\"Value\"] = df_mutualfund_shareholders[\"Value\"].apply(\n lambda x: long_number_format(x)\n )\n df_mutualfund_shareholders[\"Stake\"] = df_mutualfund_shareholders[\"Stake\"].apply(\n lambda x: str(f\"{100 * x:.2f}\") 
+ \" %\"\n )\n print(df_mutualfund_shareholders.to_string(index=False))\n\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n return\n\n\ndef sustainability(l_args, s_ticker):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"sust\",\n description=\"\"\"\n Print sustainability values of the company. The following fields are expected:\n Palmoil, Controversialweapons, Gambling, Socialscore, Nuclear, Furleather, Alcoholic,\n Gmo, Catholic, Socialpercentile, Peercount, Governancescore, Environmentpercentile,\n Animaltesting, Tobacco, Totalesg, Highestcontroversy, Esgperformance, Coal, Pesticides,\n Adult, Percentile, Peergroup, Smallarms, Environmentscore, Governancepercentile,\n Militarycontract. [Source: Yahoo Finance]\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n stock = yf.Ticker(s_ticker)\n pd.set_option(\"display.max_colwidth\", None)\n\n df_sustainability = stock.sustainability\n\n if not df_sustainability:\n print(f\"No sustainability information in Yahoo for {s_ticker}\")\n print(\"\")\n return\n\n clean_df_index(df_sustainability)\n\n df_sustainability = df_sustainability.rename(\n index={\n \"Controversialweapons\": \"Controversial Weapons\",\n \"Socialpercentile\": \"Social Percentile\",\n \"Peercount\": \"Peer Count\",\n \"Governancescore\": \"Governance Score\",\n \"Environmentpercentile\": \"Environment Percentile\",\n \"Animaltesting\": \"Animal Testing\",\n \"Highestcontroversy\": \"Highest Controversy\",\n \"Environmentscore\": \"Environment Score\",\n \"Governancepercentile\": \"Governance Percentile\",\n \"Militarycontract\": \"Military Contract\",\n }\n )\n\n print(df_sustainability.to_string(header=False))\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n return\n\n\ndef calendar_earnings(l_args, s_ticker):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"cal\",\n description=\"\"\"\n Calendar earnings of the company. 
Including revenue and earnings estimates.\n [Source: Yahoo Finance]\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n stock = yf.Ticker(s_ticker)\n df_calendar = stock.calendar\n\n if df_calendar.empty:\n print(f\"No earnings calendar information in Yahoo for {s_ticker}\")\n print(\"\")\n return\n\n df_calendar.iloc[0, 0] = df_calendar.iloc[0, 0].date().strftime(\"%d/%m/%Y\")\n df_calendar.iloc[:, 0] = df_calendar.iloc[:, 0].apply(\n lambda x: long_number_format(x)\n )\n\n print(f\"Earnings Date: {df_calendar.iloc[:, 0]['Earnings Date']}\")\n\n avg = df_calendar.iloc[:, 0][\"Earnings Average\"]\n low = df_calendar.iloc[:, 0][\"Earnings Low\"]\n high = df_calendar.iloc[:, 0][\"Earnings High\"]\n\n print(f\"Earnings Estimate Avg: {avg} [{low}, {high}]\")\n print(\n f\"Revenue Estimate Avg: {df_calendar.iloc[:, 0]['Revenue Average']} \\\n [{df_calendar.iloc[:, 0]['Revenue Low']}, {df_calendar.iloc[:, 0]['Revenue High']}]\"\n )\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n return\n", "import argparse\nimport matplotlib.pyplot as plt\nimport pandas_ta as ta\nfrom pandas.plotting import register_matplotlib_converters\nfrom gamestonk_terminal.helper_funcs import (\n check_positive,\n parse_known_args_and_warn,\n plot_autoscale,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal import feature_flags as gtff\n\nregister_matplotlib_converters()\n\n\ndef bbands(l_args, s_ticker, s_interval, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"bbands\",\n description=\"\"\"\n Bollinger Bands consist of three lines. The middle band is a simple\n moving average (generally 20 periods) of the typical price (TP). The upper and lower\n bands are F standard deviations (generally 2) above and below the middle band.\n The bands widen and narrow when the volatility of the price is higher or lower,\n respectively. \\n \\nBollinger Bands do not, in themselves, generate buy or sell signals;\n they are an indicator of overbought or oversold conditions. When the price is near the\n upper or lower band it indicates that a reversal may be imminent. The middle band\n becomes a support or resistance level. The upper and lower bands can also be\n interpreted as price targets. When the price bounces off of the lower band and crosses\n the middle band, then the upper band becomes the price target.\n \"\"\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--length\",\n action=\"store\",\n dest=\"n_length\",\n type=check_positive,\n default=5,\n help=\"length\",\n )\n parser.add_argument(\n \"-s\",\n \"--std\",\n action=\"store\",\n dest=\"n_std\",\n type=check_positive,\n default=2,\n help=\"std\",\n )\n parser.add_argument(\n \"-m\", \"--mamode\", action=\"store\", dest=\"s_mamode\", default=\"sma\", help=\"mamode\"\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.bbands(\n close=df_stock[\"5. adjusted close\"],\n length=ns_parser.n_length,\n std=ns_parser.n_std,\n mamode=ns_parser.s_mamode,\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.bbands(\n close=df_stock[\"4. 
close\"],\n length=ns_parser.n_length,\n std=ns_parser.n_std,\n mamode=ns_parser.s_mamode,\n offset=ns_parser.n_offset,\n ).dropna()\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n if s_ticker == \"1440min\":\n plt.plot(\n df_stock.index, df_stock[\"5. adjusted close\"].values, color=\"k\", lw=3\n )\n else:\n plt.plot(df_stock.index, df_stock[\"4. close\"].values, color=\"k\", lw=3)\n plt.plot(df_ta.index, df_ta.iloc[:, 0].values, \"r\", lw=2)\n plt.plot(df_ta.index, df_ta.iloc[:, 1].values, \"b\", lw=1.5, ls=\"--\")\n plt.plot(df_ta.index, df_ta.iloc[:, 2].values, \"g\", lw=2)\n plt.title(f\"Bollinger Band (BBands) on {s_ticker}\")\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.legend([s_ticker, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])\n plt.gca().fill_between(\n df_ta.index,\n df_ta.iloc[:, 0].values,\n df_ta.iloc[:, 2].values,\n alpha=0.1,\n color=\"b\",\n )\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n" ]
[ [ "pandas.set_option" ], [ "matplotlib.pyplot.ion", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.minorticks_on", "matplotlib.pyplot.gca", "pandas.plotting.register_matplotlib_converters" ] ]
kamalabdel97/hvplot
[ "0301d73f36d49c9c5fc98604cf91ca4638b0b44d" ]
[ "hvplot/tests/testgeo.py" ]
[ "from unittest import TestCase, SkipTest, expectedFailure\n\nimport numpy as np\nimport pandas as pd\nimport holoviews as hv\n\n\nclass TestGeo(TestCase):\n\n def setUp(self):\n try:\n import xarray as xr\n import rasterio # noqa\n import geoviews # noqa\n import cartopy.crs as ccrs\n except:\n raise SkipTest('xarray, rasterio, geoviews, or cartopy not available')\n import hvplot.xarray # noqa\n import hvplot.pandas # noqa\n self.da = (xr.open_rasterio(\n 'https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif')\n .sel(band=1))\n self.crs = ccrs.epsg(self.da.crs.split('epsg:')[1])\n\n def assertCRS(self, plot, proj='utm'):\n assert plot.crs.proj4_params['proj'] == proj\n\n def test_plot_with_crs_as_object(self):\n plot = self.da.hvplot.image('x', 'y', crs=self.crs)\n self.assertCRS(plot)\n\n def test_plot_with_crs_as_proj_string(self):\n plot = self.da.hvplot.image('x', 'y', crs=self.da.crs)\n self.assertCRS(plot)\n\n def test_plot_with_geo_as_true_crs_undefined(self):\n plot = self.da.hvplot.image('x', 'y', geo=True)\n self.assertCRS(plot)\n\n def test_plot_with_crs_as_attr_str(self):\n da = self.da.copy()\n da.attrs = {'bar': self.crs}\n plot = da.hvplot.image('x', 'y', crs='bar')\n self.assertCRS(plot)\n\n def test_plot_with_crs_as_nonexistent_attr_str(self):\n with self.assertRaisesRegex(ValueError, \"'foo' must be\"):\n self.da.hvplot.image('x', 'y', crs='foo')\n\n def test_plot_with_geo_as_true_crs_no_crs_on_data_returns_default(self):\n da = self.da.copy()\n da.attrs = {'bar': self.crs}\n plot = da.hvplot.image('x', 'y', geo=True)\n self.assertCRS(plot, 'eqc')\n\n\nclass TestGeoAnnotation(TestCase):\n\n def setUp(self):\n try:\n import geoviews # noqa\n import cartopy.crs as ccrs # noqa\n except:\n raise SkipTest('geoviews or cartopy not available')\n import hvplot.pandas # noqa\n self.crs = ccrs.PlateCarree()\n self.df = pd.DataFrame(np.random.rand(10, 2), columns=['x', 'y'])\n\n def test_plot_with_coastline(self):\n import geoviews as gv\n plot = self.df.hvplot.points('x', 'y', geo=True, coastline=True)\n self.assertEqual(len(plot), 2)\n coastline = plot.get(1)\n self.assertIsInstance(coastline, gv.Feature)\n\n def test_plot_with_coastline_sets_geo_by_default(self):\n import geoviews as gv\n plot = self.df.hvplot.points('x', 'y', coastline=True)\n self.assertEqual(len(plot), 2)\n coastline = plot.get(1)\n self.assertIsInstance(coastline, gv.Feature)\n\n def test_plot_with_coastline_scale(self):\n plot = self.df.hvplot.points('x', 'y', geo=True, coastline='10m')\n opts = plot.get(1).opts.get('plot')\n self.assertEqual(opts.kwargs, {'scale': '10m'})\n\n def test_plot_with_tiles(self):\n plot = self.df.hvplot.points('x', 'y', geo=True, tiles=True)\n self.assertEqual(len(plot), 2)\n self.assertIsInstance(plot.get(0), hv.Tiles)\n self.assertIn('wikimedia', plot.get(0).data)\n\n def test_plot_with_specific_tiles(self):\n plot = self.df.hvplot.points('x', 'y', geo=True, tiles='ESRI')\n self.assertEqual(len(plot), 2)\n self.assertIsInstance(plot.get(0), hv.Tiles)\n self.assertIn('ArcGIS', plot.get(0).data)\n\n def test_plot_with_specific_tile_class(self):\n plot = self.df.hvplot.points('x', 'y', geo=True, tiles=hv.element.tiles.EsriImagery)\n self.assertEqual(len(plot), 2)\n self.assertIsInstance(plot.get(0), hv.Tiles)\n self.assertIn('ArcGIS', plot.get(0).data)\n\n def test_plot_with_specific_tile_obj(self):\n plot = self.df.hvplot.points('x', 'y', geo=True, tiles=hv.element.tiles.EsriImagery())\n self.assertEqual(len(plot), 2)\n self.assertIsInstance(plot.get(0), 
hv.Tiles)\n self.assertIn('ArcGIS', plot.get(0).data)\n\n def test_plot_with_specific_gv_tile_obj(self):\n import geoviews as gv\n plot = self.df.hvplot.points('x', 'y', geo=True, tiles=gv.tile_sources.CartoDark)\n self.assertEqual(len(plot), 2)\n self.assertIsInstance(plot.get(0), gv.element.WMTS)\n\n\nclass TestGeoElements(TestCase):\n\n def setUp(self):\n try:\n import geoviews # noqa\n import cartopy.crs as ccrs # noqa\n except:\n raise SkipTest('geoviews or cartopy not available')\n import hvplot.pandas # noqa\n self.crs = ccrs.PlateCarree()\n self.df = pd.DataFrame(np.random.rand(10, 2), columns=['x', 'y'])\n\n def test_geo_hexbin(self):\n hextiles = self.df.hvplot.hexbin('x', 'y', geo=True)\n self.assertEqual(hextiles.crs, self.crs)\n\n def test_geo_points(self):\n points = self.df.hvplot.points('x', 'y', geo=True)\n self.assertEqual(points.crs, self.crs)\n\n def test_geo_opts(self):\n points = self.df.hvplot.points('x', 'y', geo=True)\n opts = hv.Store.lookup_options('bokeh', points, 'plot').kwargs\n self.assertEqual(opts.get('data_aspect'), 1)\n self.assertEqual(opts.get('width'), None)\n\n def test_geo_opts_with_width(self):\n points = self.df.hvplot.points('x', 'y', geo=True, width=200)\n opts = hv.Store.lookup_options('bokeh', points, 'plot').kwargs\n self.assertEqual(opts.get('data_aspect'), 1)\n self.assertEqual(opts.get('width'), 200)\n self.assertEqual(opts.get('height'), None)\n\n\nclass TestGeoPandas(TestCase):\n\n def setUp(self):\n try:\n import geopandas as gpd # noqa\n import geoviews # noqa\n import cartopy.crs as ccrs # noqa\n except:\n raise SkipTest('geopandas, geoviews, or cartopy not available')\n import hvplot.pandas # noqa\n\n self.cities = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n\n def test_points_hover_cols_is_empty_by_default(self):\n points = self.cities.hvplot()\n assert points.kdims == ['x', 'y']\n assert points.vdims == []\n\n def test_points_hover_cols_does_not_include_geometry_when_all(self):\n points = self.cities.hvplot(x='x', y='y', hover_cols='all')\n assert points.kdims == ['x', 'y']\n assert points.vdims == ['index', 'name']\n\n def test_points_hover_cols_when_all_and_use_columns_is_false(self):\n points = self.cities.hvplot(x='x', hover_cols='all', use_index=False)\n assert points.kdims == ['x', 'y']\n assert points.vdims == ['name']\n\n def test_points_hover_cols_index_in_list(self):\n points = self.cities.hvplot(y='y', hover_cols=['index'])\n assert points.kdims == ['x', 'y']\n assert points.vdims == ['index']\n\n def test_points_hover_cols_with_c_set_to_name(self):\n points = self.cities.hvplot(c='name')\n assert points.kdims == ['x', 'y']\n assert points.vdims == ['name']\n opts = hv.Store.lookup_options('bokeh', points, 'style').kwargs\n assert opts['color'] == 'name'\n\n @expectedFailure\n def test_points_hover_cols_with_by_set_to_name(self):\n points = self.cities.hvplot(by='name')\n assert points.kdims == ['x', 'y']\n assert points.vdims == ['name']\n" ]
[ [ "numpy.random.rand" ] ]
YonatanSimson/OpenSfM
[ "358843738359f4b5d767b22df2f3960ded31c981", "358843738359f4b5d767b22df2f3960ded31c981" ]
[ "opensfm/actions/create_submodels.py", "opensfm/actions/export_openmvs.py" ]
[ "import logging\nfrom collections import defaultdict\n\nimport numpy as np\nfrom opensfm.large import metadataset\nfrom opensfm.large import tools\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_dataset(data):\n \"\"\" Split the dataset into smaller submodels. \"\"\"\n\n meta_data = metadataset.MetaDataSet(data.data_path)\n\n meta_data.remove_submodels()\n data.invent_reference_lla()\n _create_image_list(data, meta_data)\n\n if meta_data.image_groups_exists():\n _read_image_groups(meta_data)\n else:\n _cluster_images(meta_data, data.config[\"submodel_size\"])\n\n _add_cluster_neighbors(meta_data, data.config[\"submodel_overlap\"])\n _save_clusters_geojson(meta_data)\n _save_cluster_neighbors_geojson(meta_data)\n\n meta_data.create_submodels(meta_data.load_clusters_with_neighbors())\n\n\ndef _create_image_list(data, meta_data):\n ills = []\n for image in data.images():\n exif = data.load_exif(image)\n if (\n \"gps\" not in exif\n or \"latitude\" not in exif[\"gps\"]\n or \"longitude\" not in exif[\"gps\"]\n ):\n logger.warning(\"Skipping {} because of missing GPS\".format(image))\n continue\n\n lat = exif[\"gps\"][\"latitude\"]\n lon = exif[\"gps\"][\"longitude\"]\n ills.append((image, lat, lon))\n\n meta_data.create_image_list(ills)\n\n\ndef _read_image_groups(meta_data):\n image_cluster = {}\n cluster_images = defaultdict(list)\n for image, cluster in meta_data.load_image_groups():\n image_cluster[image] = cluster\n cluster_images[cluster].append(image)\n K = len(cluster_images)\n cluster_index = dict(zip(sorted(cluster_images.keys()), range(K)))\n\n images = []\n positions = []\n labels = []\n centers = np.zeros((K, 2))\n centers_count = np.zeros((K, 1))\n for image, lat, lon in meta_data.images_with_gps():\n images.append(image)\n positions.append([lat, lon])\n cluster = image_cluster[image]\n label = cluster_index[cluster]\n labels.append(label)\n centers[label, 0] += lat\n centers[label, 1] += lon\n centers_count[label] += 1\n\n images = np.array(images)\n positions = np.array(positions, np.float32)\n labels = np.array(labels)\n centers /= centers_count\n\n meta_data.save_clusters(images, positions, labels, centers)\n\n\ndef _cluster_images(meta_data, cluster_size):\n images = []\n positions = []\n for image, lat, lon in meta_data.images_with_gps():\n images.append(image)\n positions.append([lat, lon])\n\n positions = np.array(positions, np.float32)\n images = np.array(images).reshape((len(images), 1))\n\n K = float(images.shape[0]) / cluster_size\n K = int(np.ceil(K))\n\n labels, centers = tools.kmeans(positions, K)[1:]\n\n images = images.ravel()\n labels = labels.ravel()\n\n meta_data.save_clusters(images, positions, labels, centers)\n\n\ndef _add_cluster_neighbors(meta_data, max_distance):\n images, positions, labels, centers = meta_data.load_clusters()\n clusters = tools.add_cluster_neighbors(positions, labels, centers, max_distance)\n\n image_clusters = []\n for cluster in clusters:\n image_clusters.append(list(np.take(images, np.array(cluster))))\n\n meta_data.save_clusters_with_neighbors(image_clusters)\n\n\ndef _save_cluster_neighbors_geojson(meta_data):\n image_coordinates = {}\n for image, lat, lon in meta_data.images_with_gps():\n image_coordinates[image] = [lon, lat]\n\n features = []\n clusters = meta_data.load_clusters_with_neighbors()\n for cluster_idx, images in enumerate(clusters):\n for image in images:\n features.append(\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": image_coordinates[image],\n },\n \"properties\": 
{\"name\": image, \"submodel\": cluster_idx},\n }\n )\n geojson = {\"type\": \"FeatureCollection\", \"features\": features}\n meta_data.save_cluster_with_neighbors_geojson(geojson)\n\n\ndef _save_clusters_geojson(meta_data):\n image_coordinates = {}\n for image, lat, lon in meta_data.images_with_gps():\n image_coordinates[image] = [lon, lat]\n\n features = []\n images, positions, labels, centers = meta_data.load_clusters()\n for image, label in zip(images, labels):\n features.append(\n {\n \"type\": \"Feature\",\n \"geometry\": {\"type\": \"Point\", \"coordinates\": image_coordinates[image]},\n \"properties\": {\"name\": image, \"submodel\": int(label)}, # cluster_idx\n }\n )\n geojson = {\"type\": \"FeatureCollection\", \"features\": features}\n meta_data.save_clusters_geojson(geojson)\n", "import os\n\nimport numpy as np\nfrom opensfm import dataset\nfrom opensfm import io\nfrom opensfm import pydense\n\n\ndef run_dataset(data, image_list):\n \"\"\" Export reconstruction to OpenMVS format. \"\"\"\n\n udata = dataset.UndistortedDataSet(data)\n reconstructions = udata.load_undistorted_reconstruction()\n tracks_manager = udata.load_undistorted_tracks_manager()\n\n export_only = None\n if image_list:\n export_only = {}\n with open(image_list, \"r\") as f:\n for image in f:\n export_only[image.strip()] = True\n\n if reconstructions:\n export(reconstructions[0], tracks_manager, udata, export_only)\n\n\ndef export(reconstruction, tracks_manager, udata, export_only):\n exporter = pydense.OpenMVSExporter()\n for camera in reconstruction.cameras.values():\n if camera.projection_type == \"perspective\":\n w, h = camera.width, camera.height\n K = np.array(\n [\n [camera.focal, 0, (w - 1.0) / 2 / max(w, h)],\n [0, camera.focal, (h - 1.0) / 2 / max(w, h)],\n [0, 0, 1],\n ]\n )\n exporter.add_camera(str(camera.id), K)\n\n for shot in reconstruction.shots.values():\n if export_only is not None and shot.id not in export_only:\n continue\n\n if shot.camera.projection_type == \"perspective\":\n image_path = udata._undistorted_image_file(shot.id)\n exporter.add_shot(\n str(os.path.abspath(image_path)),\n str(shot.id),\n str(shot.camera.id),\n shot.pose.get_rotation_matrix(),\n shot.pose.get_origin(),\n )\n\n for point in reconstruction.points.values():\n observations = tracks_manager.get_track_observations(point.id)\n\n if export_only is not None:\n shots = [k for k in observations if k in export_only]\n else:\n shots = list(observations)\n\n if shots:\n coordinates = np.array(point.coordinates, dtype=np.float64)\n exporter.add_point(coordinates, shots)\n\n io.mkdir_p(udata.data_path + \"/openmvs\")\n exporter.export(udata.data_path + \"/openmvs/scene.mvs\")\n" ]
[ [ "numpy.array", "numpy.ceil", "numpy.zeros" ], [ "numpy.array" ] ]
Rye-Catcher-ZCH/HITSZ-2020-Bioinformatics-Course-Project-Two-Team-zchnb
[ "80fb3abf62c95d5a09c54dc15d2ec48c3e2547e9" ]
[ "DeepConv-DTI-master/evaluate_performance.py" ]
[ "import pandas as pd\nimport argparse\nimport os\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_curve, auc, roc_curve\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"predictions\", help=\"prediction result to evaluate\")\nparser.add_argument(\"--test-name\", \"-n\", help=\"name of test data set\", nargs=\"*\")\nparser.add_argument(\"--threshold\", \"-T\", help=\"threshold for prediction\", type=float, default=0.5)\nparser.add_argument(\"--no-threshold\", \"-N\", help=\"performance evaluation without threshold (AUC,AUPR)\", action=\"store_true\")\nparser.add_argument(\"--evaluation-output\",\"-o\" , help=\"output for result evaluation\")\nargs = parser.parse_args()\nno_th = args.no_threshold\nprediction_dir = args.predictions\ntest_names = args.test_name\nth = args.threshold\noutput_file = args.evaluation_output\n\nextension = prediction_dir.split(\".\")[-1]\n\nif extension=='csv':\n result_df = pd.read_csv(prediction_dir,header=[0,1])\nelif extension=='tsv':\n result_df = pd.read_table(prediction_dir, header=[0,1])\n\nth = args.threshold\ndef label_by_th(y_pred, threshold=0.5):\n y_pred_copy = y_pred.copy()\n y_pred_copy[y_pred>= threshold] = 1 \n y_pred_copy[y_pred<threshold] = 0 \n return y_pred_copy\nif no_th:\n evaluation_df = pd.DataFrame(index=[\"Sen\", \"Spe\", \"Pre\", \"Acc\", \"F1\",\"AUC\", \"AUPR\"])\nelse:\n evaluation_df = pd.DataFrame(index=[\"Sen\", \"Spe\", \"Pre\", \"Acc\", \"F1\"])\n\n\n#print result_df.head()\nfor dataset in test_names:\n tn, fp, fn, tp = confusion_matrix(result_df[dataset,\"label\"].dropna(), label_by_th(result_df[dataset,\"predicted\"].dropna(), th)).ravel()\n print(\"Evaluation of the %s set \" % dataset)\n sen = float(tp)/(fn+tp)\n pre = float(tp)/(tp+fp)\n spe = float(tn)/(tn+fp)\n acc = float(tn+tp)/(tn+fp+fn+tp)\n f1 = (2*sen*pre)/(sen+pre)\n print( \"\\tSen : \", sen )\n print(\"\\tSpe : \", spe )\n print(\"\\tAcc : \", acc )\n print(\"\\tPrecision : \", pre)\n print(\"\\tF1 : \", f1)\n result_dic = {\"Acc\": acc, \"Sen\" : sen, \"Pre\":pre, \"F1\":f1, \"Spe\":spe}\n if no_th:\n fpr, tpr, thresholds_AUC = roc_curve(result_df[dataset,\"label\"], result_df[dataset,\"predicted\"])\n AUC = auc(fpr, tpr)\n precision, recall, thresholds = precision_recall_curve(result_df[dataset,\"label\"],result_df[dataset,\"predicted\"])\n AUPR = auc(recall,precision)\n print(\"\\tArea Under ROC Curve(AUC): %0.3f\" % AUC)\n print(\"\\tArea Under PR Curve(AUPR): %0.3f\" % AUPR)\n print(\"=================================================\")\n result_dic.update({\"AUC\":AUC, \"AUPR\":AUPR}) \n evaluation_df[dataset] = pd.Series(result_dic)\nevaluation_output = args.evaluation_output\nif evaluation_output:\n print(\"save to %s\"%output_file)\n dir_name, file_name = os.path.split(evaluation_output)\n if not os.path.isdir(dir_name):\n os.system(\"mkdir -p \"+dir_name)\n print(\"No directory named %s : create directory\" % dir_name)\n evaluation_df.to_csv(evaluation_output)\n\n" ]
[ [ "pandas.read_table", "sklearn.metrics.precision_recall_curve", "pandas.DataFrame", "pandas.Series", "sklearn.metrics.auc", "pandas.read_csv", "sklearn.metrics.roc_curve" ] ]
4QuantOSS/OpenDIGITS
[ "f7969aef0f91d074d09290261ca36e2e93128276" ]
[ "digits/utils/image.py" ]
[ "# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport math\nimport os.path\nimport requests\n\n# Find the best implementation available\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nimport numpy as np\nimport PIL.Image\nimport scipy.misc\n\nfrom . import is_url, HTTP_TIMEOUT, errors\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n# Library defaults:\n# PIL.Image:\n# size -- (width, height)\n# np.array:\n# shape -- (height, width, channels)\n# range -- [0-255]\n# dtype -- uint8\n# channels -- RGB\n# caffe.datum:\n# datum.data type -- bytes (uint8)\n# datum.float_data type -- float32\n# when decoding images, channels are BGR\n# DIGITS:\n# image_dims -- (height, width, channels)\n\n# List of supported file extensions\n# Use like \"if filename.endswith(SUPPORTED_EXTENSIONS)\"\nSUPPORTED_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.bmp', '.ppm', '.pgm')\n\n\ndef load_image(path):\n \"\"\"\n Reads a file from `path` and returns a PIL.Image with mode 'L' or 'RGB'\n Raises LoadImageError\n\n Arguments:\n path -- path to the image, can be a filesystem path or a URL\n \"\"\"\n image = None\n if is_url(path):\n try:\n r = requests.get(path,\n allow_redirects=False,\n timeout=HTTP_TIMEOUT)\n r.raise_for_status()\n stream = StringIO(r.content)\n image = PIL.Image.open(stream)\n except requests.exceptions.RequestException as e:\n raise errors.LoadImageError(e.message)\n except IOError as e:\n raise errors.LoadImageError(e.message)\n elif os.path.exists(path):\n try:\n image = PIL.Image.open(path)\n image.load()\n except IOError as e:\n raise errors.LoadImageError('IOError: Trying to load \"{}\": {}'.format(path, e))\n else:\n raise errors.LoadImageError('\"%s\" not found' % path)\n\n if image.mode in ['L', 'RGB']:\n # No conversion necessary\n return image\n elif image.mode in ['1']:\n # Easy conversion to L\n return image.convert('L')\n elif image.mode in ['LA']:\n # Deal with transparencies\n new = PIL.Image.new('L', image.size, 255)\n new.paste(image, mask=image.convert('RGBA'))\n return new\n elif image.mode in ['CMYK', 'YCbCr']:\n # Easy conversion to RGB\n return image.convert('RGB')\n elif image.mode in ['P', 'RGBA']:\n # Deal with transparencies\n new = PIL.Image.new('RGB', image.size, (255, 255, 255))\n new.paste(image, mask=image.convert('RGBA'))\n return new\n else:\n raise errors.LoadImageError('Image mode \"%s\" not supported' % image.mode)\n\n\ndef upscale(image, ratio):\n \"\"\"\n return upscaled image array\n\n Arguments:\n image -- a (H,W,C) numpy.ndarray\n ratio -- scaling factor (>1)\n \"\"\"\n if not isinstance(image, np.ndarray):\n raise ValueError('Expected ndarray')\n if ratio < 1:\n raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)\n width = int(math.floor(image.shape[1] * ratio))\n height = int(math.floor(image.shape[0] * ratio))\n channels = image.shape[2]\n out = np.ndarray((height, width, channels), dtype=np.uint8)\n for x, y in np.ndindex((width, height)):\n out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]\n return out\n\n\ndef image_to_array(image,\n channels=None):\n \"\"\"\n Returns an image as a np.array\n\n Arguments:\n image -- a PIL.Image or numpy.ndarray\n\n Keyword Arguments:\n channels -- channels of new image (stays unchanged if not specified)\n \"\"\"\n\n if channels not in [None, 1, 3, 4]:\n raise ValueError('unsupported number of channels: %s' % 
channels)\n\n if isinstance(image, PIL.Image.Image):\n # Convert image mode (channels)\n if channels is None:\n image_mode = image.mode\n if image_mode not in ['L', 'RGB', 'RGBA']:\n raise ValueError('unknown image mode \"%s\"' % image_mode)\n elif channels == 1:\n # 8-bit pixels, black and white\n image_mode = 'L'\n elif channels == 3:\n # 3x8-bit pixels, true color\n image_mode = 'RGB'\n elif channels == 4:\n # 4x8-bit pixels, true color with alpha\n image_mode = 'RGBA'\n if image.mode != image_mode:\n image = image.convert(image_mode)\n image = np.array(image)\n elif isinstance(image, np.ndarray):\n if image.dtype != np.uint8:\n image = image.astype(np.uint8)\n if image.ndim == 3 and image.shape[2] == 1:\n image = image.reshape(image.shape[:2])\n if channels is None:\n if not (image.ndim == 2 or (image.ndim == 3 and image.shape[2] in [3, 4])):\n raise ValueError('invalid image shape: %s' % (image.shape,))\n elif channels == 1:\n if image.ndim != 2:\n if image.ndim == 3 and image.shape[2] in [3, 4]:\n # color to grayscale. throw away alpha\n image = np.dot(image[:, :, :3], [0.299, 0.587, 0.114]).astype(np.uint8)\n else:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n elif channels == 3:\n if image.ndim == 2:\n # grayscale to color\n image = np.repeat(image, 3).reshape(image.shape + (3,))\n elif image.shape[2] == 4:\n # throw away alpha\n image = image[:, :, :3]\n elif image.shape[2] != 3:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n elif channels == 4:\n if image.ndim == 2:\n # grayscale to color\n image = np.repeat(image, 4).reshape(image.shape + (4,))\n image[:, :, 3] = 255\n elif image.shape[2] == 3:\n # add alpha\n image = np.append(image, np.zeros(image.shape[:2] + (1,), dtype='uint8'), axis=2)\n image[:, :, 3] = 255\n elif image.shape[2] != 4:\n raise ValueError('invalid image shape: %s' % (image.shape,))\n else:\n raise ValueError('resize_image() expected a PIL.Image.Image or a numpy.ndarray')\n\n return image\n\n\ndef resize_image(image, height, width,\n channels=None,\n resize_mode=None,\n ):\n \"\"\"\n Resizes an image and returns it as a np.array\n\n Arguments:\n image -- a PIL.Image or numpy.ndarray\n height -- height of new image\n width -- width of new image\n\n Keyword Arguments:\n channels -- channels of new image (stays unchanged if not specified)\n resize_mode -- can be crop, squash, fill or half_crop\n \"\"\"\n\n if resize_mode is None:\n resize_mode = 'squash'\n if resize_mode not in ['crop', 'squash', 'fill', 'half_crop']:\n raise ValueError('resize_mode \"%s\" not supported' % resize_mode)\n\n # convert to array\n image = image_to_array(image, channels)\n\n # No need to resize\n if image.shape[0] == height and image.shape[1] == width:\n return image\n\n # Resize\n interp = 'bilinear'\n\n width_ratio = float(image.shape[1]) / width\n height_ratio = float(image.shape[0]) / height\n if resize_mode == 'squash' or width_ratio == height_ratio:\n return scipy.misc.imresize(image, (height, width), interp=interp)\n elif resize_mode == 'crop':\n # resize to smallest of ratios (relatively larger image), keeping aspect ratio\n if width_ratio > height_ratio:\n resize_height = height\n resize_width = int(round(image.shape[1] / height_ratio))\n else:\n resize_width = width\n resize_height = int(round(image.shape[0] / width_ratio))\n image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)\n\n # chop off ends of dimension that is still too long\n if width_ratio > height_ratio:\n start = int(round((resize_width - width) / 
2.0))\n return image[:, start:start + width]\n else:\n start = int(round((resize_height - height) / 2.0))\n return image[start:start + height, :]\n else:\n if resize_mode == 'fill':\n # resize to biggest of ratios (relatively smaller image), keeping aspect ratio\n if width_ratio > height_ratio:\n resize_width = width\n resize_height = int(round(image.shape[0] / width_ratio))\n if (height - resize_height) % 2 == 1:\n resize_height += 1\n else:\n resize_height = height\n resize_width = int(round(image.shape[1] / height_ratio))\n if (width - resize_width) % 2 == 1:\n resize_width += 1\n image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)\n elif resize_mode == 'half_crop':\n # resize to average ratio keeping aspect ratio\n new_ratio = (width_ratio + height_ratio) / 2.0\n resize_width = int(round(image.shape[1] / new_ratio))\n resize_height = int(round(image.shape[0] / new_ratio))\n if width_ratio > height_ratio and (height - resize_height) % 2 == 1:\n resize_height += 1\n elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:\n resize_width += 1\n image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp)\n # chop off ends of dimension that is still too long\n if width_ratio > height_ratio:\n start = int(round((resize_width - width) / 2.0))\n image = image[:, start:start + width]\n else:\n start = int(round((resize_height - height) / 2.0))\n image = image[start:start + height, :]\n else:\n raise Exception('unrecognized resize_mode \"%s\"' % resize_mode)\n\n # fill ends of dimension that is too short with random noise\n if width_ratio > height_ratio:\n padding = (height - resize_height) / 2\n noise_size = (padding, width)\n if channels > 1:\n noise_size += (channels,)\n noise = np.random.randint(0, 255, size = noise_size).astype('uint8')\n image = np.concatenate((noise, image, noise), axis=0)\n else:\n padding = (width - resize_width) / 2\n noise_size = (height, padding)\n if channels > 1:\n noise_size += (channels,)\n noise = np.random.randint(0, 255, size=noise_size).astype('uint8')\n image = np.concatenate((noise, image, noise), axis=1)\n\n return image\n\n\ndef embed_image_html(image):\n \"\"\"\n Returns an image embedded in HTML base64 format\n (Based on Caffe's web_demo)\n\n Arguments:\n image -- a PIL.Image or np.ndarray\n \"\"\"\n if image is None:\n return None\n elif isinstance(image, PIL.Image.Image):\n pass\n elif isinstance(image, np.ndarray):\n image = PIL.Image.fromarray(image)\n else:\n raise ValueError('image must be a PIL.Image or a np.ndarray')\n\n # Read format from the image\n fmt = image.format\n if not fmt:\n # default to PNG\n fmt = 'png'\n else:\n fmt = fmt.lower()\n\n string_buf = StringIO()\n image.save(string_buf, format=fmt)\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/%s;base64,%s' % (fmt, data)\n\n\ndef get_layer_vis_square(data,\n allow_heatmap=True,\n normalize=True,\n min_img_dim=100,\n max_width=1200,\n channel_order='RGB',\n ):\n \"\"\"\n Returns a vis_square for the given layer data\n\n Arguments:\n data -- a np.ndarray\n\n Keyword arguments:\n allow_heatmap -- if True, convert single channel images to heatmaps\n normalize -- whether to normalize the data when visualizing\n max_width -- maximum width for the vis_square\n \"\"\"\n if channel_order not in ['RGB', 'BGR']:\n raise ValueError('Unsupported channel_order %s' % channel_order)\n if data.ndim == 1:\n # interpret as 1x1 grayscale images\n # (N, 1, 1)\n data = data[:, np.newaxis, np.newaxis]\n 
elif data.ndim == 2:\n # interpret as 1x1 grayscale images\n # (N, 1, 1)\n data = data.reshape((data.shape[0] * data.shape[1], 1, 1))\n elif data.ndim == 3:\n if data.shape[0] == 3:\n # interpret as a color image\n # (1, H, W,3)\n if channel_order == 'BGR':\n data = data[[2, 1, 0], ...] # BGR to RGB (see issue #59)\n data = data.transpose(1, 2, 0)\n data = data[np.newaxis, ...]\n else:\n # interpret as grayscale images\n # (N, H, W)\n pass\n elif data.ndim == 4:\n if data.shape[0] == 3:\n # interpret as HxW color images\n # (N, H, W, 3)\n data = data.transpose(1, 2, 3, 0)\n if channel_order == 'BGR':\n data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)\n elif data.shape[1] == 3:\n # interpret as HxW color images\n # (N, H, W, 3)\n data = data.transpose(0, 2, 3, 1)\n if channel_order == 'BGR':\n data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)\n else:\n # interpret as HxW grayscale images\n # (N, H, W)\n data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))\n else:\n raise RuntimeError('unrecognized data shape: %s' % (data.shape,))\n\n # chop off data so that it will fit within max_width\n padsize = 0\n width = data.shape[2]\n if width > max_width:\n data = data[:1, :max_width, :max_width]\n else:\n if width > 1:\n padsize = 1\n width += 1\n n = max(max_width / width, 1)\n n *= n\n data = data[:n]\n\n if not allow_heatmap and data.ndim == 3:\n data = data[..., np.newaxis]\n\n vis = vis_square(data,\n padsize=padsize,\n normalize=normalize,\n )\n\n # find minimum dimension and upscale if necessary\n _min = sorted(vis.shape[:2])[0]\n if _min < min_img_dim:\n # upscale image\n ratio = min_img_dim / float(_min)\n vis = upscale(vis, ratio)\n return vis\n\n\ndef vis_square(images,\n padsize=1,\n normalize=False,\n colormap='jet',\n ):\n \"\"\"\n Visualize each image in a grid of size approx sqrt(n) by sqrt(n)\n Returns a np.array image\n (Based on Caffe's filter_visualization notebook)\n\n Arguments:\n images -- an array of shape (N, H, W) or (N, H, W, C)\n if C is not set, a heatmap is computed for the result\n\n Keyword arguments:\n padsize -- how many pixels go between the tiles\n normalize -- if true, scales (min, max) across all images out to (0, 1)\n colormap -- a string representing one of the supported colormaps\n \"\"\"\n assert 3 <= images.ndim <= 4, 'images.ndim must be 3 or 4'\n # convert to float since we're going to do some math\n images = images.astype('float32')\n if normalize:\n images -= images.min()\n if images.max() > 0:\n images /= images.max()\n images *= 255\n\n if images.ndim == 3:\n # they're grayscale - convert to a colormap\n redmap, greenmap, bluemap = get_color_map(colormap)\n\n red = np.interp(images * (len(redmap) - 1) / 255.0, xrange(len(redmap)), redmap)\n green = np.interp(images * (len(greenmap) - 1) / 255.0, xrange(len(greenmap)), greenmap)\n blue = np.interp(images * (len(bluemap) - 1) / 255.0, xrange(len(bluemap)), bluemap)\n\n # Slap the channels back together\n images = np.concatenate((red[..., np.newaxis], green[..., np.newaxis], blue[..., np.newaxis]), axis=3)\n images = np.minimum(images, 255)\n images = np.maximum(images, 0)\n\n # convert back to uint8\n images = images.astype('uint8')\n\n # Compute the output image matrix dimensions\n n = int(np.ceil(np.sqrt(images.shape[0])))\n ny = n\n nx = n\n length = images.shape[0]\n if n * (n - 1) >= length:\n nx = n - 1\n\n # Add padding between the images\n padding = ((0, nx * ny - length), (0, padsize), (0, padsize)) + ((0, 0),) * (images.ndim - 3)\n padded = 
np.pad(images, padding, mode='constant', constant_values=255)\n\n # Tile the images beside each other\n tiles = padded.reshape((ny, nx) + padded.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, padded.ndim + 1)))\n tiles = tiles.reshape((ny * tiles.shape[1], nx * tiles.shape[3]) + tiles.shape[4:])\n\n if tiles.shape[-1] == 1:\n # grayscale to color\n tiles = np.dstack([tiles.squeeze()] * 3)\n\n return tiles\n\n\ndef get_color_map(name):\n \"\"\"\n Return a colormap as (redmap, greenmap, bluemap)\n\n Arguments:\n name -- the name of the colormap. If unrecognized, will default to 'jet'.\n \"\"\"\n redmap = [0]\n greenmap = [0]\n bluemap = [0]\n if name == 'white':\n # essentially a noop\n redmap = [0, 1]\n greenmap = [0, 1]\n bluemap = [0, 1]\n elif name == 'simple':\n redmap = [0, 1, 1, 1]\n greenmap = [0, 0, 1, 1]\n bluemap = [0, 0, 0, 1]\n elif name == 'hot':\n redmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # noqa\n greenmap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603163, 0.0714285714285714, 0.1111111111111112, 0.1507936507936507, 0.1904761904761905, 0.23015873015873, 0.2698412698412698, 0.3095238095238093, 0.3492063492063491, 0.3888888888888888, 0.4285714285714284, 0.4682539682539679, 0.5079365079365079, 0.5476190476190477, 0.5873015873015872, 0.6269841269841268, 0.6666666666666665, 0.7063492063492065, 0.746031746031746, 0.7857142857142856, 0.8253968253968254, 0.8650793650793651, 0.9047619047619047, 0.9444444444444442, 0.984126984126984, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # noqa\n bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904745, 0.1269841269841265, 0.2063492063492056, 0.2857142857142856, 0.3650793650793656, 0.4444444444444446, 0.5238095238095237, 0.6031746031746028, 0.6825396825396828, 0.7619047619047619, 0.8412698412698409, 0.92063492063492, 1] # noqa\n elif name == 'rainbow':\n redmap = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9365079365079367, 0.8571428571428572, 0.7777777777777777, 0.6984126984126986, 0.6190476190476191, 0.53968253968254, 0.4603174603174605, 0.3809523809523814, 0.3015873015873018, 0.2222222222222223, 0.1428571428571432, 0.06349206349206415, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03174603174603208, 0.08465608465608465, 0.1375661375661377, 0.1904761904761907, 0.2433862433862437, 0.2962962962962963, 0.3492063492063493, 0.4021164021164023, 0.4550264550264553, 0.5079365079365079, 0.5608465608465609, 0.6137566137566139, 0.666666666666667] # noqa\n greenmap = [0, 0.03968253968253968, 0.07936507936507936, 0.119047619047619, 0.1587301587301587, 0.1984126984126984, 0.2380952380952381, 0.2777777777777778, 0.3174603174603174, 0.3571428571428571, 0.3968253968253968, 0.4365079365079365, 0.4761904761904762, 0.5158730158730158, 0.5555555555555556, 0.5952380952380952, 
0.6349206349206349, 0.6746031746031745, 0.7142857142857142, 0.753968253968254, 0.7936507936507936, 0.8333333333333333, 0.873015873015873, 0.9126984126984127, 0.9523809523809523, 0.992063492063492, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9841269841269842, 0.9047619047619047, 0.8253968253968256, 0.7460317460317465, 0.666666666666667, 0.587301587301587, 0.5079365079365079, 0.4285714285714288, 0.3492063492063493, 0.2698412698412698, 0.1904761904761907, 0.1111111111111116, 0.03174603174603208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # noqa\n bluemap = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01587301587301582, 0.09523809523809534, 0.1746031746031744, 0.2539682539682535, 0.333333333333333, 0.412698412698413, 0.4920634920634921, 0.5714285714285712, 0.6507936507936507, 0.7301587301587302, 0.8095238095238093, 0.8888888888888884, 0.9682539682539679, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # noqa\n elif name == 'winter':\n greenmap = [0, 1]\n bluemap = [1, 0.5]\n else:\n if name != 'jet':\n print('Warning: colormap \"%s\" not supported. Using jet instead.' % name)\n redmap = [0, 0, 0, 0, 0.5, 1, 1, 1, 0.5]\n greenmap = [0, 0, 0.5, 1, 1, 1, 0.5, 0, 0]\n bluemap = [0.5, 1, 1, 1, 0.5, 0, 0, 0, 0]\n return 255.0 * np.array(redmap), 255.0 * np.array(greenmap), 255.0 * np.array(bluemap)\n" ]
[ [ "numpy.concatenate", "numpy.ndindex", "numpy.pad", "numpy.array", "numpy.dot", "numpy.zeros", "numpy.minimum", "numpy.ndarray", "numpy.random.randint", "numpy.sqrt", "numpy.repeat", "numpy.maximum" ] ]
dingdanhao110/HINGCN
[ "281b73c03bd3b00e35bce4c5e1c27076233555e4" ]
[ "runEmbedGCN.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport argparse\nimport numpy as np\nimport sys\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom utilities import *\nfrom hinmodel import *\nfrom metapath import *\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\nparser.add_argument('--fastmode', action='store_true', default=True,\n help='Validate during training pass.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=1000,\n help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=5e-4,\n help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=128,\n help='Number of hidden units.')\nparser.add_argument('--n_meta', type=int, default=3,\n help='Number of meta-paths.')\nparser.add_argument('--dim_mp', type=int, default=16,\n help='Number of hidden units in layer2.')\nparser.add_argument('--n_sample', type=int, default=16,\n help='Dataset')\nparser.add_argument('--dropout', type=float, default=0.5,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--alpha', type=float, default=0.8,\n help='alpha for leaky relu.')\nparser.add_argument('--dataset', type=str, default='homograph',\n help='Dataset')\nparser.add_argument('--dataset_path', type=str, default='./data/dblp/',\n help='Dataset')\nparser.add_argument('--embedding_file', type=str, default='APA',\n help='Dataset')\nparser.add_argument('--label_file', type=str, default='author_label',\n help='Dataset')\nparser.add_argument('--prep-dim', type=int, default=32,\n help='Dataset')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\ndef read_embed(path=\"./data/dblp/\",\n emd_file=\"APA\"):\n with open(\"{}{}.emb\".format(path, emd_file)) as f:\n n_nodes,n_feature = map(int, f.readline().strip().split())\n print(\"number of nodes:{}, embedding size:{}\".format(n_nodes,n_feature))\n\n embedding = np.loadtxt(\"{}{}.emb\".format(path, emd_file),\n dtype=np.float32,skiprows=1)\n emd_index = {}\n for i in range(n_nodes):\n emd_index[embedding[i, 0]] = i\n\n features = np.asarray([embedding[emd_index[i], 1:] for i in range(n_nodes)])\n\n assert features.shape[1] == n_feature\n assert features.shape[0] == n_nodes\n\n PA_file = \"PA\"\n PT_file = \"PT\"\n PA = np.genfromtxt(\"{}{}.txt\".format(path, PA_file),\n dtype=np.int32)\n PT = np.genfromtxt(\"{}{}.txt\".format(path, PT_file),\n dtype=np.int32)\n PA[:, 0] -= 1\n PA[:, 1] -= 1\n PT[:, 0] -= 1\n PT[:, 1] -= 1\n\n paper_max = max(PA[:, 0]) + 1\n author_max = max(PA[:, 1]) + 1\n term_max = max(PT[:, 1]) + 1\n\n PA = sp.coo_matrix((np.ones(PA.shape[0]), (PA[:, 0], PA[:, 1])),\n shape=(paper_max, author_max),\n dtype=np.float32)\n PT = sp.coo_matrix((np.ones(PT.shape[0]), (PT[:, 0], PT[:, 1])),\n shape=(paper_max, term_max),\n dtype=np.float32)\n\n transformer = TfidfTransformer()\n AT = PA.transpose() * PT # AT\n AT = transformer.fit_transform(AT)\n\n AT = AT.todense()\n AT = np.pad(AT, 
((0,features.shape[0]-AT.shape[0]),(0,0)),'constant',constant_values=0 )\n\n print(\"number of nodes:{}, feature size:{}\".format(AT.shape[0], AT.shape[1]))\n assert AT.shape[0] == n_nodes\n\n # features = AT\n features=np.concatenate((features,AT),axis=1)\n n_feature = features.shape[1]\n\n return n_nodes, n_feature, features\n\n\ndef read_graph(path=\"./data/dblp/\", dataset=\"homograph\", label_file=\"author_label\", emb_file=\"APA\"):\n print('Loading {} dataset...'.format(dataset))\n\n n_nodes, n_feature, features = read_embed(path,emb_file)\n features = torch.FloatTensor(features)\n\n labels_raw = np.genfromtxt(\"{}{}.txt\".format(path, label_file),dtype=np.int32)\n labels_raw[:, 0] -= 1\n labels_raw[:, 1] -= 1\n labels = np.zeros(n_nodes)\n labels[labels_raw[:, 0]] = labels_raw[:, 1]\n labels = torch.LongTensor(labels)\n\n # build graph\n # idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # idx_map = {j: i for i, j in enumerate(idx)}\n edges = np.genfromtxt(\"{}{}.txt\".format(path, dataset),\n dtype=np.int32)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(n_nodes, n_nodes),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n # features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n reordered = np.random.permutation(labels_raw[:, 0])\n total_labeled = labels_raw.shape[0]\n\n idx_train = reordered[range(int(total_labeled * 0.4))]\n idx_val = reordered[range(int(total_labeled * 0.4), int(total_labeled * 0.8))]\n idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\ndef read_graph_dblp(path=\"data/dblp2/\", dataset=\"homograph\", label_file=\"author_label\", emb_file=\"APC_16\"):\n print('Loading {} dataset...'.format(dataset))\n\n n_nodes, n_feature, features = read_embed(path,emb_file)\n features = torch.FloatTensor(features)\n\n labels_raw = np.genfromtxt(\"{}{}.txt\".format(path, label_file),dtype=np.int32)\n labels_raw[:, 0] -= 1\n labels_raw[:, 1] -= 1\n labels = np.zeros(n_nodes)\n labels[labels_raw[:, 0]] = labels_raw[:, 1]\n labels = torch.LongTensor(labels)\n\n # build graph\n # idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # idx_map = {j: i for i, j in enumerate(idx)}\n edges = np.genfromtxt(\"{}{}.txt\".format(path, dataset),\n dtype=np.int32)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(n_nodes, n_nodes),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n # features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n reordered = np.random.permutation(labels_raw[:, 0])\n total_labeled = labels_raw.shape[0]\n\n idx_train = reordered[range(int(total_labeled * 0.4))]\n idx_val = reordered[range(int(total_labeled * 0.4), int(total_labeled * 0.8))]\n idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\n\ndef read_graph_yelp(path=\"./data/yelp/\", dataset=\"homograph\",\n 
label_file=\"true_cluster\", emb_file=\"RBUK_16\"):\n print('Loading {} dataset...'.format(dataset))\n\n with open(\"{}{}.emb\".format(path, emb_file)) as f:\n n_nodes, n_feature = map(int, f.readline().strip().split())\n print(\"number of nodes:{}, embedding size:{}\".format(n_nodes, n_feature))\n\n embedding = np.loadtxt(\"{}{}.emb\".format(path, emb_file),\n dtype=np.int32, skiprows=1)\n emd_index = {}\n for i in range(n_nodes):\n emd_index[embedding[i, 0]] = i\n\n embedding = np.asarray([embedding[emd_index[i], 1:] for i in range(n_nodes)])\n\n assert embedding.shape[1] == n_feature\n assert embedding.shape[0] == n_nodes\n embedding = torch.FloatTensor(embedding)\n\n features = np.genfromtxt(\"{}{}.txt\".format(path, 'attributes'),\n dtype=np.float)\n features = np.pad(features, ((0, embedding.shape[0] - features.shape[0]), (0, 0)), 'constant', constant_values=0)\n\n features = torch.FloatTensor(features[:,:2])\n features = torch.cat([features,embedding], dim=1)\n\n # features = torch.FloatTensor(embedding)\n\n # build graph\n # idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # idx_map = {j: i for i, j in enumerate(idx)}\n edges = np.genfromtxt(\"{}{}.txt\".format(path, dataset),\n dtype=np.int32)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(n_nodes, n_nodes),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n # features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n labels = np.genfromtxt(\"{}{}.txt\".format(path, label_file),\n dtype=np.int32)\n reordered = np.random.permutation(np.arange(labels.shape[0]))\n total_labeled = labels.shape[0]\n\n idx_train = reordered[range(int(total_labeled * 0.4))]\n idx_val = reordered[range(int(total_labeled * 0.4), int(total_labeled * 0.8))]\n idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n labels = torch.LongTensor(labels)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\ndef read_graph_yago(path=\"./data/freebase/\", dataset=\"homograph\",\n label_file=\"labels\", emb_file=\"MADW_16\"):\n print('Loading {} dataset...'.format(dataset))\n\n with open(\"{}{}.emb\".format(path, emb_file)) as f:\n n_nodes, n_feature = map(int, f.readline().strip().split())\n print(\"number of nodes:{}, embedding size:{}\".format(n_nodes, n_feature))\n\n # n_nodes-=1\n embedding = np.loadtxt(\"{}{}.emb\".format(path, emb_file),\n dtype=np.float32, skiprows=1,encoding='latin-1')\n emb_index = {}\n for i in range(n_nodes):\n # if type(embedding[i, 0]) is not int:\n # continue\n emb_index[embedding[i, 0]] = i\n\n features = np.asarray([embedding[emb_index[i], 1:] for i in range(embedding.shape[0])])\n\n features = torch.FloatTensor(features)\n features = torch.zeros((embedding.shape[0],1))\n\n\n # build graph\n # idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n # idx_map = {j: i for i, j in enumerate(idx)}\n edges = np.genfromtxt(\"{}{}.txt\".format(path, dataset),\n dtype=np.int32)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(n_nodes, n_nodes),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n # features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = 
sparse_mx_to_torch_sparse_tensor(adj)\n\n movies = []\n with open('{}{}.txt'.format(path, \"movies\"), mode='r', encoding='UTF-8') as f:\n for line in f:\n movies.append(line.split()[0])\n\n n_movie = len(movies)\n movie_dict = {a: i for (i, a) in enumerate(movies)}\n\n # features = np.zeros(n_movie).reshape(-1, 1)\n\n labels_raw = []\n with open('{}{}.txt'.format(path, label_file), 'r', encoding='UTF-8') as f:\n for line in f:\n arr = line.split()\n labels_raw.append([int(movie_dict[arr[0]]), int(arr[1])])\n labels_raw = np.asarray(labels_raw)\n\n labels = np.zeros(n_movie)\n labels[labels_raw[:, 0]] = labels_raw[:, 1]\n\n reordered = np.random.permutation(labels_raw[:, 0])\n total_labeled = labels_raw.shape[0]\n\n idx_train = reordered[range(int(total_labeled * 0.4))]\n idx_val = reordered[range(int(total_labeled * 0.4), int(total_labeled * 0.8))]\n idx_test = reordered[range(int(total_labeled * 0.8), total_labeled)]\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n labels = torch.LongTensor(labels)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\n# Load data\nadj, features, labels, idx_train, idx_val, idx_test = \\\n read_graph_dblp()\n\nprint('Read data finished!')\n\n# Model and optimizer\nmodel = GCN(n_nodes=features.shape[0],\n nfeat=features.shape[1],\n nhid=args.hidden,\n nclass=labels.max().item() + 1,\n dropout=args.dropout,\n prep=True,\n emb_dim=16, #args.prep_dim\n )\noptimizer = optim.Adam(model.parameters(),\n lr=args.lr, weight_decay=args.weight_decay)\n\nprint('Model init finished!')\n\nif args.cuda:\n model.cuda()\n features = features.cuda()\n adj = adj.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n labels = labels.cuda()\n\n\ndef train(epoch):\n t = time.time()\n model.train()\n optimizer.zero_grad()\n output = model(features,adj)\n loss_train = F.nll_loss(output[idx_train], labels[idx_train])\n acc_train = accuracy(output[idx_train], labels[idx_train])\n loss_train.backward()\n optimizer.step()\n\n if not args.fastmode:\n # Evaluate validation set performance separately,\n # deactivates dropout during validation run.\n model.eval()\n output = model(features,adj)\n\n loss_val = F.nll_loss(output[idx_val], labels[idx_val])\n acc_val = accuracy(output[idx_val], labels[idx_val])\n print('Epoch: {:04d}'.format(epoch + 1),\n 'loss_train: {:.4f}'.format(loss_train.item()),\n 'acc_train: {:.4f}'.format(acc_train.item()),\n 'loss_val: {:.4f}'.format(loss_val.item()),\n 'acc_val: {:.4f}'.format(acc_val.item()),\n 'time: {:.4f}s'.format(time.time() - t))\n test()\n\n\ndef test():\n model.eval()\n output = model(features,adj)\n loss_test = F.nll_loss(output[idx_test], labels[idx_test])\n acc_test = accuracy(output[idx_test], labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n\nprint(model)\n\n# Train model\nt_total = time.time()\nfor epoch in range(args.epochs):\n train(epoch)\nprint(\"Optimization Finished!\")\nprint(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n\n# Testing\ntest()\n" ]
[ [ "numpy.concatenate", "torch.zeros", "numpy.pad", "torch.cuda.manual_seed", "torch.cat", "numpy.asarray", "numpy.zeros", "numpy.random.seed", "torch.FloatTensor", "numpy.random.permutation", "numpy.ones", "torch.manual_seed", "torch.cuda.is_available", "torch.LongTensor", "numpy.arange", "torch.nn.functional.nll_loss", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
aasavari-kakne/simclr
[ "2a6a1805416736d77bf015b02159b84721ce0831" ]
[ "fashion_labels.py" ]
[ "import csv\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nimport tensorflow as tf\n\nst.title(\"Fashion Labels\")\n\n\n@st.cache\ndef get_labels(label_name_file, add_value=False):\n \"\"\"\n get list of labels and position_by_attriubtes dict\n\n Args:\n label_name_file: path to the label mapping file\n\n Returns:\n label_names: list of label names\n positions_by_attributes: {attributes/values: pos}\n\n \"\"\"\n label_names = []\n value_pos = 0\n\n # position of all values and attributes\n positions_by_attributes = {\"all\": []}\n\n with open(label_name_file, 'r', encoding=\"ISO-8859-1\") as f:\n reader = csv.reader(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for row in reader:\n attribute_name = row[1]\n value_name = row[2]\n\n if attribute_name and not attribute_name.startswith('has'):\n if attribute_name not in positions_by_attributes:\n positions_by_attributes[attribute_name] = []\n positions_by_attributes[attribute_name].append(value_pos)\n positions_by_attributes[\"all\"].append(value_pos)\n if add_value:\n positions_by_attributes['{}@{}'.format(attribute_name, value_name)] = [value_pos]\n value = row[2]\n label_name = '{}@{}'.format(attribute_name, value)\n label_names.append(label_name)\n\n value_pos += 1\n\n return np.array(label_names), positions_by_attributes\n\n\n@st.cache\ndef get_attributes(labels_file, attr_value_positions):\n sample_rows = []\n with open(labels_file, 'r') as f:\n reader = csv.reader(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n np_values = np.array(row[3:])[attr_value_positions]\n uniques, indices = np.unique(np_values, return_index=True)\n if '1' in uniques:\n sample_rows.append([row[0], indices[uniques.tolist().index('1')], row[1]])\n return sample_rows\n\n\ndef main(argv):\n np_label_names, positions_by_attributes = get_labels(argv[1])\n\n attr_list = sorted(list(positions_by_attributes.keys()))\n selected_attr = st.sidebar.selectbox('Select attribute', attr_list)\n num_items = st.sidebar.slider('Select number of items', 10, 1000, 100)\n\n attr_value_positions = positions_by_attributes[selected_attr]\n st.write(np_label_names[attr_value_positions])\n\n attr_values = get_attributes(argv[2], attr_value_positions)\n\n image_dir = argv[3]\n output_dir = argv[4]\n\n if len(attr_values) == 0:\n st.write('No result')\n else:\n urls = [t[0] for t in attr_values]\n values = [np_label_names[attr_value_positions][t[1]] for t in attr_values]\n values = [v.split('@')[1] for v in values]\n\n values_df = pd.DataFrame({'url': urls, 'value': values})\n uniques, counts = np.unique(values_df['value'], return_counts=True)\n value_stat_df = pd.DataFrame({'unique': uniques.tolist(), 'count': counts.tolist()})\n st.write(value_stat_df)\n\n st.image(image=urls[:num_items], caption=values[:num_items], width=100)\n\n if st.sidebar.button('Create NPZ Files'):\n start_time = time.time()\n image_data_list, label_list, url_list = [], [], []\n counter = 0\n for row in attr_values:\n filename = '%s/%s' % (image_dir, row[2])\n try:\n image_data = tf.gfile.FastGFile(filename, 'rb').read()\n image_data_list.append(image_data)\n label_list.append(row[1])\n url_list.append(row[0])\n\n counter += 1\n except:\n st.write('%s error' % filename)\n if os.path.isfile(filename):\n st.write('file %s exists' % filename)\n else:\n st.write('file %s does not exist' % filename)\n\n if len(image_data_list) == 1000:\n st.write('Writing %s/%d.npz' % (output_dir, counter / 1000))\n 
np.savez_compressed('%s/%d.npz' % (output_dir, counter / 1000),\n image=image_data_list,\n label=label_list,\n url=url_list)\n image_data_list, label_list, url_list = [], [], []\n\n np.savez_compressed('%s/%d.npz' % (output_dir, counter / 1000 + 1),\n image=image_data_list,\n label=label_list,\n url=url_list)\n\n st.write('Time taken %.2f' % (time.time() - start_time))\n\n\nmain(sys.argv)\n" ]
[ [ "numpy.array", "pandas.DataFrame", "numpy.savez_compressed", "tensorflow.gfile.FastGFile", "numpy.unique" ] ]
loaiabdalslam/Brainless
[ "d363e0d713fc9b024a4fac990b9c39cd59769454" ]
[ "tests/advanced_tests/advanced_install_tests.py" ]
[ "import datetime\nimport os\nimport random\nimport sys\nimport warnings\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimport tests.utils_testing as utils\nfrom brainless import Predictor\nfrom brainless.utils_models import load_ml_model\n\nsys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path\nos.environ['is_test_suite'] = 'True'\n\n\ndef test_feature_learning_getting_single_predictions_classification(model_name=None):\n np.random.seed(0)\n\n df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()\n\n column_descriptions = {\n 'survived': 'output',\n 'sex': 'categorical',\n 'embarked': 'categorical',\n 'pclass': 'categorical'\n }\n\n ml_predictor = Predictor(\n type_of_estimator='classifier', column_descriptions=column_descriptions)\n\n # NOTE: this is bad practice to pass in our same training set as our fl_data set,\n # but we don't have enough data to do it any other way\n df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)\n ml_predictor.train(\n df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)\n\n file_name = ml_predictor.save(str(random.random()))\n\n saved_ml_pipeline = load_ml_model(file_name)\n\n os.remove(file_name)\n try:\n keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'\n os.remove(keras_file_name)\n except:\n pass\n\n df_titanic_test_dictionaries = df_titanic_test.to_dict('records')\n\n # 1. make sure the accuracy is the same\n\n predictions = []\n for row in df_titanic_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict_proba(row)[1])\n\n print('predictions')\n print(predictions)\n\n first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)\n print('first_score')\n print(first_score)\n # Make sure our score is good, but not unreasonably good\n\n lower_bound = -0.16\n if model_name == 'DeepLearningClassifier':\n lower_bound = -0.187\n\n assert lower_bound < first_score < -0.133\n\n # 2. make sure the speed is reasonable (do it a few extra times)\n data_length = len(df_titanic_test_dictionaries)\n start_time = datetime.datetime.now()\n for idx in range(1000):\n row_num = idx % data_length\n saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n\n print('duration.total_seconds()')\n print(duration.total_seconds())\n\n # It's very difficult to set a benchmark for speed that will work across all machines.\n # On my 2013 bottom of the line 15\" MacBook Pro,\n # this runs in about 0.8 seconds for 1000 predictions\n # That's about 1 millisecond per prediction\n # Assuming we might be running on a test box that's pretty weak, multiply by 3\n # Also make sure we're not running unreasonably quickly\n assert 0.2 < duration.total_seconds() < 15\n\n # 3. 
make sure we're not modifying the dictionaries\n # (the score is the same after running a few experiments as it is the first time)\n\n predictions = []\n for row in df_titanic_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict_proba(row)[1])\n\n print('predictions')\n print(predictions)\n print('df_titanic_test_dictionaries')\n print(df_titanic_test_dictionaries)\n second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)\n print('second_score')\n print(second_score)\n # Make sure our score is good, but not unreasonably good\n\n assert lower_bound < second_score < -0.133\n\n\ndef test_feature_learning_categorical_ensembling_getting_single_predictions_classification(\n model_name=None):\n np.random.seed(0)\n\n df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()\n\n column_descriptions = {\n 'survived': 'output',\n 'sex': 'categorical',\n 'embarked': 'categorical',\n 'pclass': 'categorical'\n }\n\n ml_predictor = Predictor(\n type_of_estimator='classifier', column_descriptions=column_descriptions)\n\n # NOTE: this is bad practice to pass in our same training set as our fl_data set,\n # but we don't have enough data to do it any other way\n df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)\n ml_predictor.train_categorical_ensemble(\n df_titanic_train,\n model_names=model_name,\n feature_learning=True,\n fl_data=fl_data,\n categorical_column='embarked')\n\n file_name = ml_predictor.save(str(random.random()))\n\n from brainless.utils_models import load_ml_model\n\n saved_ml_pipeline = load_ml_model(file_name)\n\n os.remove(file_name)\n try:\n keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'\n os.remove(keras_file_name)\n except:\n pass\n\n df_titanic_test_dictionaries = df_titanic_test.to_dict('records')\n\n # 1. make sure the accuracy is the same\n\n predictions = []\n for row in df_titanic_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict_proba(row)[1])\n\n print('predictions')\n print(predictions)\n\n first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)\n print('first_score')\n print(first_score)\n # Make sure our score is good, but not unreasonably good\n\n lower_bound = -0.175\n if model_name == 'DeepLearningClassifier':\n lower_bound = -0.245\n if model_name == 'CatBoostClassifier':\n lower_bound = -0.265\n\n assert lower_bound < first_score < -0.14\n\n # 2. make sure the speed is reasonable (do it a few extra times)\n data_length = len(df_titanic_test_dictionaries)\n start_time = datetime.datetime.now()\n for idx in range(1000):\n row_num = idx % data_length\n saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n\n print('duration.total_seconds()')\n print(duration.total_seconds())\n\n # It's very difficult to set a benchmark for speed that will work across all machines.\n # On my 2013 bottom of the line 15\" MacBook Pro,\n # this runs in about 0.8 seconds for 1000 predictions\n # That's about 1 millisecond per prediction\n # Assuming we might be running on a test box that's pretty weak, multiply by 3\n # Also make sure we're not running unreasonably quickly\n assert 0.2 < duration.total_seconds() < 15\n\n # 3. 
make sure we're not modifying the dictionaries\n # (the score is the same after running a few experiments as it is the first time)\n\n predictions = []\n for row in df_titanic_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict_proba(row)[1])\n\n print('predictions')\n print(predictions)\n print('df_titanic_test_dictionaries')\n print(df_titanic_test_dictionaries)\n second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)\n print('second_score')\n print(second_score)\n # Make sure our score is good, but not unreasonably good\n\n assert lower_bound < second_score < -0.147\n\n\ndef test_feature_learning_getting_single_predictions_regression(model_name=None):\n np.random.seed(0)\n\n df_boston_train, df_boston_test = utils.get_boston_regression_dataset()\n\n column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}\n\n ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)\n\n # NOTE: this is bad practice to pass in our same training set as our fl_data set,\n # but we don't have enough data to do it any other way\n df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)\n ml_predictor.train(\n df_boston_train, model_names=model_name, feature_learning=True, fl_data=fl_data)\n\n file_name = ml_predictor.save(str(random.random()))\n\n # from brainless.utils_models import load_keras_model\n\n # saved_ml_pipeline = load_keras_model(file_name)\n\n saved_ml_pipeline = load_ml_model(file_name)\n\n os.remove(file_name)\n try:\n keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'\n os.remove(keras_file_name)\n except:\n pass\n\n df_boston_test_dictionaries = df_boston_test.to_dict('records')\n\n # 1. make sure the accuracy is the same\n\n predictions = []\n for row in df_boston_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict(row))\n\n first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)\n print('first_score')\n print(first_score)\n # Make sure our score is good, but not unreasonably good\n\n lower_bound = -4.0\n\n assert lower_bound < first_score < -2.8\n\n # 2. make sure the speed is reasonable (do it a few extra times)\n data_length = len(df_boston_test_dictionaries)\n start_time = datetime.datetime.now()\n for idx in range(1000):\n row_num = idx % data_length\n saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n\n print('duration.total_seconds()')\n print(duration.total_seconds())\n\n # It's very difficult to set a benchmark for speed that will work across all machines.\n # On my 2013 bottom of the line 15\" MacBook Pro,\n # this runs in about 0.8 seconds for 1000 predictions\n # That's about 1 millisecond per prediction\n # Assuming we might be running on a test box that's pretty weak, multiply by 3\n # Also make sure we're not running unreasonably quickly\n assert 0.2 < duration.total_seconds() / 1.0 < 20\n\n # 3. 
make sure we're not modifying the dictionaries\n # (the score is the same after running a few experiments as it is the first time)\n\n predictions = []\n for row in df_boston_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict(row))\n\n second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)\n print('second_score')\n print(second_score)\n # Make sure our score is good, but not unreasonably good\n\n assert lower_bound < second_score < -2.8\n\n\ndef test_feature_learning_categorical_ensembling_getting_single_predictions_regression(\n model_name=None):\n np.random.seed(0)\n\n df_boston_train, df_boston_test = utils.get_boston_regression_dataset()\n\n column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}\n\n ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)\n\n # NOTE: this is bad practice to pass in our same training set as our fl_data set,\n # but we don't have enough data to do it any other way\n df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)\n ml_predictor.train_categorical_ensemble(\n df_boston_train,\n model_names=model_name,\n feature_learning=True,\n fl_data=fl_data,\n categorical_column='CHAS')\n\n # print('Score on training data')\n # ml_predictor.score(df_boston_train, df_boston_train.MEDV)\n\n file_name = ml_predictor.save(str(random.random()))\n\n from brainless.utils_models import load_ml_model\n\n saved_ml_pipeline = load_ml_model(file_name)\n\n # with open(file_name, 'rb') as read_file:\n # saved_ml_pipeline = dill.load(read_file)\n os.remove(file_name)\n try:\n keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'\n os.remove(keras_file_name)\n except:\n pass\n\n df_boston_test_dictionaries = df_boston_test.to_dict('records')\n\n # 1. make sure the accuracy is the same\n\n predictions = []\n for row in df_boston_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict(row))\n\n first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)\n print('first_score')\n print(first_score)\n # Make sure our score is good, but not unreasonably good\n\n lower_bound = -4.5\n\n assert lower_bound < first_score < -3.4\n\n # 2. make sure the speed is reasonable (do it a few extra times)\n data_length = len(df_boston_test_dictionaries)\n start_time = datetime.datetime.now()\n for idx in range(1000):\n row_num = idx % data_length\n saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n\n print('duration.total_seconds()')\n print(duration.total_seconds())\n\n # It's very difficult to set a benchmark for speed that will work across all machines.\n # On my 2013 bottom of the line 15\" MacBook Pro,\n # this runs in about 0.8 seconds for 1000 predictions\n # That's about 1 millisecond per prediction\n # Assuming we might be running on a test box that's pretty weak, multiply by 3\n # Also make sure we're not running unreasonably quickly\n assert 0.2 < duration.total_seconds() / 1.0 < 15\n\n # 3. 
make sure we're not modifying the dictionaries\n # (the score is the same after running a few experiments as it is the first time)\n\n predictions = []\n for row in df_boston_test_dictionaries:\n predictions.append(saved_ml_pipeline.predict(row))\n\n second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)\n print('second_score')\n print(second_score)\n # Make sure our score is good, but not unreasonably good\n\n assert lower_bound < second_score < -3.4\n\n\ndef test_all_algos_classification(model_name=None):\n np.random.seed(0)\n\n df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()\n\n column_descriptions = {\n 'survived': 'output',\n 'sex': 'categorical',\n 'embarked': 'categorical',\n 'pclass': 'categorical'\n }\n\n ml_predictor = Predictor(\n type_of_estimator='classifier', column_descriptions=column_descriptions)\n\n ml_predictor.train(\n df_titanic_train,\n model_names=[\n 'LogisticRegression', 'RandomForestClassifier', 'RidgeClassifier',\n 'GradientBoostingClassifier', 'ExtraTreesClassifier', 'AdaBoostClassifier',\n 'SGDClassifier', 'Perceptron', 'PassiveAggressiveClassifier', 'DeepLearningClassifier',\n 'XGBClassifier', 'LGBMClassifier', 'LinearSVC'\n ])\n\n test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)\n\n print('test_score')\n print(test_score)\n\n # Linear models aren't super great on this dataset...\n assert -0.215 < test_score < -0.131\n\n\ndef test_all_algos_regression():\n # a random seed of 42 has ExtraTreesRegressor getting the best CV score,\n # and that model doesn't generalize as well as GradientBoostingRegressor.\n np.random.seed(0)\n\n df_boston_train, df_boston_test = utils.get_boston_regression_dataset()\n\n column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}\n\n ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)\n\n ml_predictor.train(\n df_boston_train,\n model_names=[\n 'LinearRegression', 'RandomForestRegressor', 'Ridge', 'GradientBoostingRegressor',\n 'AdaBoostRegressor', 'SGDRegressor', 'PassiveAggressiveRegressor', 'Lasso', 'LassoLars',\n 'ElasticNet', 'OrthogonalMatchingPursuit', 'BayesianRidge', 'ARDRegression',\n 'MiniBatchKMeans', 'DeepLearningRegressor', 'LGBMRegressor', 'XGBClassifier',\n 'LinearSVR', 'CatBoostRegressor'\n ])\n\n test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)\n\n print('test_score')\n print(test_score)\n\n assert -3.4 < test_score < -2.8\n\n\ndef test_throws_warning_when_fl_data_equals_df_train():\n df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()\n\n column_descriptions = {\n 'survived': 'output',\n 'sex': 'categorical',\n 'embarked': 'categorical',\n 'pclass': 'categorical'\n }\n\n ml_predictor = Predictor(\n type_of_estimator='classifier', column_descriptions=column_descriptions)\n\n with warnings.catch_warnings(record=True) as w:\n\n try:\n ml_predictor.train(df_titanic_train, feature_learning=True, fl_data=df_titanic_train)\n except KeyError as e:\n pass\n # We should not be getting to this line- we should be throwing an error above\n for thing in w:\n print(thing)\n assert len(w) >= 1\n assert True\n" ]
[ [ "numpy.random.seed", "sklearn.model_selection.train_test_split" ] ]
junghun73/Learning
[ "8b5a295c42f142a3b2f5fa13fc75434a2ea9235a", "cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6" ]
[ "advanced/mathematical_optimization/examples/plot_exercise_ill_conditioned.py", "packages/scikit-learn/show_ica.py" ]
[ "\"\"\"\nAlternating optimization\n=========================\n\nThe challenge here is that Hessian of the problem is a very\nill-conditioned matrix. This can easily be seen, as the Hessian of the\nfirst term in simply 2*np.dot(K.T, K). Thus the conditioning of the\nproblem can be judged from looking at the conditioning of K.\n\"\"\"\nimport time\n\nimport numpy as np\nfrom scipy import optimize\nimport pylab as pl\n\nnp.random.seed(0)\n\nK = np.random.normal(size=(100, 100))\n\ndef f(x):\n return np.sum((np.dot(K, x - 1))**2) + np.sum(x**2)**2\n\n\ndef f_prime(x):\n return 2*np.dot(np.dot(K.T, K), x - 1) + 4*np.sum(x**2)*x\n\n\ndef hessian(x):\n H = 2*np.dot(K.T, K) + 4*2*x*x[:, np.newaxis]\n return H + 4*np.eye(H.shape[0])*np.sum(x**2)\n\n\n###############################################################################\n# Some pretty plotting\n\npl.figure(1)\npl.clf()\nZ = X, Y = np.mgrid[-1.5:1.5:100j, -1.1:1.1:100j]\n# Complete in the additional dimensions with zeros\nZ = np.reshape(Z, (2, -1)).copy()\nZ.resize((100, Z.shape[-1]))\nZ = np.apply_along_axis(f, 0, Z)\nZ = np.reshape(Z, X.shape)\npl.imshow(Z.T, cmap=pl.cm.gray_r, extent=[-1.5, 1.5, -1.1, 1.1],\n origin='lower')\npl.contour(X, Y, Z, cmap=pl.cm.gnuplot)\n\n# A reference but slow solution:\nt0 = time.time()\nx_ref = optimize.fmin_powell(f, K[0], xtol=1e-10, ftol=1e-6, disp=0)\nprint(' Powell: time %.2fs' % (time.time() - t0))\nf_ref = f(x_ref)\n\n# Compare different approaches\nt0 = time.time()\nx_bfgs = optimize.fmin_bfgs(f, K[0], disp=0)[0]\nprint(' BFGS: time %.2fs, x error %.2f, f error %.2f' % (time.time() - t0,\n np.sqrt(np.sum((x_bfgs - x_ref)**2)), f(x_bfgs) - f_ref))\n\nt0 = time.time()\nx_l_bfgs = optimize.fmin_l_bfgs_b(f, K[0], approx_grad=1, disp=0)[0]\nprint(' L-BFGS: time %.2fs, x error %.2f, f error %.2f' % (time.time() - t0,\n np.sqrt(np.sum((x_l_bfgs - x_ref)**2)), f(x_l_bfgs) - f_ref))\n\n\nt0 = time.time()\nx_bfgs = optimize.fmin_bfgs(f, K[0], f_prime, disp=0)[0]\nprint(\" BFGS w f': time %.2fs, x error %.2f, f error %.2f\" % (\n time.time() - t0, np.sqrt(np.sum((x_bfgs - x_ref)**2)),\n f(x_bfgs) - f_ref))\n\nt0 = time.time()\nx_l_bfgs = optimize.fmin_l_bfgs_b(f, K[0], f_prime, disp=0)[0]\nprint(\"L-BFGS w f': time %.2fs, x error %.2f, f error %.2f\" % (\n time.time() - t0, np.sqrt(np.sum((x_l_bfgs - x_ref)**2)),\n f(x_l_bfgs) - f_ref))\n\nt0 = time.time()\nx_newton = optimize.fmin_ncg(f, K[0], f_prime, fhess=hessian, disp=0)[0]\nprint(\" Newton: time %.2fs, x error %.2f, f error %.2f\" % (\n time.time() - t0, np.sqrt(np.sum((x_newton - x_ref)**2)),\n f(x_newton) - f_ref))\n\npl.show()\n\n", "from sklearn import datasets, decomposition\nimport pylab as pl\nimport numpy as np\n\ndigits = datasets.load_digits()\n\ndigits.data += .2 * np.random.normal(size=digits.data.shape)\nica = decomposition.FastICA(n_components=10)\ntt = ica.fit(digits.data.T).transform(digits.data.T).T\n\nfor i in range(8):\n pl.subplot(2, 4, 1 + i)\n pl.imshow(tt[i].reshape(8, 8), cmap=pl.cm.gray_r, interpolation='nearest')\n# pl.axis('off')\npl.show()\n" ]
[ [ "scipy.optimize.fmin_ncg", "numpy.random.normal", "numpy.dot", "numpy.reshape", "scipy.optimize.fmin_l_bfgs_b", "numpy.random.seed", "scipy.optimize.fmin_powell", "numpy.sum", "scipy.optimize.fmin_bfgs", "numpy.eye", "numpy.apply_along_axis" ], [ "numpy.random.normal", "sklearn.decomposition.FastICA", "sklearn.datasets.load_digits" ] ]
gasongjian/easyml
[ "cd6bddef1f154c5a79e4e0e72ee858436d962caf" ]
[ "check.py" ]
[ "import statsmodels.api as sm\nfrom statsmodels.sandbox.nonparametric import kernels\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\n\n'''特征空间解析\n\n我们将特征类型分为如下四种\n- numeric:连续的特征,表现为可以定义序关系且唯一值可以有无数个\n- category:类别型特征\n- Multi-category:多类别\n- object:无结构数据,暂不提供任何解析\n'''\n\n\n\ndef describe_numeric_1d(series,quantiles=None,missing_value = None):\n \"\"\"Describe a numeric series.\n\n Args:\n series: The Series to describe.\n quantiles: list,like [0.25,0.75].\n\n Returns:\n A dict containing calculated series description values.\n\n \"\"\"\n if quantiles is None:\n quantiles = [0.005,0.01,0.05,0.25,0.5,0.75,0.95,0.99,0.995]\n \n n = len(series)\n if missing_value:\n series = series.replace({missing_value:np.NaN})\n stats = {\n \"mean\": series.mean(),\n \"std\": series.std(),\n \"variance\": series.var(),\n \"min\": series.min(),\n \"max\": series.max(),\n \"kurtosis\": series.kurt(),\n \"skewness\": series.skew(),\n \"zeros\": (n - np.count_nonzero(series))/n,\n \"missing\":np.sum(series.isna())/n\n }\n stats.update({\n \"{:.1%}\".format(percentile).replace('.0',''): value\n for percentile, value in series.quantile(quantiles).to_dict().items()\n })\n stats[\"iqr\"] = stats[\"75%\"] - stats[\"25%\"]\n stats[\"cv\"] = stats[\"std\"] / stats[\"mean\"] if stats[\"mean\"] else np.NaN\n return stats\n\n\ndef _get_distrbution(x,x_lim=None,gridsize=None,bw=None,bw_method='scott',eta=1):\n '''\n ## brandwidth select\n A = min(std(x, ddof=1), IQR/1.349)\n Scott's rule: 1.059 * A * n ** (-1/5.) \n Silverman's Rule: .9 * A * n ** (-1/5.) \n \n '''\n x = pd.Series(x)\n stats = describe_numeric_1d(x)\n x_mean_fix = x[(x>=stats['5%'])&(x<=stats['95%'])].mean()\n\n # 截断数据用于分析合理的密度函数\n if x_lim is None:\n cv_lower,cv_upper = x[x<=stats['5%']].std()/(abs(x_mean_fix)+1e-14), x[x>=stats['95%']].std()/(abs(x_mean_fix)+1e-14)\n x_lim = [stats['5%'] if cv_lower>=eta else stats['min'],stats['95%'] if cv_upper>=eta else stats['max']]\n\n domain = [stats['min'],stats['max']]\n if cv_lower>=eta:\n domain[0] = -np.inf\n if cv_upper>=eta:\n domain[1] = np.inf\n\n # 选择绘图和计算需要的网格大小\n try:\n bw = float(bw)\n except:\n bw = sm.nonparametric.bandwidths.select_bandwidth(x,bw=bw_method,kernel = None)\n # 特征的样本数一般较大,这里规定gridsize最小为 128\n n_fix = len(x[(x>=x_lim[0])&(x<=x_lim[1])])\n if gridsize is None:\n gridsize=max(128,min(int(np.round((x_lim[1] - x_lim[0])/bw)),n_fix)) if bw>=1e-7 else None\n \n dens = sm.nonparametric.KDEUnivariate(x.dropna().astype(np.double).values)\n dens.fit(gridsize=gridsize,bw=bw,clip=x_lim)\n\n # 获取最新的 brandwidth 等数据\n bw = dens.bw\n # bw_method = bw_method if dens.bw_method = 'user-given' else dens.bw_method\n gridsize = len(dens.support)\n \n result = stats\n result.update({'key_dist':['bw','bw_method','support','density','x_lim','cdf','icdf','domain','gridsize','evaluate']\n ,'bw':bw\n ,'bw_method':bw_method\n ,'support':dens.support\n ,'density':dens.density\n ,'x_lim':x_lim\n ,'cdf':dens.cdf\n ,'icdf':dens.icdf\n ,'domain':domain\n ,'gridsize':gridsize\n ,'evaluate':dens.evaluate\n })\n return result\n\n\nclass feature_numeric(object):\n def __init__(self,name=None):\n self.name = name\n self.dtype = 'numeric'\n self.stats = None\n self.dist = None\n self.cross_proba=None\n self.cross_stats=None\n\n # 随机取样\n def sample(self,n):\n return self.dist['icdf'][np.random.randint(low=0,high = self.dist['gridsize'] -1,size=n)]\n \n def pdf(self,x):\n return self.dist['evaluate'](x)\n \n def describe(self):\n return self.stats\n \n def 
get_values(self,key):\n if key in self.dist:\n return self.dist[key]\n elif key in self.stats:\n return self.stats[key]\n elif key in self.cross_proba:\n return self.cross_proba[key]\n elif key in self.cross_stats:\n return self.cross_stats[key]\n else:\n return None\n \n def fit(self,x,y=None,**arg):\n result = _get_distrbution(x,**arg)\n self.stats = {key:value for key,value in result.items() if key not in result['key_dist']+['key_dist']}\n self.dist = {key:value for key,value in result.items() if key in result['key_dist']}\n if y is not None and len(x) == len(y):\n cross_proba,cross_stats = self.crosstab_bin(x,y)\n self.cross_proba = cross_proba\n self.cross_stats = cross_stats\n \n def crosstab_bin(self,x,y):\n \n x = pd.Series(x)\n y = pd.Series(y)\n n = len(y)\n dist_y = y.value_counts()/n\n\n bw = self.dist['bw']\n support = self.dist['support']\n domain = self.dist['domain']\n q995 = self.stats['99.5%']\n gridsize = self.dist['gridsize']\n seq = np.mean(support[1:] - support[0:-1])\n\n # 添加额外的支撑集,便于分析合理的泛化方法\n if domain[1] == np.inf:\n n_add = np.ceil((q995 - support[-1])/seq)\n support_add = [support[-1] + seq*(i+1) for i in range(int(n_add))]\n support_new = np.concatenate((support,support_add))\n else:\n support_new = support.copy()\n\n p_y1_x = np.zeros_like(support_new)\n cumulative = np.zeros_like(support_new)\n for i,xi in enumerate(support_new):\n ind =(x<=xi+bw)&(x>=xi-bw)\n tmp = y[ind].value_counts().to_dict()\n cnt = {0:dist_y[0],1:dist_y[1]}\n cnt[0] += tmp.get(0,0)\n cnt[1] += tmp.get(1,0)\n p_y1_x[i] = cnt[1]/(cnt[0]+cnt[1])\n cumulative[i] = np.sum(x<=xi)/n\n\n # 根据贝叶斯法则可以求出 \n p_x_y1 = self.dist['density']*p_y1_x[:gridsize]/dist_y[1]\n p_x_y0 = self.dist['density']*(1-p_y1_x[:gridsize])/dist_y[0]\n iv =np.sum((p_x_y1 - p_x_y0)*np.log2((1e-14+p_x_y1)/(p_x_y0+1e-14)))*seq\n\n cross_proba = {\n \"p(y=1|x)\":p_y1_x\n ,\"p(x|y=1)\":p_x_y1\n ,\"p(x|y=0)\":p_x_y0\n ,\"woe(x)\":np.log2(p_x_y0/p_x_y1)\n ,\"cumulative\":cumulative\n ,\"support_x\":support_new\n ,\"support_y\":support\n }\n\n cross_stats = {\n \"iv\":iv\n ,\"p(y=1)\":dist_y[1]\n ,\"p(y=0)\":dist_y[0]\n }\n\n return cross_proba,cross_stats\n \n \n def plot_pdf(self):\n x_min,x_max = self.stats['min'],self.stats['max']\n bw = self.dist['bw']\n if self.name:\n title = 'density curve of {}'.format(self.name)\n else:\n title = 'density curve'\n fig,ax=plt.subplots(figsize=[10,6.6])\n support = self.dist['support']\n #seq = np.mean(support[1:]-support[0:-1])\n #ind = (support>=x_min-3*seq)&(support<=x_max+3*seq)\n ax.plot(support,self.dist['density']);\n ax.set_title(title);\n ax.set_xlabel('range = [{},{}]'.format(x_min,x_max));\n fig.show()\n return None\n \n def summary(self):\n \n # 区分两个版本,一个有y 一个没 y\n tmp = pd.DataFrame(index=range(0,10),columns=['name1','value1','name2','value2','name3','value3'])\n tmp.name1 = ['missing','zeros','min','max','mean','std','skewness','kurtosis','cv','iqr']\n tmp.value1 = [self.stats[k] for k in tmp['name1'].values]\n tmp.name2 = ['0.5%','1%','5%','25%','50%','75%','95%','99%','99.5%','domain']\n tmp.value2 = [self.stats[k] for k in tmp['name2'][:-1].values]+[str(self.dist['domain'])]\n tmp.loc[0,'name3'] = 'iv'\n tmp.loc[0,'value3'] = self.cross_stats['iv']\n tmp.loc[1,'name3'] = 'p(y=1)'\n tmp.loc[1,'value3'] = self.cross_stats['p(y=1)']\n display(tmp)\n\n support_new = self.cross_proba['support_x']\n ind1 = (self.cross_proba['support_x']>=self.stats['min'])&(self.cross_proba['support_x']<=self.stats['max'])\n p_y1 = self.cross_stats['p(y=1)']\n \n 
fig,[ax1,ax2]=plt.subplots(2,1,figsize=[10,13])\n ax1.plot(support_new[ind1],self.cross_proba['p(y=1|x)'][ind1] ,'.');\n ax1.plot([support_new[0],support_new[-1]] ,[p_y1,p_y1],label = 'baseline')\n ax1_ = ax1.twinx()\n ax1_.plot(support_new[ind1],self.cross_proba['cumulative'][ind1],label = 'cumulative',color='red')\n ax1_.legend(loc = 'center left')\n ax1.set_title(r'$p(y=1|x)$');\n ax1.legend()\n\n ind2 = (self.cross_proba['support_y']>=self.stats['min'])&(self.cross_proba['support_y']<=self.stats['max'])\n ax2.plot(self.cross_proba['support_y'],self.cross_proba['p(x|y=1)'],label=r'$p(x|y=1)$')\n ax2.plot(self.cross_proba['support_y'],self.cross_proba['p(x|y=0)'],label=r'$p(x|y=0)$')\n ax2.plot(self.cross_proba['support_y'],self.dist['density'],label=r'$p(x)$',color = '0.5',linestyle='--')\n ax2_ = ax2.twinx()\n ax2_.plot(self.cross_proba['support_y'][ind2],self.cross_proba['woe(x)'][ind2],label = 'woe(x)',color='red')\n ax2_.legend(loc = 'center right')\n ax2.legend()\n ax2.set_title(r'$p(x|y=1)$ vs $p(x|y=0)$')\n ax2.set_xlabel('iv = {:.2f}'.format(self.cross_stats['iv']))\n\n fig.show()\n \n \ndef sample_size_cal(p,alpha=0.05,e=0.05):\n import scipy.stats as stats\n z=stats.norm.ppf(1-alpha/2)\n return int(np.ceil(z**2*p*(1-p)/e**2))\n\ndef describe_categorical(x\n ,missing_value = None\n ,pct_pos = 0.5\n ,backoff_p = 0.05\n ,backoff_rnk = 30\n ,backoff_n = None\n ,alpha=0.05\n ,e=0.05):\n x = pd.Series(x)\n if missing_value:\n x = x.replace({str(missing_value):np.nan})\n n = len(x)\n missing = np.sum(x.isnull())\n p_x = x.value_counts().sort_values(ascending=False)/n\n itemlist = p_x.index.tolist()\n # 识别稀有类\n if backoff_n is None:\n backoff_n = sample_size_cal(pct_pos,alpha=alpha,e=e)\n x_base = pd.DataFrame(x.value_counts().sort_values(ascending=False),index=itemlist)\n x_base.columns = ['cnt']\n x_base['proba'] = x_base['cnt']/n\n x_base['type'] = 'normal'\n x_base['rnk'] = range(1,len(x_base)+1)\n x_base.loc[((x_base.proba<backoff_p)&(x_base.cnt<backoff_n))|(x_base.rnk>=backoff_rnk),'type'] = 'rare'\n stats = {\n \"missing\": missing/n,\n \"distinct_count\":len(itemlist),\n \"n\":n,\n \"entropy\":-1*np.sum(p_x*np.log2(p_x))\n }\n dist = {\n \"itemlist\":itemlist,\n \"p(x)\":p_x,\n \"type\":x_base['type'].to_dict(),\n \"itemlist_rare\":x_base[x_base.type=='rare'].index.tolist(),\n \"data\":x_base\n }\n return stats,dist\n\n\n\nclass feature_categorical(object):\n def __init__(self,name=None):\n self.name = name\n self.dtype = 'categorical'\n self.stats = None\n self.dist = None\n self.cross_proba=None\n self.cross_stats=None\n \n def crosstab_bin(self,x,y):\n x = pd.Series(x)\n y = pd.Series(y)\n n = x.shape[0]\n \n p_x = x.value_counts().sort_values(ascending=False)/n\n h_x = -1*np.sum(p_x*np.log2(p_x))\n p_y = y.value_counts()/n\n \n # woe 等需要知道 y=1 中 missing的缺失率\n n_y_missing = {1:0,0:0}\n n_missing = np.sum(x.isnull())\n if n_missing>=1:\n n_y_missing.update(y[x.isnull()].value_counts().to_dict())\n cross_missing = {\n \"p(missing|y=1)\":n_y_missing[1]/(p_y[1]*n)\n ,\"p(missing|y=0)\":n_y_missing[0]/(p_y[0]*n)\n ,\"p(y=1|missing)\":n_y_missing[1]/(n_y_missing[0]+n_y_missing[1])\n ,\"p(y=0|missing)\":n_y_missing[0]/(n_y_missing[0]+n_y_missing[1])\n }\n else:\n cross_missing = {\n \"p(missing|y=1)\":0\n ,\"p(missing|y=0)\":0\n ,\"p(y=1|missing)\":np.nan\n ,\"p(y=0|missing)\":np.nan\n }\n \n # 为避免部分类别项不同时存在正负样本,统计给每一个类别都加一个样本\n p_xy = (pd.crosstab(x,y)+[p_y[0],p_y[1]])/n\n p_x_y = p_xy.div(p_xy.sum(axis=0),axis=1)\n p_y_x = p_xy.div(p_xy.sum(axis=1),axis=0)\n p_xy_expected = 
pd.DataFrame(np.dot(pd.DataFrame(p_x),pd.DataFrame(p_y).T),index=p_x.index,columns=p_y.index) \n info_gain = (p_xy*np.log2(p_xy/p_xy_expected)).sum().sum()\n info_gain_ratio = info_gain/h_x\n \n cross_proba = {\n \"p(y)\":p_y\n ,\"p(x,y)\":p_xy\n ,\"p(x)p(y)\":pd.DataFrame(np.dot(pd.DataFrame(p_x),pd.DataFrame(p_y).T),index=p_x.index,columns=p_y.index)\n ,\"p(y=1|x)\":p_y_x[1]\n ,\"p(x|y=1)\":p_x_y[1]\n ,\"p(x|y=0)\":p_x_y[0]\n ,\"woe(x)\":np.log2(p_x_y[0]/p_x_y[1])\n ,\"cross_missing\":cross_missing\n }\n\n cross_stats = {\n \"iv\":np.sum((p_x_y[0] - p_x_y[1])*np.log2(p_x_y[0]/p_x_y[1]))\n ,\"p(y=1)\":p_y[1]\n ,\"p(y=0)\":p_y[0]\n ,\"info_gain\":info_gain\n ,\"info_gain_ratio\":info_gain_ratio\n }\n\n return cross_proba,cross_stats\n \n \n def fit(self,x,y=None,missing_value=None,pct_pos=0.5,backoff_p=0.05,backoff_rnk=30\n ,backoff_n=None,alpha=0.05,e=0.05):\n param = {'missing_value':missing_value,'pct_pos':pct_pos,'backoff_p':backoff_p,'backoff_rnk':backoff_rnk\n ,'backoff_n':backoff_n,'alpha':alpha,'e':e}\n stats,dist = describe_categorical(x,**param)\n \n self.stats = stats\n self.dist = dist\n if y is not None and len(x) == len(y):\n cross_proba,cross_stats = self.crosstab_bin(x,y)\n self.cross_proba = cross_proba\n self.cross_stats = cross_stats\n \n # 随机取样\n def sample(self,n,drop_na=True):\n itemlist = self.dist['itemlist']\n p=self.dist['p(x)'][itemlist]\n if drop_na and self.stats['missing']>0:\n itemlist+=[np.nan]\n p+=[self.stats['missing']]\n return np.random.choice(itemlist, n, p=p)\n \n def pdf(self,x):\n return self.dist['p(x)'][x]\n \n def describe(self):\n return self.stats,self.dist\n \n def get_values(self,key):\n if key in self.dist:\n return self.dist[key]\n elif key in self.stats:\n return self.stats[key]\n elif key in self.cross_proba:\n return self.cross_proba[key]\n elif key in self.cross_stats:\n return self.cross_stats[key]\n else:\n return None\n\n def plot_pdf(self):\n \n if self.name:\n title = 'frequency histogram of {}'.format(self.name)\n else:\n title = 'frequency histogram'\n \n x_base = self.dist['data']\n other = pd.Series({'Other values ({})'.format(len(x_base[x_base['type'] == 'rare']))\n :x_base.loc[x_base['type'] == 'rare','proba'].sum()})\n tmp = x_base.loc[x_base.type == 'normal','proba']\n tmp = pd.concat([pd.Series({'(Missing)':self.stats['missing']}),tmp,other])\n\n fig,ax=plt.subplots(figsize=[10,6.6])\n sns.barplot(tmp.values*100,tmp.index,orient = 'h',ax=ax)\n ax.set_title(title)\n ax.set_xlabel('pct %')\n fig.show()\n\n \n \n \n def summary(self):\n \n if self.name:\n title = 'frequency histogram and woe(x) of {}'.format(self.name)\n else:\n title = 'frequency histogram and woe(x)'\n\n tmp = pd.DataFrame(index=range(0,6),columns=['name1','value1','name2','value2'])\n tmp.name1 = ['n','missing','distinct_count','distinct_count_normal','items_top3','entropy']\n tmp.value1 = [self.stats['n'],self.stats['missing'],self.stats['distinct_count'],self.stats['distinct_count']-len(self.dist['itemlist_rare'])\n ,str(self.dist['itemlist'][:3]),self.stats['entropy']]\n\n tmp.name2 = ['p(y=1)','p(y=0)','iv','info_gain','info_gain_ratio',np.nan]\n tmp.value2 = [self.cross_stats[k] for k in tmp['name2'][:-1].values]+[np.nan]\n display(tmp)\n\n x_base = self.dist['data']\n other = pd.Series({'Other values ({})'.format(len(x_base[x_base['type'] == 'rare']))\n :x_base.loc[x_base['type'] == 'rare','proba'].sum()})\n tmp = x_base.loc[x_base.type == 'normal','proba']\n tmp = pd.concat([pd.Series({'(Missing)':self.stats['missing']}),tmp,other])\n\n 
fig,ax=plt.subplots(figsize=[10,6.6])\n sns.barplot(tmp.values*100,tmp.index,orient = 'h',ax=ax)\n ax.set_title(title)\n ax.set_xlabel('pct %')\n\n\n # 绘制 woe\n item_rare = self.dist['itemlist_rare']\n if item_rare:\n woe_rare = np.log2(self.cross_proba['p(x|y=0)'][item_rare].sum()/self.cross_proba['p(x|y=1)'][item_rare].sum())\n else:\n woe_rare = np.nan \n woe_rare = pd.Series({'Other values ({})'.format(len(x_base[x_base['type'] == 'rare']))\n :woe_rare})\n\n if self.stats['missing']>0 and self.cross_proba['cross_missing']['p(missing|y=1)']>0:\n woe_missing = np.log2(self.cross_proba['cross_missing']['p(missing|y=0)']/self.cross_proba['cross_missing']['p(missing|y=1)'])\n else:\n woe_missing = np.nan\n itemlist_normal = [item for item in self.dist['itemlist'] if item not in item_rare]\n tmp2 = self.cross_proba['woe(x)'][itemlist_normal]\n tmp2 = pd.concat([pd.Series({'(Missing)':woe_missing}),tmp2,woe_rare])\n\n ax1 = ax.twiny()\n ax1.plot(tmp2.values,tmp2.index,'.:',color='red',label='woe(x)')\n ax1.legend()\n\n fig.show()" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.ceil", "numpy.random.choice", "numpy.count_nonzero", "numpy.round", "numpy.sum", "pandas.crosstab", "pandas.DataFrame", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.random.randint", "pandas.Series", "numpy.log2" ] ]
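The feature-analysis module in the record above derives weight-of-evidence (WOE) and information value (IV) from the conditional class probabilities p(x|y). A minimal sketch of that calculation for a categorical feature, using invented toy data and the same smoothing trick as crosstab_bin (adding p(y) to every cell so no category is empty in either class):

import numpy as np
import pandas as pd

# Toy feature and binary target, made up purely for illustration
x = pd.Series(["a", "a", "b", "b", "b", "c", "c", "a", "b", "c"])
y = pd.Series([1, 0, 0, 0, 1, 1, 0, 0, 0, 1])

n = len(x)
p_y = y.value_counts() / n                      # p(y=0), p(y=1)
# add p(y) to every cell so every category has mass in both classes
p_xy = (pd.crosstab(x, y) + [p_y[0], p_y[1]]) / n
p_x_y = p_xy.div(p_xy.sum(axis=0), axis=1)      # columns become p(x|y=0), p(x|y=1)
woe = np.log2(p_x_y[0] / p_x_y[1])              # weight of evidence per category
iv = np.sum((p_x_y[0] - p_x_y[1]) * woe)        # information value of the feature
print(woe.round(3).to_dict(), round(float(iv), 3))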
LBNL-UCB-STI/synthpop
[ "cd437c0855a30e51bf8a4c7454900b2f9db9f1ab" ]
[ "synthpop/recipes/starter2.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom .. import categorizer as cat\nfrom ..census_helpers import Census\n\n\n# TODO DOCSTRINGS!!\nclass Starter:\n \"\"\"\n This is a recipe for getting the marginals and joint distributions to use\n to pass to the synthesizer using simple categories - population, age,\n race, and sex for people, and children, income, cars, and workers for\n households. This module is responsible for\n\n Parameters\n ----------\n c : object\n census_helpers.Census object\n state : string\n FIPS code the state\n county : string\n FIPS code for the county\n tract : string, optional\n FIPS code for a specific track or None for all tracts in the county\n acsyear : integer, optional\n Final year in the 5-year estimates ACS dataset.\n Default: 2016, which corresponds to 2011-2016 ACS dataset\n\n Returns\n -------\n household_marginals : DataFrame\n Marginals per block group for the household data (from ACS 5-year estimates)\n person_marginals : DataFrame\n Marginals per block group for the person data (from ACS 5-year estimates)\n household_jointdist : DataFrame\n joint distributions for the households (from PUMS 2010-2000), one joint\n distribution for each PUMA (one row per PUMA)\n person_jointdist : DataFrame\n joint distributions for the persons (from PUMS 2010-2000), one joint\n distribution for each PUMA (one row per PUMA)\n tract_to_puma_map : dictionary\n keys are tract ids and pumas are puma ids\n \"\"\"\n def __init__(self, key, state, county, tract=None, acsyear=2016):\n self.c = c = Census(key)\n self.state = state\n self.county = county\n self.tract = tract\n self.acsyear = acsyear\n\n structure_size_columns = ['B25032_0%02dE' % i for i in range(1, 24)]\n age_of_head_columns = ['B25007_0%02dE' % i for i in range(1, 22)]\n race_of_head_columns = ['B25006_0%02dE' % i for i in range(1, 11)]\n hispanic_head_columns = ['B25003I_0%02dE' % i for i in range(1, 4)]\n hh_size_columns = ['B25009_0%02dE' % i for i in range(1, 18)]\n income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]\n vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]\n workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]\n presence_of_children_columns = ['B11005_001E', 'B11005_002E', 'B11005_011E']\n presence_of_seniors_columns = ['B11007_002E', 'B11007_007E']\n tenure_mover_columns = ['B25038_0%02dE' % i for i in range(1, 16)]\n block_group_columns = (\n income_columns + presence_of_children_columns +\n hh_size_columns)\n tract_columns = vehicle_columns + workers_columns\n h_acs = c.block_group_and_tract_query(\n block_group_columns,\n tract_columns, state, county,\n merge_columns=['tract', 'county', 'state'],\n block_group_size_attr=\"B11005_001E\",\n tract_size_attr=\"B08201_001E\",\n tract=tract, year=acsyear)\n self.h_acs = h_acs\n\n self.h_acs_cat = cat.categorize(h_acs, {\n (\"hh_children\", \"yes\"): \"B11005_002E\",\n (\"hh_children\", \"no\"): \"B11005_011E\",\n (\"hh_income\", \"lt30\"):\n \"B19001_002E + B19001_003E + B19001_004E + \"\n \"B19001_005E + B19001_006E\",\n (\"hh_income\", \"gt30-lt60\"):\n \"B19001_007E + B19001_008E + B19001_009E + \"\n \"B19001_010E + B19001_011E\",\n (\"hh_income\", \"gt60-lt100\"): \"B19001_012E + B19001_013E\",\n (\"hh_income\", \"gt100-lt150\"): \"B19001_014E + B19001_015E\",\n (\"hh_income\", \"gt150\"): \"B19001_016E + B19001_017E\",\n (\"hh_cars\", \"none\"): \"B08201_002E\",\n (\"hh_cars\", \"one\"): \"B08201_003E\",\n (\"hh_cars\", \"two\"): \"B08201_004E\",\n (\"hh_cars\", \"three or more\"):\n \"B08201_005E + 
B08201_006E\",\n (\"hh_workers\", \"none\"): \"B08202_002E\",\n (\"hh_workers\", \"one\"): \"B08202_003E\",\n (\"hh_workers\", \"two\"): \"B08202_004E\",\n (\"hh_workers\", \"three or more\"): \"B08202_005E\",\n (\"hh_size\", \"one\"): \"B25009_003E + B25009_011E\",\n (\"hh_size\", \"two\"): \"B25009_004E + B25009_012E\",\n (\"hh_size\", \"three\"): \"B25009_005E + B25009_013E\",\n (\"hh_size\", \"four or more\"): \"B25009_006E + B25009_014E + \"\n \"B25009_007E + B25009_015E + \"\n \"B25009_008E + B25009_016E + \"\n \"B25009_009E + B25009_017E\"\n }, index_cols=['state', 'county', 'tract', 'block group'])\n\n # gq_population = ['B26001_001E']\n # HH population, for the hhpop/totalpop adjustment\n hh_population = ['B11002_001E']\n population = ['B01001_001E'] # This includes GQ\n hispanic = ['B03003_002E', 'B03003_003E']\n sex = ['B01001_002E', 'B01001_026E']\n race = ['B02001_0%02dE' % i for i in range(1, 11)]\n male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]\n female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]\n industry = ['C24030_0%02dE' % i for i in range(1, 56)] + ['B23025_007E']\n all_columns = population + sex + race + male_age_columns + \\\n female_age_columns + hh_population + hispanic + industry\n p_acs = c.block_group_query(all_columns, state, county, tract=tract, year=acsyear)\n self.p_acs = p_acs\n self.p_acs_cat = cat.categorize(p_acs, {\n (\"person_age\", \"19 and under\"):\n \"(B01001_003E + B01001_004E + B01001_005E + \"\n \"B01001_006E + B01001_007E + B01001_027E + \"\n \"B01001_028E + B01001_029E + B01001_030E + \"\n \"B01001_031E) * B11002_001E*1.0/B01001_001E\",\n (\"person_age\", \"20 to 35\"):\n \"(B01001_008E + B01001_009E + B01001_010E + \"\n \"B01001_011E + B01001_012E + B01001_032E + \"\n \"B01001_033E + B01001_034E + B01001_035E + \"\n \"B01001_036E) * B11002_001E*1.0/B01001_001E\",\n (\"person_age\", \"35 to 60\"):\n \"(B01001_013E + B01001_014E + B01001_015E + \"\n \"B01001_016E + B01001_017E + B01001_037E + \"\n \"B01001_038E + B01001_039E + B01001_040E + \"\n \"B01001_041E) * B11002_001E*1.0/B01001_001E\",\n (\"person_age\", \"above 60\"):\n \"(B01001_018E + B01001_019E + B01001_020E + \"\n \"B01001_021E + B01001_022E + B01001_023E + \"\n \"B01001_024E + B01001_025E + B01001_042E + \"\n \"B01001_043E + B01001_044E + B01001_045E + \"\n \"B01001_046E + B01001_047E + B01001_048E + \"\n \"B01001_049E) * B11002_001E*1.0/B01001_001E\",\n (\"race\", \"white\"): \"(B02001_002E) * B11002_001E*1.0/B01001_001E\",\n (\"race\", \"black\"): \"(B02001_003E) * B11002_001E*1.0/B01001_001E\",\n (\"race\", \"asian\"): \"(B02001_005E) * B11002_001E*1.0/B01001_001E\",\n (\"race\", \"other\"): \"(B02001_004E + B02001_006E + B02001_007E + \"\n \"B02001_008E) * B11002_001E*1.0/B01001_001E\",\n (\"person_sex\", \"male\"):\n \"(B01001_002E) * B11002_001E*1.0/B01001_001E\",\n (\"person_sex\", \"female\"):\n \"(B01001_026E) * B11002_001E*1.0/B01001_001E\",\n (\"hispanic\", \"yes\"):\n \"(B03003_003E) * B11002_001E*1.0/B01001_001E\",\n (\"hispanic\", \"no\"):\n \"(B03003_002E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"agriculture\"): \"(C24030_003E + C24030_006E + C24030_030E + C24030_033E) * \"\n \"B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"manufacturing\"): \"(C24030_007E + C24030_034E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"retail / transportation\"): \"(C24030_008E + C24030_009E + C24030_010E + C24030_035E + \"\n \"C24030_036E + C24030_037E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"information\"): \"(C24030_013E 
+ C24030_014E + C24030_017E + C24030_040E + C24030_041E + \"\n \"C24030_044E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"educational / health\"): \"(C24030_021E + C24030_048E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"arts\"): \"(C24030_024E + C24030_051E) * B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"other services\"): \"(C24030_027E + C24030_028E + C24030_054E + C24030_055E) * \"\n \"B11002_001E*1.0/B01001_001E\",\n (\"industry\", \"not employed\"): \"B11002_001E - C24030_001E * B11002_001E*1.0/B01001_001E\"\n }, index_cols=['state', 'county', 'tract', 'block group'])\n\n # Put the needed PUMS variables here. These are also the PUMS variables\n # that will be in the outputted synthetic population\n self.h_pums_cols = ('serialno', 'PUMA00', 'PUMA10', 'RT', 'NP', 'TYPE',\n 'R65', 'HINCP', 'VEH', 'R18')\n self.p_pums_cols = ('serialno', 'PUMA00', 'PUMA10', 'RELP', 'AGEP',\n 'ESR', 'RAC1P', 'HISP', 'SEX', 'SPORDER',\n 'PERNP', 'SCHL', 'WKHP', 'JWTR', 'SCH', 'NAICSP')\n\n def get_geography_name(self):\n # this synthesis is at the block group level for most variables\n return \"block_group\"\n\n def get_state(self):\n return self.state\n\n def get_county(self):\n return self.county\n\n def get_num_geographies(self):\n return len(self.p_acs_cat)\n\n def get_available_geography_ids(self):\n # return the ids of the geographies, in this case a state, county,\n # tract, block_group id tuple\n for tup in self.p_acs_cat.index:\n yield pd.Series(tup, index=self.p_acs_cat.index.names)\n\n def get_household_marginal_for_geography(self, ind):\n return self.h_acs_cat.loc[tuple(ind.values)]\n\n def get_person_marginal_for_geography(self, ind):\n return self.p_acs_cat.loc[tuple(ind.values)]\n\n def get_household_joint_dist_for_geography(self, ind):\n c = self.c\n\n puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)\n\n # this is cached so won't download more than once\n if type(puma00) == str:\n h_pums = self.c.download_household_pums(ind.state, puma10, puma00,\n usecols=self.h_pums_cols)\n p_pums = self.c.download_population_pums(ind.state, puma10, puma00,\n usecols=self.p_pums_cols)\n elif np.isnan(puma00): # only puma10 available\n h_pums = self.c.download_household_pums(ind.state, puma10, None,\n usecols=self.h_pums_cols)\n p_pums = self.c.download_population_pums(ind.state, puma10, None,\n usecols=self.p_pums_cols)\n\n h_pums = h_pums.set_index('serialno')\n\n # join persons to households,\n # calculate needed household-level variables\n age_of_head = p_pums[p_pums.RELP == 0].groupby('serialno').AGEP.max()\n num_workers = p_pums[p_pums.ESR.isin([1, 2, 4, 5])].groupby(\n 'serialno').size()\n h_pums['race_of_head'] = p_pums[p_pums.RELP == 0].groupby(\n 'serialno').RAC1P.max()\n h_pums['hispanic_head'] = p_pums[p_pums.RELP == 0].groupby(\n 'serialno').HISP.max()\n h_pums['age_of_head'] = age_of_head\n h_pums['workers'] = num_workers\n h_pums.workers = h_pums.workers.fillna(0)\n h_pums = h_pums.reset_index()\n\n def sf_detached_cat(r):\n if r.BLD == 2:\n return \"yes\"\n return \"no\"\n\n def age_of_head_cat(r):\n if r.age_of_head < 35:\n return \"lt35\"\n elif r.age_of_head >= 65:\n return \"gt65\"\n return \"gt35-lt65\"\n\n def race_of_head_cat(r):\n if r.race_of_head == 1:\n return \"white\"\n elif r.race_of_head == 2:\n return \"black\"\n elif r.race_of_head == 6:\n return \"asian\"\n return \"other\"\n\n def hispanic_head_cat(r):\n if r.hispanic_head == 1:\n return \"no\"\n return \"yes\"\n\n def hh_size_cat(r):\n if r.NP == 1:\n return \"one\"\n elif r.NP == 
2:\n return \"two\"\n elif r.NP == 3:\n return \"three\"\n return \"four or more\"\n\n def cars_cat(r):\n if r.VEH == 0:\n return \"none\"\n elif r.VEH == 1:\n return \"one\"\n elif r.VEH == 2:\n return \"two\"\n return \"three or more\"\n\n def children_cat(r):\n if r.R18 == 1:\n return \"yes\"\n return \"no\"\n\n def seniors_cat(r):\n if r.R65 > 0:\n return \"yes\"\n return \"no\"\n\n def income_cat(r):\n if r.HINCP >= 150000:\n return \"gt150\"\n elif (r.HINCP >= 100000) & (r.HINCP < 150000):\n return \"gt100-lt150\"\n elif (r.HINCP >= 60000) & (r.HINCP < 100000):\n return \"gt60-lt100\"\n elif (r.HINCP >= 30000) & (r.HINCP < 60000):\n return \"gt30-lt60\"\n return \"lt30\"\n\n def workers_cat(r):\n if r.workers >= 3:\n return \"two or more\"\n elif r.workers == 2:\n return \"two\"\n elif r.workers == 1:\n return \"one\"\n return \"none\"\n\n def tenure_mover_cat(r):\n if (r.MV < 4) & (r.TEN < 3):\n return \"own recent\"\n elif (r.MV >= 4) & (r.TEN < 3):\n return \"own not recent\"\n elif (r.MV < 4) & (r.TEN >= 3):\n return \"rent recent\"\n return \"rent not recent\"\n\n h_pums, jd_households = cat.joint_distribution(\n h_pums,\n cat.category_combinations(self.h_acs_cat.columns),\n {\"hh_cars\": cars_cat,\n \"hh_children\": children_cat,\n \"hh_income\": income_cat,\n \"hh_workers\": workers_cat,\n \"hh_size\": hh_size_cat}\n )\n return h_pums, jd_households\n\n def get_person_joint_dist_for_geography(self, ind):\n c = self.c\n\n puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)\n # this is cached so won't download more than once\n if type(puma00) == str:\n p_pums = self.c.download_population_pums(ind.state, puma10, puma00,\n usecols=self.p_pums_cols)\n elif np.isnan(puma00): # only puma10 available\n p_pums = self.c.download_population_pums(ind.state, puma10, None,\n usecols=self.p_pums_cols)\n\n def age_cat(r):\n if r.AGEP <= 19:\n return \"19 and under\"\n elif r.AGEP <= 35:\n return \"20 to 35\"\n elif r.AGEP <= 60:\n return \"35 to 60\"\n return \"above 60\"\n\n def race_cat(r):\n if r.RAC1P == 1:\n return \"white\"\n elif r.RAC1P == 2:\n return \"black\"\n elif r.RAC1P == 6:\n return \"asian\"\n return \"other\"\n\n def sex_cat(r):\n if r.SEX == 1:\n return \"male\"\n return \"female\"\n\n def hispanic_cat(r):\n if r.HISP == 1:\n return \"no\"\n return \"yes\"\n\n def industry_cat(r):\n try:\n if r.NAICSP[0] == '1':\n return \"agriculture\"\n elif r.NAICSP[0] == '2':\n return \"agriculture\"\n elif r.NAICSP[0] == '3':\n return \"manufacturing\"\n elif r.NAICSP[0] == '4':\n return \"retail / transportation\"\n elif r.NAICSP[0] == '5':\n return \"information\"\n elif r.NAICSP[0] == '6':\n return \"educational / health\"\n elif r.NAICSP[0] == '7':\n return \"arts\"\n elif r.NAICSP[0] == '8':\n return \"other services\"\n elif r.NAICSP[0] == '9':\n return \"other services\"\n else:\n return \"not employed\"\n except:\n return \"not employed\"\n\n p_pums, jd_persons = cat.joint_distribution(\n p_pums,\n cat.category_combinations(self.p_acs_cat.columns),\n {\"person_age\": age_cat, \"race\": race_cat, \"person_sex\": sex_cat,\n \"hispanic\": hispanic_cat, \"industry\": industry_cat}\n )\n return p_pums, jd_persons\n" ]
[ [ "numpy.isnan", "pandas.Series" ] ]
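The Starter recipe in the record above maps raw PUMS columns (VEH, HINCP, NP, ...) onto the same category labels used for the ACS marginals before handing joint counts to the synthesizer via cat.joint_distribution. A rough, self-contained approximation of that categorization step with an invented five-row household table (the package's own categorizer module is not reproduced here, only plain pandas):

import pandas as pd

# Hypothetical stand-in for a PUMS household table (VEH and HINCP as used above)
h_pums = pd.DataFrame({
    "VEH": [0, 1, 2, 3, 1],
    "HINCP": [25000, 45000, 80000, 120000, 200000],
})

def cars_cat(r):
    if r.VEH == 0:
        return "none"
    elif r.VEH == 1:
        return "one"
    elif r.VEH == 2:
        return "two"
    return "three or more"

def income_cat(r):
    # same breakpoints as the recipe, with the redundant upper bounds dropped
    if r.HINCP >= 150000:
        return "gt150"
    elif r.HINCP >= 100000:
        return "gt100-lt150"
    elif r.HINCP >= 60000:
        return "gt60-lt100"
    elif r.HINCP >= 30000:
        return "gt30-lt60"
    return "lt30"

h_pums["hh_cars"] = h_pums.apply(cars_cat, axis=1)
h_pums["hh_income"] = h_pums.apply(income_cat, axis=1)

# A crude stand-in for the joint distribution the recipe passes on per PUMA
jd = h_pums.groupby(["hh_cars", "hh_income"]).size()
print(jd)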
gaetanmargueritte/ccg2esn
[ "3745ef23eb409837d20261045fad808af5ab82a8" ]
[ "RSSviz/RSSviz.py" ]
[ "\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nimport seaborn as sns\r\nimport umap\r\n\r\n\r\nclass RecurentStateSpaceVisualisation():\r\n def __init__(self, n_compo=2):\r\n self.reducer = umap.UMAP(n_components=n_compo, random_state = 42)\r\n\r\n def define_space(self, recurent_states):\r\n self.reducer.fit(recurent_states)\r\n\r\n def show_space(self, recurent_states, labels):\r\n \"\"\"Plot the vector contained in recurent_states\r\n after dimension reduction with labels for each point\"\"\"\r\n plt.figure()\r\n reduced_states = self.reducer.transform(recurent_states)\r\n fig, ax = plt.subplots()\r\n ax.scatter(reduced_states[:,0],reduced_states[:,1] , s = 5)\r\n\r\n for i, label in enumerate(labels):\r\n print(i)\r\n ax.annotate(label, (reduced_states[i][0], reduced_states[i][1]))\r\n\r\n def show_sentences(self,\r\n sentences_states,\r\n sentences,\r\n show_words = True,\r\n one_chunck = False,\r\n split_on_space = True,\r\n reduced_sentences_states = None,\r\n step_zero_included = False):\r\n \"\"\"Plot the states in sentences_states as lines in the RSSviz.\r\n Arguments:\r\n sentences_states - list of vector corresponding to\r\n the hidden states during the processing of each sentence.\r\n sentences - list of strings\r\n\r\n show_words - show the corresponding word near each point\r\n one_chunck - if True, process all the states in one chuck for the dimension\r\n reduction (sentences_states needs to be a numpy array). If False,\r\n each sentence has its states reduced separatly (sentences_states\r\n needs to be a list of numpy array).\r\n split_on_space - Should the strings of sentences be splited on space to extract\r\n the words.\r\n reduced_sentences_states - If show_sentences has already been applied on these\r\n sentences, you can reused the points computed to\r\n avoid the time taken by the dimension reduction.\r\n step_zero_included - If True, the first state should be the initial state of the\r\n RNN and so no word will be plotted next to it.\"\"\"\r\n fig = plt.figure()\r\n\r\n if split_on_space:\r\n words = [s.split(\" \") for s in sentences]\r\n else:\r\n words =sentences\r\n\r\n save_reduced_sentences_states = []\r\n\r\n if one_chunck and reduced_sentences_states is None: ## all the sentences are transoformed in one chunck\r\n all_reduced_sentences_states = self.reducer.transform(sentences_states)\r\n index = 0\r\n if one_chunck and not(reduced_sentences_states is None):\r\n all_reduced_sentences_states = reduced_sentences_states\r\n index = 0\r\n\r\n for i in range(len(sentences)):\r\n\r\n if not(one_chunck):\r\n if reduced_sentences_states is None:\r\n reduced_sentence_states = self.reducer.transform(sentences_states[i])\r\n save_reduced_sentences_states.append(reduced_sentence_states)\r\n else:\r\n reduced_sentence_states = reduced_sentences_states[i]\r\n else:\r\n if not(step_zero_included):\r\n reduced_sentence_states = all_reduced_sentences_states[index: index+len(words[i])]\r\n else:\r\n reduced_sentence_states = all_reduced_sentences_states[index: index+len(words[i])+1]\r\n\r\n index += len(words[i])\r\n if step_zero_included:\r\n index +=1\r\n\r\n plt.plot(reduced_sentence_states[:,0],reduced_sentence_states[:,1])\r\n\r\n ax = fig.axes[0]\r\n if show_words:\r\n for j, word in enumerate(words[i]):\r\n if step_zero_included:\r\n ax.annotate(word, (reduced_sentence_states[j+1][0], reduced_sentence_states[j+1][1]))\r\n else:\r\n ax.annotate(word, (reduced_sentence_states[j][0], reduced_sentence_states[j][1]))\r\n\r\n if 
one_chunck:\r\n return all_reduced_sentences_states\r\n return save_reduced_sentences_states\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure" ] ]
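A minimal usage sketch for the RecurentStateSpaceVisualisation class in the record above, assuming umap-learn and matplotlib are installed and that the class is importable from the RSSviz package as suggested by the file path (the hidden states here are random stand-ins for real RNN activations):

import numpy as np
from RSSviz.RSSviz import RecurentStateSpaceVisualisation  # import path assumed from the repo layout

rng = np.random.default_rng(0)
states = rng.normal(size=(200, 64))        # pretend these are 200 hidden states of a 64-unit RNN

rssv = RecurentStateSpaceVisualisation(n_compo=2)
rssv.define_space(states)                  # fits the UMAP reducer on the full state set
rssv.show_space(states[:20], labels=[str(i) for i in range(20)])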
gjkennedy/pyoptsparse
[ "fde73c62dc45c96be983f73cb62f60cf14a082a2" ]
[ "pyoptsparse/pySNOPT/setup.py" ]
[ "#!/usr/bin/env python\n\nimport os, sys\n\n\ndef configuration(parent_package=\"\", top_path=None):\n\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration(\"pySNOPT\", parent_package, top_path)\n config.add_data_files(\"LICENSE\", \"README\")\n\n # Since snopt has a bunch of source files, we will just check if\n # snoptc.c exists. If so, we will assume all the rest of the files\n # are present.\n\n snoptc = os.path.join(\"pyoptsparse/pySNOPT/source\", \"snoptc.f\")\n if os.path.exists(snoptc):\n config.add_library(\"snopt\", sources=[os.path.join(\"source\", \"*.f\")])\n config.add_extension(\"snopt\", sources=[\"source/f2py/snopt.pyf\"], libraries=[\"snopt\"])\n return config\n" ]
[ [ "numpy.distutils.misc_util.Configuration" ] ]
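The configuration() function in the record above follows the usual numpy.distutils recipe; such a setup.py is normally driven by a top-level call like the hypothetical one below (note that numpy.distutils is deprecated in recent NumPy releases):

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration(top_path="").todict())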
ak-gupta/nbaspa
[ "db961717bb23854e0373b7732638021a18d909f5", "db961717bb23854e0373b7732638021a18d909f5" ]
[ "tests/test_model/test_tasks/test_io.py", "nbaspa/model/tasks/metrics.py" ]
[ "\"\"\"Test loading data.\"\"\"\n\nfrom pathlib import Path\n\nimport cloudpickle\nfrom lifelines import CoxTimeVaryingFitter\nfrom sklearn.isotonic import IsotonicRegression\n\nfrom nbaspa.model.tasks import LoadData, LoadModel\n\ndef test_load_model_data(data, gamelocation):\n \"\"\"Test loading fake model data.\"\"\"\n # Run the task and compare\n tsk = LoadData()\n output = tsk.run(data_dir=gamelocation)\n output.sort_values(by=[\"GAME_ID\", \"TIME\"], ascending=True, inplace=True)\n output.reset_index(drop=True, inplace=True)\n\n assert output.equals(data)\n\ndef test_load_model(tmpdir):\n \"\"\"Test writing and reading a model.\"\"\"\n model = CoxTimeVaryingFitter(penalizer=1.0)\n location = tmpdir.mkdir(\"fake-model\")\n with open(Path(str(location), \"mymodel.pkl\"), \"wb\") as outfile:\n cloudpickle.dump(model, outfile)\n calibrator = IsotonicRegression(out_of_bounds=\"clip\")\n with open(Path(str(location), \"calibrator.pkl\"), \"wb\") as outfile:\n cloudpickle.dump(calibrator, outfile)\n \n tsk = LoadModel()\n output = tsk.run(filepath=Path(str(location), \"mymodel.pkl\"))\n\n assert isinstance(output[0], CoxTimeVaryingFitter)\n assert output[0].penalizer == 1.0\n assert isinstance(output[1], IsotonicRegression)\n assert output[1].out_of_bounds == \"clip\"", "\"\"\"Define some metrics for evaluating the model.\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nfrom prefect import Task\nfrom sklearn.metrics import roc_auc_score\n\nfrom .meta import META\n\n\nclass AUROC(Task):\n \"\"\"Calculate the AUROC score.\"\"\"\n\n def run(self, data: pd.DataFrame, mode: Optional[str] = \"survival\") -> float: # type: ignore\n \"\"\"Calculate the AUROC score.\n\n Parameters\n ----------\n data : pd.DataFrame\n The output of ``WinProbability.run()``.\n mode : str, optional (default \"survival\")\n The mode, either ``survival`` or ``benchmark``\n\n Returns\n -------\n float\n The AUROC score from ``scikit-learn``.\n \"\"\"\n output = roc_auc_score(y_true=data[META[\"event\"]], y_score=data[META[mode]])\n self.logger.info(f\"Model has a AUROC value of {np.round(output, 3)}\")\n\n return output\n\n\nclass AUROCLift(Task):\n \"\"\"Calculate the lift in AUROC between two sequences.\"\"\"\n\n def run( # type: ignore\n self,\n benchmark: Union[List[float], np.ndarray],\n test: Union[List[float], np.ndarray],\n ) -> np.ndarray:\n \"\"\"Calculate the lift in AUROC between two sequences.\n\n For our purposes, we will be calculating the AUROC across the entire game. 
This\n task will help produce a series comparing the survival model to the benchmark NBA\n win probability model.\n\n Parameters\n ----------\n benchmark : np.ndarray\n The benchmark series.\n test : np.ndarray\n The test series.\n\n Returns\n -------\n np.ndarray\n The output lift series.\n \"\"\"\n if isinstance(test, list):\n test = np.array(test)\n self.logger.info(\n f\"Test model has average AUROC of {np.round(np.average(test), 3)}\"\n )\n if isinstance(benchmark, list):\n benchmark = np.array(benchmark)\n self.logger.info(\n f\"Benchmark model has average AUROC of {np.round(np.average(benchmark), 3)}\"\n )\n\n return (test / benchmark) - 1\n\n\nclass MeanAUROCLift(Task):\n \"\"\"Calculate the weighted average AUROC lift over gametime.\"\"\"\n\n def run( # type: ignore\n self,\n lift: np.ndarray,\n timestep: List[int],\n weight_func: Optional[Callable] = None,\n ) -> float:\n \"\"\"Calculate the weighted average AUROC lift over gametime.\n\n Parameters\n ----------\n lift : np.ndarray\n An array of the AUROC lift from ``AUROCLift.run()`` at each time step.\n timestep : list\n The list of time periods for each AUROC calculation. Used to calculate\n weighting.\n weight_func : Callable, optional (default None)\n The function to apply to the ``timestep`` list before multiplying by\n the lift value.\n\n Returns\n -------\n float\n The weighted average AUROC lift.\n\n Examples\n --------\n >>> auroc = np.array([0.5, 0.6, 0.7])\n >>> times = [10, 20, 30]\n >>> MeanAUROCLift().run(auroc, times, np.log1p)\n 0.61167242753803508\n\n If you don't provide a weight function,\n\n >>> MeanAUROCLift().run(auroc, times)\n 0.59999999999999998\n \"\"\"\n if weight_func is not None:\n weights = weight_func(timestep)\n else:\n weights = None\n\n result = np.average(lift, weights=weights)\n self.logger.info(\n f\"Found a weighted average AUROC lift of {np.round(result * 100, 3)}%\"\n )\n\n return result\n" ]
[ [ "sklearn.isotonic.IsotonicRegression" ], [ "numpy.average", "numpy.array", "sklearn.metrics.roc_auc_score", "numpy.round" ] ]
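The metrics module in the record above computes AUROC lift as test/benchmark - 1 and then a weighted mean over game time. A small numeric sketch with invented AUROC values, mirroring the MeanAUROCLift docstring example:

import numpy as np

# Hypothetical per-timestep AUROC values for the survival model and the NBA benchmark
test = np.array([0.62, 0.68, 0.74])
benchmark = np.array([0.60, 0.63, 0.70])
timestep = [10, 20, 30]

lift = (test / benchmark) - 1                  # what AUROCLift.run returns
weights = np.log1p(timestep)                   # optional weight_func, as in MeanAUROCLift
mean_lift = np.average(lift, weights=weights)  # what MeanAUROCLift.run returns
print(np.round(lift, 4), round(float(mean_lift), 4))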