repo_name: string (lengths 6-130)
hexsha: list
file_path: list
code: list
apis: list
junyuchen245/SPECT-Img-Denoising-DIP-Keras
[ "5334c81de364438137a648302b208e58aef82d20" ]
[ "DIPDenoising/image_reading.py" ]
[ "import os\nimport numpy as np\nimport warnings\n#import SimpleITK as sitk\nimport cv2\nfrom scipy import misc\nfrom scipy import ndimage\n\n\ndef load_image_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):\n \"\"\"loads images in the folder_path and returns a ndarray and threshold the label image\"\"\"\n\n image_list = []\n label_list = []\n #counter = 0\n for image_name in os.listdir(folder_path):\n image_original = np.load(folder_path + image_name)\n image_original = image_original['a']\n #if image_original.shape[0] != 320:\n # continue\n #counter = counter + 1\n #print image_name, counter\n image_ct = image_original[:, 0:len(image_original)]\n image_spect = image_original[:,len(image_original):len(image_original)*2]\n label = image_original[:,len(image_original)*2:len(image_original)*3]\n #image_ct = cv2.resize(image_ct, new_size)\n #image_spect = cv2.resize(image_spect, new_size)\n #label = cv2.resize(label, new_size)\n #activate below for binary-class segmentation\n #super_threshold_indices = label != 0\n #label[super_threshold_indices] = 255\n #label = label / 255.0\n\n if HE == True:\n image_ct = cv2.equalizeHist(image_ct)\n image_spect = cv2.equalizeHist(image_spect)\n elif Truc == True:\n clahe = cv2.createCLAHE(clipLimit=0.1, tileGridSize=(8,8))\n image_spect = clahe.apply(image_spect)\n image_ct = clahe.apply(image_ct)\n #ret, image = cv2.threshold(image,200,255,cv2.THRESH_TRUNC)\n else:\n image_spect = image_spect\n image_ct = image_ct\n\n#image augmentation method in the FusionNet paper\n if Aug == True:\n '''SPECT'''\n imageSPECT_aug_1 = ndimage.rotate(image_spect, -90)\n imageSPECT_aug_2 = np.flipud(imageSPECT_aug_1)\n imageSPECT_aug_3 = ndimage.rotate(image_spect, -180)\n imageSPECT_aug_4 = np.flipud(imageSPECT_aug_3)\n imageSPECT_aug_5 = ndimage.rotate(image_spect, -270)\n imageSPECT_aug_6 = np.flipud(imageSPECT_aug_5)\n imageSPECT_aug_7 = np.flipud(image_spect)\n\n '''CT'''\n imageCT_aug_1 = ndimage.rotate(image_ct, -90)\n imageCT_aug_2 = np.flipud(imageCT_aug_1)\n imageCT_aug_3 = ndimage.rotate(image_ct, -180)\n imageCT_aug_4 = np.flipud(imageCT_aug_3)\n imageCT_aug_5 = ndimage.rotate(image_ct, -270)\n imageCT_aug_6 = np.flipud(imageCT_aug_5)\n imageCT_aug_7 = np.flipud(image_ct)\n\n '''label'''\n label_aug_1 = ndimage.rotate(label, -90)\n label_aug_1 = label_aug_1.astype(int)\n label_aug_2 = np.flipud(label_aug_1)\n label_aug_2 = label_aug_2.astype(int)\n label_aug_3 = ndimage.rotate(label, -180)\n label_aug_3 = label_aug_3.astype(int)\n label_aug_4 = np.flipud(label_aug_3)\n label_aug_4 = label_aug_4.astype(int)\n label_aug_5 = ndimage.rotate(label, -270)\n label_aug_5 = label_aug_5.astype(int)\n label_aug_6 = np.flipud(label_aug_5)\n label_aug_6 = label_aug_6.astype(int)\n label_aug_7 = np.flipud(label)\n label_aug_7 = label_aug_7.astype(int)\n\n\n image_all_0 = np.concatenate((image_ct,image_spect),axis=1)\n image_all_1 = np.concatenate((imageCT_aug_1, imageSPECT_aug_1), axis=1)\n image_all_2 = np.concatenate((imageCT_aug_2, imageSPECT_aug_2), axis=1)\n image_all_3 = np.concatenate((imageCT_aug_3, imageSPECT_aug_3), axis=1)\n image_all_4 = np.concatenate((imageCT_aug_4, imageSPECT_aug_4), axis=1)\n image_all_5 = np.concatenate((imageCT_aug_5, imageSPECT_aug_5), axis=1)\n image_all_6 = np.concatenate((imageCT_aug_6, imageSPECT_aug_6), axis=1)\n image_all_7 = np.concatenate((imageCT_aug_7, imageSPECT_aug_7), axis=1)\n\n image_list.append(image_all_0)\n image_list.append(image_all_1)\n image_list.append(image_all_2)\n 
image_list.append(image_all_3)\n image_list.append(image_all_4)\n image_list.append(image_all_5)\n image_list.append(image_all_6)\n image_list.append(image_all_7)\n\n label_list.append(label)\n label_list.append(label_aug_1)\n label_list.append(label_aug_2)\n label_list.append(label_aug_3)\n label_list.append(label_aug_4)\n label_list.append(label_aug_5)\n label_list.append(label_aug_6)\n label_list.append(label_aug_7)\n else:\n image_all = np.concatenate((image_ct, image_spect), axis=1)\n image_list.append(image_all)\n label_list.append(label)\n\n image_array = np.asarray(image_list)\n label_array = np.asarray(label_list)\n\n return image_array, label_array\n\ndef load_test_from_folder(folder_path, new_size, HE=False, Truc=False, Aug=False):\n \"\"\"loads images in the folder_path and returns a ndarray and threshold the label image\"\"\"\n\n image_list = []\n #counter = 0\n for image_name in os.listdir(folder_path):\n image_original = np.load(folder_path + image_name)\n image_original = image_original['a']\n #counter = counter + 1\n #print image_name, counter\n image_ct = image_original[:, 0:len(image_original)]\n image_spect = image_original[:,len(image_original):len(image_original)*2]\n\n image_all = np.concatenate((image_ct, image_spect), axis=1)\n image_list.append(image_all)\n\n\n image_array = np.asarray(image_list)\n\n return image_array" ]
[ [ "numpy.concatenate", "numpy.asarray", "scipy.ndimage.rotate", "numpy.load", "numpy.flipud" ] ]
TysonYu/AdaptSum
[ "a4f17060e7a8e6f9b86d33a930804445e4226ba4" ]
[ "src/dapt_pretraining.py" ]
[ "\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import BartForConditionalGeneration, BartTokenizer, get_linear_schedule_with_warmup\nfrom others.logging import logger\nfrom others.utils import pad_sents, get_mask\nfrom others.optimizer import build_optim\nfrom tqdm import tqdm\nimport numpy as np\nimport argparse\nimport random\nimport os\nfrom nltk.tokenize import sent_tokenize\n\n\ndef text_infilling(sent, mask_probability=0.05, lamda=3):\n '''\n inputs:\n sent: a sentence string\n mask_probability: probability for masking tokens\n lamda: lamda for poission distribution\n outputs:\n sent: a list of tokens with masked tokens\n '''\n sent = sent.split()\n length = len(sent)\n mask_indices = (np.random.uniform(0, 1, length) < mask_probability) * 1\n span_list = np.random.poisson(lamda, length) # lamda for poission distribution\n nonzero_idx = np.nonzero(mask_indices)[0]\n for item in nonzero_idx:\n span = min(span_list[item], 5) # maximum mask 5 continuous tokens\n for i in range(span):\n if item+i >= length:\n continue\n mask_indices[item+i] = 1\n for i in range(length):\n if mask_indices[i] == 1:\n sent[i] = '<mask>'\n\n # merge the <mask>s to one <mask>\n final_sent = []\n mask_flag = 0\n for word in sent:\n if word != '<mask>':\n mask_flag = 0\n final_sent.append(word)\n else:\n if mask_flag == 0:\n final_sent.append(word)\n mask_flag = 1\n return final_sent\n\ndef sent_permutation(sent):\n '''\n inputs:\n sent: a sentence string\n outputs:\n shuffle_sent: a string after sentence permutations\n '''\n # split sentences based on '.'\n splits = sent_tokenize(sent)\n random.shuffle(splits)\n\n return \" \".join(splits)\n\n\ndef add_noise(sents, mask_probability):\n noisy_sent_list = []\n for sent in sents:\n noisy_sent = sent_permutation(sent)\n noisy_sent = text_infilling(noisy_sent, mask_probability)\n\n noisy_sent = \" \".join(noisy_sent)\n noisy_sent_list.append(noisy_sent)\n\n return noisy_sent_list\n\n\nclass CorpusDataset(Dataset):\n def __init__(self, data_path, denoising_flag=False):\n self.data = []\n with open(data_path, \"r\", ) as f:\n for i, line in enumerate(f):\n line = line.strip()\n if denoising_flag:\n line = \"denoising: \" + line\n self.data.append(line) # append a list of tokens each time\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]\n\n\nclass BartLMTrainer(object):\n def __init__(self, model, dataloader, tokenizer, args, pretrained_model=None):\n self.args = args\n self.model = model\n self.pretrained_model = pretrained_model\n self.optimizer = build_optim(args, model, None, pretrained_model)\n self.dataloader = dataloader\n self.tokenizer = tokenizer\n self.epoch = args.epoch\n self.mask_probability = args.mask_prob\n self.accumulation_steps = args.accum_step\n self.clip = args.clip\n self.domain = args.dm\n self.path = args.path\n if args.recadam:\n if args.max_steps > 0:\n t_total = args.max_steps\n self.epoch = args.max_steps // (len(self.dataloader) // self.accumulation_steps) + 1\n else:\n t_total = len(self.dataloader) // self.accumulation_steps * self.epoch\n self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n\n def train(self):\n print('Start finetuning BART language model')\n iteration = 0\n for epoch_i in range(self.epoch):\n self.model.train()\n if self.pretrained_model is not None:\n self.pretrained_model.eval()\n print('[ Epoch : {}]'.format(epoch_i))\n loss_list = []\n dist_sum, 
dist_num = 0.0, 0\n pbar = tqdm(self.dataloader, total=len(self.dataloader))\n for sents in pbar:\n sents = [self.shorten_sent(sent) for sent in sents]\n iteration += 1\n tokenized_sents = self.tokenize(sents)\n decoder_ids = [[self.tokenizer.bos_token_id] + item for item in tokenized_sents]\n label_ids = [item + [self.tokenizer.eos_token_id] for item in tokenized_sents]\n # print(\"before:\")\n # print(sents[0])\n # print(\"tokenized sents:\")\n # print(tokenized_sents[0])\n # sents: a list of sentence, each item inside is a string\n noisy_text = add_noise(sents, self.mask_probability)\n # noisy_text: a list of sentence, each item inside is a string\n # print(\"after:\")\n # print(noisy_text[0])\n inputs_ids = self.tokenize(noisy_text)\n # print(\"tokenized noisy text:\")\n # print(inputs_ids[0])\n\n # prepare data for training\n mask = torch.tensor(get_mask(inputs_ids, max_len=512)).cuda()\n inputs_ids = torch.tensor(pad_sents(inputs_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()\n decoder_ids = torch.tensor(pad_sents(decoder_ids, pad_token=self.tokenizer.pad_token_id, max_len=512)[0]).cuda()\n label_ids = torch.tensor(pad_sents(label_ids, pad_token=-100, max_len=512)[0]).cuda()\n #optimize model\n loss = self.model(input_ids=inputs_ids, attention_mask=mask, decoder_input_ids=decoder_ids, labels=label_ids)[0]\n loss_list.append(loss.item())\n loss = loss / self.accumulation_steps\n loss.backward()\n if self.args.logging_Euclid_dist:\n dist = torch.sum(torch.abs(torch.cat(\n [p.view(-1) for n, p in self.model.named_parameters()]) - torch.cat(\n [p.view(-1) for n, p in self.pretrained_model.named_parameters()])) ** 2).item()\n\n dist_sum += dist\n dist_num += 1\n\n if iteration % self.accumulation_steps == 0:\n if self.args.recadam:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)\n self.optimizer.step()\n if self.args.recadam:\n self.scheduler.step()\n self.model.zero_grad()\n loss_list = [np.mean(loss_list)]\n\n if self.args.logging_Euclid_dist:\n# pbar.set_description(\"(Epoch {}) LOSS: {:.6f} Euclid dist: {:.6f} LR: {:.6f}\".format(epoch_i, np.mean(loss_list), dist_sum / dist_num, self.scheduler.get_last_lr()[0]))\n pbar.set_description(\"(Epoch {}) LOSS: {:.6f} Euclid dist: {:.6f}\".format(epoch_i, np.mean(loss_list), dist_sum / dist_num))\n else:\n pbar.set_description(\"(Epoch {}) LOSS: {:.6f} LearningRate: {:.10f}\".format(epoch_i, np.mean(loss_list), self.optimizer.learning_rate))\n if iteration % args.save_interval == 0:\n self.save_model(iteration)\n\n def shorten_sent(self, sent):\n split_sent = sent.split()\n if len(split_sent) > 400:\n sent = ' '.join(split_sent[:400])\n return sent\n\n def tokenize(self, sents):\n tokenized_text = [self.tokenizer.encode(sent, add_special_tokens=False) for sent in sents]\n return tokenized_text\n\n def save_model(self, iter_num):\n print(\"saving model\")\n saved_path = os.path.join('DAPT_save/{}_{}.chkpt'.format(args.dm, iter_num))\n torch.save(self.model, saved_path)\n\nif __name__ == \"__main__\":\n # configuration\n parser = argparse.ArgumentParser()\n parser.add_argument('-visible_gpu', default='1', type=str)\n parser.add_argument('-bsz', type=int, default=4, help=\"batch size\")\n parser.add_argument('-path', type=str, default=\"\", help=\"data path\")\n parser.add_argument('-epoch', type=int, default=10, help=\"epoch size\")\n parser.add_argument('-mask_prob', type=float, default=0.15, help=\"mask probability\")\n parser.add_argument('-dm', type=str, default=\"\", help=\"domain name\")\n 
parser.add_argument('-random_seed', type=int, default=0)\n parser.add_argument('-save_interval', default=10000, type=int)\n # optimizer configuration\n parser.add_argument('-lr', default=0.05, type=float)\n parser.add_argument('-optim', default='adam', type=str)\n parser.add_argument('-max_grad_norm', default=0, type=float)\n parser.add_argument('-beta1', default=0.9, type=float)\n parser.add_argument('-beta2', default=0.998, type=float)\n parser.add_argument('-warmup_steps', default=10000, type=int)\n parser.add_argument('-decay_method', default='noam', type=str)\n parser.add_argument('-enc_hidden_size', default=768, type=int)\n parser.add_argument('-clip', type=float, default=1.0, help=\"gradient clip\")\n parser.add_argument('-accum_step', type=int, default=10, help=\"accumulation steps\")\n parser.add_argument('-train_from', default='', type=str)\n # using RecAdam\n parser.add_argument(\"-adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument('-recadam', default=False, action='store_true')\n parser.add_argument(\"-weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"-anneal_w\", type=float, default=1.0, help=\"Weight for the annealing function in RecAdam. Default 1.0.\")\n parser.add_argument(\"-anneal_fun\", type=str, default='sigmoid', choices=[\"sigmoid\", \"linear\", 'constant'], help=\"the type of annealing function in RecAdam. Default sigmoid\")\n parser.add_argument(\"-anneal_t0\", type=int, default=1000, help=\"t0 for the annealing function in RecAdam.\")\n parser.add_argument(\"-anneal_k\", type=float, default=0.1, help=\"k for the annealing function in RecAdam.\")\n parser.add_argument(\"-pretrain_cof\", type=float, default=5000.0, help=\"Coefficient of the quadratic penalty in RecAdam. Default 5000.0.\")\n parser.add_argument(\"-logging_Euclid_dist\", action=\"store_true\", help=\"Whether to log the Euclidean distance between the pretrained model and fine-tuning model\")\n parser.add_argument(\"-max_steps\", default=-1, type=int, help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\")\n parser.add_argument(\"-model_type\", type=str, default=\"layers\")\n\n args = parser.parse_args()\n\n # set random seed\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n torch.manual_seed(args.random_seed)\n torch.cuda.manual_seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n # set gpu\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.visible_gpu\n\n print(\"Loading datasets ...\")\n dataset = CorpusDataset(args.path)\n dataloader = DataLoader(dataset=dataset, batch_size=args.bsz, shuffle=True)\n\n if args.train_from:\n model = torch.load(args.train_from, map_location='cpu')\n else:\n model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')\n model.cuda()\n\n tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')\n\n if args.recadam:\n pretrained_model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')\n pretrained_model.cuda()\n else:\n pretrained_model = None\n\n bart_lm_trainer = BartLMTrainer(model, dataloader, tokenizer, args, pretrained_model)\n\n bart_lm_trainer.train()" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "numpy.random.poisson", "torch.save", "numpy.nonzero", "numpy.mean", "torch.manual_seed", "numpy.random.uniform", "torch.utils.data.DataLoader", "torch.load" ] ]
aasensio/hazel
[ "899c8461324061bacc14da7165b9ac7eed35c96b" ]
[ "pyRoutines/angle_transformation.py" ]
[ "# cdiazbas@iac.es\nimport numpy as np\n\n\n# Return the angles in the plane of the sky given angles with respect\n# to the vertical for observations on the limb (in degrees!)\ndef absolute_to_sky(thetaB, chiB):\n thetaB = np.deg2rad(thetaB)\n chiB = np.deg2rad(chiB)\n\n t1 = np.sin(thetaB) * np.sin(chiB)\n t2 = -np.cos(thetaB)\n t3 = np.sin(thetaB) * np.cos(chiB)\n\n thetaSky = np.arccos(t3)\n sinthSky = np.sqrt(1.e0 - t3**2)\n\n sinChiSky = t1 / sinthSky\n cosChiSky = t2 / sinthSky\n\n# Test for the quadrant\n chiSky_preliminary = np.arccos(cosChiSky)\n if (np.sign(sinChiSky) > 0.e0):\n chiSky = chiSky_preliminary\n else:\n chiSky = -chiSky_preliminary\n\n return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]\n\n\n# Return the angles in the vertical system given angles in the\n# plane of the sky for observations on the limb (in degrees!)\ndef sky_to_absolute(thetaSky, chiSky):\n thetaSky = np.deg2rad(thetaSky)\n chiSky = np.deg2rad(chiSky)\n\n t1 = np.sin(thetaSky) * np.sin(chiSky)\n t2 = np.cos(thetaSky)\n t3 = -np.sin(thetaSky) * np.cos(chiSky)\n\n thetaB = np.arccos(t3)\n sinthB = np.sqrt(1.e0 - t3**2)\n\n sinChiB = t1 / sinthB\n cosChiB = t2 / sinthB\n\n# Test for the quadrant\n chiB_preliminary = np.arccos(cosChiB)\n if (np.sign(sinChiB) > 0.e0):\n chiB = chiB_preliminary\n else:\n chiB = -chiB_preliminary\n\n return [np.rad2deg(thetaB), np.rad2deg(chiB)]\n\n\n# Return the angles in the plane of the sky given angles with respect\n# to the vertical for observations at angle theta (in degrees!)\ndef absolute_to_sky_general(theta, thetaB, chiB):\n theta = np.deg2rad(theta)\n thetaB = np.deg2rad(thetaB)\n chiB = np.deg2rad(chiB)\n\n cosThetaSky = np.cos(theta) * np.cos(thetaB) + \\\n np.sin(theta) * np.sin(thetaB) * np.cos(chiB)\n sinThetaSky = np.sqrt(1.e0 - cosThetaSky**2)\n\n thetaSky = np.arccos(cosThetaSky)\n\n cosChiSky = (np.cos(theta) * np.sin(thetaB) * np.cos(chiB) -\n np.cos(thetaB) * np.sin(theta)) / sinThetaSky\n sinChiSky = (np.sin(thetaB) * np.sin(chiB)) / sinThetaSky\n\n# Test for the quadrant\n chiSky_preliminary = np.arccos(cosChiSky)\n if (np.sign(sinChiSky) > 0.e0):\n chiSky = chiSky_preliminary\n else:\n chiSky = -chiSky_preliminary\n\n return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]\n\n\n# Return the angles in the plane of the sky given angles with respect\n# to the vertical for observations at angle theta (in degrees!)\ndef sky_to_absolute_general(theta, thetaSky, chiSky):\n theta = np.deg2rad(theta)\n thetaSky = np.deg2rad(thetaSky)\n chiSky = np.deg2rad(chiSky)\n\n cosThetaB = np.cos(theta) * np.cos(thetaSky) - \\\n np.sin(theta) * np.sin(thetaSky) * np.cos(chiSky)\n sinThetaB = np.sqrt(1.e0 - cosThetaB**2)\n\n thetaB = np.arccos(cosThetaB)\n\n cosChiB = (np.cos(theta) * np.sin(thetaSky) * np.cos(chiSky) +\n np.cos(thetaSky) * np.sin(theta)) / sinThetaB\n sinChiB = (np.sin(thetaSky) * np.sin(chiSky)) / sinThetaB\n\n# Test for the quadrant\n chiB_preliminary = np.arccos(cosChiB)\n if (np.sign(sinChiB) > 0.e0):\n chiB = chiB_preliminary\n else:\n chiB = -chiB_preliminary\n\n return [np.rad2deg(thetaB), np.rad2deg(chiB)]\n\n\nif __name__ == '__main__':\n\n pass\n" ]
[ [ "numpy.sin", "numpy.arccos", "numpy.rad2deg", "numpy.sign", "numpy.sqrt", "numpy.cos", "numpy.deg2rad" ] ]
qimw/UACDA
[ "75d8d03786cba009f56cdb1efd2d6d5abe0c5f77" ]
[ "generate_plabel_dark_zurich.py" ]
[ "import argparse\nimport scipy\nfrom scipy import ndimage\nimport numpy as np\nimport sys\nimport re\nfrom packaging import version\n\nimport torch\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom torch.utils import data, model_zoo\nfrom model.deeplab import Res_Deeplab\nfrom model.deeplab_multi import DeeplabMulti\nfrom model.deeplab_vgg import DeeplabVGG\nfrom dataset.dark_zurich_dataset import DarkZurichDataSet\nimport os\nfrom PIL import Image\nfrom utils.tool import fliplr\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport yaml\nimport imageio as iio\n\ntorch.backends.cudnn.benchmark=True\n\nIMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)\n\nDATA_DIRECTORY = './data/Cityscapes/data'\nDATA_LIST_PATH = './dataset/cityscapes_list/train.txt'\nSAVE_PATH = './data/Dark_zurich/data/pseudo_ohl-1/test'\n\nif not os.path.isdir('./data/Dark_zurich/data/pseudo_ohl-1/'):\n os.makedirs('./data/Dark_zurich/data/pseudo_ohl-1/')\n os.makedirs(SAVE_PATH)\n\nIGNORE_LABEL = 255\nNUM_CLASSES = 19\nRESTORE_FROM = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_multi-ed35151c.pth'\nRESTORE_FROM_VGG = 'http://vllab.ucmerced.edu/ytsai/CVPR18/GTA2Cityscapes_vgg-ac4ac9f6.pth'\nRESTORE_FROM_ORC = 'http://vllab1.ucmerced.edu/~whung/adaptSeg/cityscapes_oracle-b7b9934.pth'\nSET = 'train' # We generate pseudo label for training set\nINPUT_SIZE = '800,512'\n\nMODEL = 'DeeplabMulti'\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\ndef colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n\n return new_mask\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\n parser.add_argument(\"--model\", type=str, default=MODEL,\n help=\"Model Choice (DeeplabMulti/DeeplabVGG/Oracle).\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the Cityscapes dataset.\")\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\n help=\"The index of the label to ignore during the training.\")\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\n help=\"Where restore model parameters from.\")\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"choose gpu device.\")\n parser.add_argument(\"--batchsize\", type=int, default=4,\n help=\"choose gpu device.\")\n parser.add_argument(\"--set\", type=str, default=SET,\n help=\"choose evaluation set.\")\n parser.add_argument(\"--save\", type=str, default=SAVE_PATH,\n help=\"Path to save result.\")\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\n help=\"Comma-separated string with height and width of source images.\")\n return 
parser.parse_args()\n\ndef save_heatmap(output_name):\n output, name = output_name\n fig = plt.figure()\n plt.axis('off')\n heatmap = plt.imshow(output, cmap='viridis')\n fig.colorbar(heatmap)\n fig.savefig('%s_heatmap.png' % (name.split('.jpg')[0]))\n return\n\ndef main():\n \"\"\"Create the model and start the evaluation process.\"\"\"\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = 
torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save\n\nif __name__ == '__main__':\n with torch.no_grad():\n save_path = main()\n #os.system('python compute_iou.py ./data/Cityscapes/data/gtFine/train %s'%save_path)\n" ]
[ [ "torch.nn.LogSoftmax", "numpy.max", "numpy.array", "numpy.asarray", "torch.nn.Softmax", "torch.nn.DataParallel", "torch.autograd.Variable", "torch.no_grad", "torch.utils.model_zoo.load_url", "numpy.load", "matplotlib.pyplot.figure", "torch.from_numpy", "torch.nn.Upsample", "numpy.argmax", "torch.nn.KLDivLoss", "torch.load", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
haifangong/TNSC-classification-baseline
[ "2fb8696699b44fbeb0512fd60deda792b464a958" ]
[ "model/classifier.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\n\nclass SCNN(nn.Module):\n def __init__(self, in_channels, n_classes):\n super(SCNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels=16, kernel_size=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, out_channels=32, kernel_size=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(32, out_channels=64, kernel_size=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.fc = nn.Sequential(\n nn.Linear(43264, 4096),\n nn.BatchNorm1d(4096),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(512, n_classes),\n )\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n return x\n\n\nclass Classifier(nn.Module):\n def __init__(self, in_channels, n_classes):\n super(Classifier, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(in_channels, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(1024, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(512, n_classes),\n # nn.Softmax(dim=1)\n )\n self._init_weight()\n\n def forward(self, x):\n x = self.avg_pool(x)\n x = torch.flatten(x, 1)\n out = self.fc(x)\n return out\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.AdaptiveAvgPool2d", "torch.nn.MaxPool2d", "torch.nn.init.constant_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.flatten", "torch.nn.init.xavier_normal_" ] ]
harunpehlivan/pandas
[ "09633b868f2f999599e29d32a326e112fdbbf3ec", "2e38d5552a5c7b2c0091cecddd483f4f08ad1d2c" ]
[ "pandas/io/formats/style.py", "pandas/core/dtypes/common.py" ]
[ "\"\"\"\nModule for applying conditional formatting to\nDataFrames and Series.\n\"\"\"\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport copy\nfrom functools import partial\nfrom itertools import product\nfrom uuid import uuid1\n\nimport numpy as np\n\nfrom pandas.compat import range\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import is_float, is_string_like\nfrom pandas.core.dtypes.generic import ABCSeries\n\nimport pandas as pd\nfrom pandas.api.types import is_dict_like, is_list_like\nimport pandas.core.common as com\nfrom pandas.core.config import get_option\nfrom pandas.core.generic import _shared_docs\nfrom pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice\n\ntry:\n from jinja2 import (\n PackageLoader, Environment, ChoiceLoader, FileSystemLoader\n )\nexcept ImportError:\n raise ImportError(\"pandas.Styler requires jinja2. \"\n \"Please install with `conda install Jinja2`\\n\"\n \"or `pip install Jinja2`\")\n\n\ntry:\n import matplotlib.pyplot as plt\n from matplotlib import colors\n has_mpl = True\nexcept ImportError:\n has_mpl = False\n no_mpl_message = \"{0} requires matplotlib.\"\n\n\n@contextmanager\ndef _mpl(func):\n if has_mpl:\n yield plt, colors\n else:\n raise ImportError(no_mpl_message.format(func.__name__))\n\n\nclass Styler(object):\n \"\"\"\n Helps style a DataFrame or Series according to the data with HTML and CSS.\n\n Parameters\n ----------\n data : Series or DataFrame\n precision : int\n precision to round floats to, defaults to pd.options.display.precision\n table_styles : list-like, default None\n list of {selector: (attr, value)} dicts; see Notes\n uuid : str, default None\n a unique identifier to avoid CSS collisions; generated automatically\n caption : str, default None\n caption to attach to the table\n cell_ids : bool, default True\n If True, each cell will have an ``id`` attribute in their HTML tag.\n The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``\n where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row\n number and ``<num_col>`` is the column number.\n\n Attributes\n ----------\n env : Jinja2 Environment\n template : Jinja2 Template\n loader : Jinja2 Loader\n\n See Also\n --------\n pandas.DataFrame.style\n\n Notes\n -----\n Most styling will be done by passing style functions into\n ``Styler.apply`` or ``Styler.applymap``. Style functions should\n return values with strings containing CSS ``'attr: value'`` that will\n be applied to the indicated cells.\n\n If using in the Jupyter notebook, Styler has defined a ``_repr_html_``\n to automatically render itself. 
Otherwise call Styler.render to get\n the generated HTML.\n\n CSS classes are attached to the generated HTML\n\n * Index and Column names include ``index_name`` and ``level<k>``\n where `k` is its level in a MultiIndex\n * Index label cells include\n\n * ``row_heading``\n * ``row<n>`` where `n` is the numeric position of the row\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Column label cells include\n * ``col_heading``\n * ``col<n>`` where `n` is the numeric position of the column\n * ``evel<k>`` where `k` is the level in a MultiIndex\n\n * Blank cells include ``blank``\n * Data cells include ``data``\n \"\"\"\n loader = PackageLoader(\"pandas\", \"io/formats/templates\")\n env = Environment(\n loader=loader,\n trim_blocks=True,\n )\n template = env.get_template(\"html.tpl\")\n\n def __init__(self, data, precision=None, table_styles=None, uuid=None,\n caption=None, table_attributes=None, cell_ids=True):\n self.ctx = defaultdict(list)\n self._todo = []\n\n if not isinstance(data, (pd.Series, pd.DataFrame)):\n raise TypeError(\"``data`` must be a Series or DataFrame\")\n if data.ndim == 1:\n data = data.to_frame()\n if not data.index.is_unique or not data.columns.is_unique:\n raise ValueError(\"style is not supported for non-unique indices.\")\n\n self.data = data\n self.index = data.index\n self.columns = data.columns\n\n self.uuid = uuid\n self.table_styles = table_styles\n self.caption = caption\n if precision is None:\n precision = get_option('display.precision')\n self.precision = precision\n self.table_attributes = table_attributes\n self.hidden_index = False\n self.hidden_columns = []\n self.cell_ids = cell_ids\n\n # display_funcs maps (row, col) -> formatting function\n\n def default_display_func(x):\n if is_float(x):\n return '{:>.{precision}g}'.format(x, precision=self.precision)\n else:\n return x\n\n self._display_funcs = defaultdict(lambda: default_display_func)\n\n def _repr_html_(self):\n \"\"\"\n Hooks into Jupyter notebook rich display system.\n \"\"\"\n return self.render()\n\n @Appender(_shared_docs['to_excel'] % dict(\n axes='index, columns', klass='Styler',\n axes_single_arg=\"{0 or 'index', 1 or 'columns'}\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names which refer to the axis items.\"\"\",\n versionadded_to_excel='\\n .. 
versionadded:: 0.20'))\n def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',\n float_format=None, columns=None, header=True, index=True,\n index_label=None, startrow=0, startcol=0, engine=None,\n merge_cells=True, encoding=None, inf_rep='inf', verbose=True,\n freeze_panes=None):\n\n from pandas.io.formats.excel import ExcelFormatter\n formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,\n header=header,\n float_format=float_format, index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep)\n formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,\n startcol=startcol, freeze_panes=freeze_panes,\n engine=engine)\n\n def _translate(self):\n \"\"\"\n Convert the DataFrame in `self.data` and the attrs from `_build_styles`\n into a dictionary of {head, body, uuid, cellstyle}.\n \"\"\"\n table_styles = self.table_styles or []\n caption = self.caption\n ctx = self.ctx\n precision = self.precision\n hidden_index = self.hidden_index\n hidden_columns = self.hidden_columns\n uuid = self.uuid or str(uuid1()).replace(\"-\", \"_\")\n ROW_HEADING_CLASS = \"row_heading\"\n COL_HEADING_CLASS = \"col_heading\"\n INDEX_NAME_CLASS = \"index_name\"\n\n DATA_CLASS = \"data\"\n BLANK_CLASS = \"blank\"\n BLANK_VALUE = \"\"\n\n def format_attr(pair):\n return \"{key}={value}\".format(**pair)\n\n # for sparsifying a MultiIndex\n idx_lengths = _get_level_lengths(self.index)\n col_lengths = _get_level_lengths(self.columns, hidden_columns)\n\n cell_context = dict()\n\n n_rlvls = self.data.index.nlevels\n n_clvls = self.data.columns.nlevels\n rlabels = self.data.index.tolist()\n clabels = self.data.columns.tolist()\n\n if n_rlvls == 1:\n rlabels = [[x] for x in rlabels]\n if n_clvls == 1:\n clabels = [[x] for x in clabels]\n clabels = list(zip(*clabels))\n\n cellstyle = []\n head = []\n\n for r in range(n_clvls):\n # Blank for Index columns...\n row_es = [{\"type\": \"th\",\n \"value\": BLANK_VALUE,\n \"display_value\": BLANK_VALUE,\n \"is_visible\": not hidden_index,\n \"class\": \" \".join([BLANK_CLASS])}] * (n_rlvls - 1)\n\n # ... 
except maybe the last for columns.names\n name = self.data.columns.names[r]\n cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,\n \"level{lvl}\".format(lvl=r)]\n name = BLANK_VALUE if name is None else name\n row_es.append({\"type\": \"th\",\n \"value\": name,\n \"display_value\": name,\n \"class\": \" \".join(cs),\n \"is_visible\": not hidden_index})\n\n if clabels:\n for c, value in enumerate(clabels[r]):\n cs = [COL_HEADING_CLASS, \"level{lvl}\".format(lvl=r),\n \"col{col}\".format(col=c)]\n cs.extend(cell_context.get(\n \"col_headings\", {}).get(r, {}).get(c, []))\n es = {\n \"type\": \"th\",\n \"value\": value,\n \"display_value\": value,\n \"class\": \" \".join(cs),\n \"is_visible\": _is_visible(c, r, col_lengths),\n }\n colspan = col_lengths.get((r, c), 0)\n if colspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"colspan\", \"value\": colspan})\n ]\n row_es.append(es)\n head.append(row_es)\n\n if (self.data.index.names and\n com._any_not_none(*self.data.index.names) and\n not hidden_index):\n index_header_row = []\n\n for c, name in enumerate(self.data.index.names):\n cs = [INDEX_NAME_CLASS,\n \"level{lvl}\".format(lvl=c)]\n name = '' if name is None else name\n index_header_row.append({\"type\": \"th\", \"value\": name,\n \"class\": \" \".join(cs)})\n\n index_header_row.extend(\n [{\"type\": \"th\",\n \"value\": BLANK_VALUE,\n \"class\": \" \".join([BLANK_CLASS])\n }] * (len(clabels[0]) - len(hidden_columns)))\n\n head.append(index_header_row)\n\n body = []\n for r, idx in enumerate(self.data.index):\n row_es = []\n for c, value in enumerate(rlabels[r]):\n rid = [ROW_HEADING_CLASS, \"level{lvl}\".format(lvl=c),\n \"row{row}\".format(row=r)]\n es = {\n \"type\": \"th\",\n \"is_visible\": (_is_visible(r, c, idx_lengths) and\n not hidden_index),\n \"value\": value,\n \"display_value\": value,\n \"id\": \"_\".join(rid[1:]),\n \"class\": \" \".join(rid)\n }\n rowspan = idx_lengths.get((c, r), 0)\n if rowspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"rowspan\", \"value\": rowspan})\n ]\n row_es.append(es)\n\n for c, col in enumerate(self.data.columns):\n cs = [DATA_CLASS, \"row{row}\".format(row=r),\n \"col{col}\".format(col=c)]\n cs.extend(cell_context.get(\"data\", {}).get(r, {}).get(c, []))\n formatter = self._display_funcs[(r, c)]\n value = self.data.iloc[r, c]\n row_dict = {\"type\": \"td\",\n \"value\": value,\n \"class\": \" \".join(cs),\n \"display_value\": formatter(value),\n \"is_visible\": (c not in hidden_columns)}\n # only add an id if the cell has a style\n if (self.cell_ids or\n not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):\n row_dict[\"id\"] = \"_\".join(cs[1:])\n row_es.append(row_dict)\n props = []\n for x in ctx[r, c]:\n # have to handle empty styles like ['']\n if x.count(\":\"):\n props.append(x.split(\":\"))\n else:\n props.append(['', ''])\n cellstyle.append({'props': props,\n 'selector': \"row{row}_col{col}\"\n .format(row=r, col=c)})\n body.append(row_es)\n\n table_attr = self.table_attributes\n use_mathjax = get_option(\"display.html.use_mathjax\")\n if not use_mathjax:\n table_attr = table_attr or ''\n if 'class=\"' in table_attr:\n table_attr = table_attr.replace('class=\"',\n 'class=\"tex2jax_ignore ')\n else:\n table_attr += ' class=\"tex2jax_ignore\"'\n\n return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,\n precision=precision, table_styles=table_styles,\n caption=caption, table_attributes=table_attr)\n\n def format(self, formatter, subset=None):\n \"\"\"\n Format the text display value of cells.\n\n .. 
versionadded:: 0.18.0\n\n Parameters\n ----------\n formatter : str, callable, or dict\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that restricts which elements\n ``formatter`` is applied to.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n\n ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where\n ``a`` is one of\n\n - str: this will be wrapped in: ``a.format(x)``\n - callable: called with the value of an individual cell\n\n The default display value for numeric values is the \"general\" (``g``)\n format with ``pd.options.display.precision`` precision.\n\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])\n >>> df.style.format(\"{:.2%}\")\n >>> df['c'] = ['a', 'b', 'c', 'd']\n >>> df.style.format({'c': str.upper})\n \"\"\"\n if subset is None:\n row_locs = range(len(self.data))\n col_locs = range(len(self.data.columns))\n else:\n subset = _non_reducing_slice(subset)\n if len(subset) == 1:\n subset = subset, self.data.columns\n\n sub_df = self.data.loc[subset]\n row_locs = self.data.index.get_indexer_for(sub_df.index)\n col_locs = self.data.columns.get_indexer_for(sub_df.columns)\n\n if is_dict_like(formatter):\n for col, col_formatter in formatter.items():\n # formatter must be callable, so '{}' are converted to lambdas\n col_formatter = _maybe_wrap_formatter(col_formatter)\n col_num = self.data.columns.get_indexer_for([col])[0]\n\n for row_num in row_locs:\n self._display_funcs[(row_num, col_num)] = col_formatter\n else:\n # single scalar to format all cells with\n locs = product(*(row_locs, col_locs))\n for i, j in locs:\n formatter = _maybe_wrap_formatter(formatter)\n self._display_funcs[(i, j)] = formatter\n return self\n\n def render(self, **kwargs):\n \"\"\"\n Render the built up styles to HTML.\n\n Parameters\n ----------\n `**kwargs` : Any additional keyword arguments are passed through\n to ``self.template.render``. This is useful when you need to provide\n additional variables for a custom template.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n rendered : str\n The rendered HTML\n\n Notes\n -----\n ``Styler`` objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. 
Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * precision\n * table_styles\n * caption\n * table_attributes\n \"\"\"\n self._compute()\n # TODO: namespace all the pandas keys\n d = self._translate()\n # filter out empty styles, every cell will have a class\n # but the list of props may just be [['', '']].\n # so we have the neested anys below\n trimmed = [x for x in d['cellstyle']\n if any(any(y) for y in x['props'])]\n d['cellstyle'] = trimmed\n d.update(kwargs)\n return self.template.render(**d)\n\n def _update_ctx(self, attrs):\n \"\"\"\n Update the state of the Styler.\n\n Collects a mapping of {index_label: ['<property>: <value>']}.\n\n attrs : Series or DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n \"\"\"\n for row_label, v in attrs.iterrows():\n for col_label, col in v.iteritems():\n i = self.index.get_indexer([row_label])[0]\n j = self.columns.get_indexer([col_label])[0]\n for pair in col.rstrip(\";\").split(\";\"):\n self.ctx[(i, j)].append(pair)\n\n def _copy(self, deepcopy=False):\n styler = Styler(self.data, precision=self.precision,\n caption=self.caption, uuid=self.uuid,\n table_styles=self.table_styles)\n if deepcopy:\n styler.ctx = copy.deepcopy(self.ctx)\n styler._todo = copy.deepcopy(self._todo)\n else:\n styler.ctx = self.ctx\n styler._todo = self._todo\n return styler\n\n def __copy__(self):\n \"\"\"\n Deep copy by default.\n \"\"\"\n return self._copy(deepcopy=False)\n\n def __deepcopy__(self, memo):\n return self._copy(deepcopy=True)\n\n def clear(self):\n \"\"\"\n Reset the styler, removing any previously applied styles.\n Returns None.\n \"\"\"\n self.ctx.clear()\n self._todo = []\n\n def _compute(self):\n \"\"\"\n Execute the style functions built up in `self._todo`.\n\n Relies on the conventions that all style functions go through\n .apply or .applymap. 
The append styles to apply as tuples of\n\n (application method, *args, **kwargs)\n \"\"\"\n r = self\n for func, args, kwargs in self._todo:\n r = func(self)(*args, **kwargs)\n return r\n\n def _apply(self, func, axis=0, subset=None, **kwargs):\n subset = slice(None) if subset is None else subset\n subset = _non_reducing_slice(subset)\n data = self.data.loc[subset]\n if axis is not None:\n result = data.apply(func, axis=axis,\n result_type='expand', **kwargs)\n result.columns = data.columns\n else:\n result = func(data, **kwargs)\n if not isinstance(result, pd.DataFrame):\n raise TypeError(\n \"Function {func!r} must return a DataFrame when \"\n \"passed to `Styler.apply` with axis=None\"\n .format(func=func))\n if not (result.index.equals(data.index) and\n result.columns.equals(data.columns)):\n msg = ('Result of {func!r} must have identical index and '\n 'columns as the input'.format(func=func))\n raise ValueError(msg)\n\n result_shape = result.shape\n expected_shape = self.data.loc[subset].shape\n if result_shape != expected_shape:\n msg = (\"Function {func!r} returned the wrong shape.\\n\"\n \"Result has shape: {res}\\n\"\n \"Expected shape: {expect}\".format(func=func,\n res=result.shape,\n expect=expected_shape))\n raise ValueError(msg)\n self._update_ctx(result)\n return self\n\n def apply(self, func, axis=0, subset=None, **kwargs):\n \"\"\"\n Apply a function column-wise, row-wise, or table-wise,\n updating the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series or DataFrame (depending\n on ``axis``), and return an object with the same shape.\n Must return a DataFrame with identical index and\n column labels when ``axis=None``\n axis : int, str or None\n apply to each column (``axis=0`` or ``'index'``)\n or to each row (``axis=1`` or ``'columns'``) or\n to the entire DataFrame at once with ``axis=None``\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n The output shape of ``func`` should match the input, i.e. if\n ``x`` is the input row, column, or table (depending on ``axis``),\n then ``func(x).shape == x.shape`` should be true.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x):\n ... return ['background-color: yellow' if v == x.max() else ''\n for v in x]\n ...\n >>> df = pd.DataFrame(np.random.randn(5, 2))\n >>> df.style.apply(highlight_max)\n \"\"\"\n self._todo.append((lambda instance: getattr(instance, '_apply'),\n (func, axis, subset), kwargs))\n return self\n\n def _applymap(self, func, subset=None, **kwargs):\n func = partial(func, **kwargs) # applymap doesn't take kwargs?\n if subset is None:\n subset = pd.IndexSlice[:]\n subset = _non_reducing_slice(subset)\n result = self.data.loc[subset].applymap(func)\n self._update_ctx(result)\n return self\n\n def applymap(self, func, subset=None, **kwargs):\n \"\"\"\n Apply a function elementwise, updating the HTML\n representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. 
Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.where\n \"\"\"\n self._todo.append((lambda instance: getattr(instance, '_applymap'),\n (func, subset), kwargs))\n return self\n\n def where(self, cond, value, other=None, subset=None, **kwargs):\n \"\"\"\n Apply a function elementwise, updating the HTML\n representation with a style which is selected in\n accordance with the return value of a function.\n\n .. versionadded:: 0.21.0\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar and return a boolean\n value : str\n applied when ``cond`` returns true\n other : str\n applied when ``cond`` returns false\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``cond``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap\n \"\"\"\n\n if other is None:\n other = ''\n\n return self.applymap(lambda val: value if cond(val) else other,\n subset=subset, **kwargs)\n\n def set_precision(self, precision):\n \"\"\"\n Set the precision used to render.\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.precision = precision\n return self\n\n def set_table_attributes(self, attributes):\n \"\"\"\n Set the table attributes.\n\n These are the items that show up in the opening ``<table>`` tag\n in addition to to automatic (by default) id.\n\n Parameters\n ----------\n attributes : string\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_attributes('class=\"pure-table\"')\n # ... <table class=\"pure-table\"> ...\n \"\"\"\n self.table_attributes = attributes\n return self\n\n def export(self):\n \"\"\"\n Export the styles to applied to the current Styler.\n\n Can be applied to a second style with ``Styler.use``.\n\n Returns\n -------\n styles : list\n\n See Also\n --------\n Styler.use\n \"\"\"\n return self._todo\n\n def use(self, styles):\n \"\"\"\n Set the styles on the current Styler, possibly using styles\n from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n list of style functions\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export\n \"\"\"\n self._todo.extend(styles)\n return self\n\n def set_uuid(self, uuid):\n \"\"\"\n Set the uuid for a Styler.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.uuid = uuid\n return self\n\n def set_caption(self, caption):\n \"\"\"\n Set the caption on a Styler\n\n Parameters\n ----------\n caption : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.caption = caption\n return self\n\n def set_table_styles(self, table_styles):\n \"\"\"\n Set the table styles on a Styler.\n\n These are placed in a ``<style>`` tag before the generated HTML table.\n\n Parameters\n ----------\n table_styles : list\n Each individual table_style should be a dictionary with\n ``selector`` and ``props`` keys. ``selector`` should be a CSS\n selector that the style will be applied to (automatically\n prefixed by the table's UUID) and ``props`` should be a list of\n tuples with ``(attribute, value)``.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 
'props': [('background-color', 'yellow')]}]\n ... )\n \"\"\"\n self.table_styles = table_styles\n return self\n\n def hide_index(self):\n \"\"\"\n Hide any indices from rendering.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.hidden_index = True\n return self\n\n def hide_columns(self, subset):\n \"\"\"\n Hide columns from rendering.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that identifies which columns\n are hidden.\n\n Returns\n -------\n self : Styler\n \"\"\"\n subset = _non_reducing_slice(subset)\n hidden_df = self.data.loc[subset]\n self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)\n return self\n\n # -----------------------------------------------------------------------\n # A collection of \"builtin\" styles\n # -----------------------------------------------------------------------\n\n @staticmethod\n def _highlight_null(v, null_color):\n return ('background-color: {color}'.format(color=null_color)\n if pd.isna(v) else '')\n\n def highlight_null(self, null_color='red'):\n \"\"\"\n Shade the background ``null_color`` for missing values.\n\n Parameters\n ----------\n null_color : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.applymap(self._highlight_null, null_color=null_color)\n return self\n\n def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,\n subset=None, text_color_threshold=0.408):\n \"\"\"\n Color the background in a gradient according to\n the data in each column (optionally row).\n\n Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n matplotlib colormap\n low, high : float\n compress the range by these values.\n axis : int or str\n 1 or 'columns' for columnwise, 0 or 'index' for rowwise\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n text_color_threshold : float or int\n luminance threshold for determining text color. Facilitates text\n visibility across varying background colors. From 0 to 1.\n 0 = all text is dark colored, 1 = all text is light colored.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n\n Raises\n ------\n ValueError\n If ``text_color_threshold`` is not a value from 0 to 1.\n\n Notes\n -----\n Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the\n text legible by not using the entire range of the color map. 
The range\n of the data is extended by ``low * (x.max() - x.min())`` and ``high *\n (x.max() - x.min())`` before normalizing.\n \"\"\"\n subset = _maybe_numeric_slice(self.data, subset)\n subset = _non_reducing_slice(subset)\n self.apply(self._background_gradient, cmap=cmap, subset=subset,\n axis=axis, low=low, high=high,\n text_color_threshold=text_color_threshold)\n return self\n\n @staticmethod\n def _background_gradient(s, cmap='PuBu', low=0, high=0,\n text_color_threshold=0.408):\n \"\"\"\n Color background in a range according to the data.\n \"\"\"\n if (not isinstance(text_color_threshold, (float, int)) or\n not 0 <= text_color_threshold <= 1):\n msg = \"`text_color_threshold` must be a value from 0 to 1.\"\n raise ValueError(msg)\n\n with _mpl(Styler.background_gradient) as (plt, colors):\n smin = s.values.min()\n smax = s.values.max()\n rng = smax - smin\n # extend lower / upper bounds, compresses color range\n norm = colors.Normalize(smin - (rng * low), smax + (rng * high))\n # matplotlib colors.Normalize modifies inplace?\n # https://github.com/matplotlib/matplotlib/issues/5427\n rgbas = plt.cm.get_cmap(cmap)(norm(s.values))\n\n def relative_luminance(rgba):\n \"\"\"\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n \"\"\"\n r, g, b = (\n x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4)\n for x in rgba[:3]\n )\n return 0.2126 * r + 0.7152 * g + 0.0722 * b\n\n def css(rgba):\n dark = relative_luminance(rgba) < text_color_threshold\n text_color = '#f1f1f1' if dark else '#000000'\n return 'background-color: {b};color: {c};'.format(\n b=colors.rgb2hex(rgba), c=text_color\n )\n\n if s.ndim == 1:\n return [css(rgba) for rgba in rgbas]\n else:\n return pd.DataFrame(\n [[css(rgba) for rgba in row] for row in rgbas],\n index=s.index, columns=s.columns\n )\n\n def set_properties(self, subset=None, **kwargs):\n \"\"\"\n Convenience method for setting one or more non-data dependent\n properties or each cell.\n\n Parameters\n ----------\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n kwargs : dict\n property: value pairs to be set for each cell\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_properties(color=\"white\", align=\"right\")\n >>> df.style.set_properties(**{'background-color': 'yellow'})\n \"\"\"\n values = ';'.join('{p}: {v}'.format(p=p, v=v)\n for p, v in kwargs.items())\n f = lambda x: values\n return self.applymap(f, subset=subset)\n\n @staticmethod\n def _bar(s, align, colors, width=100, vmin=None, vmax=None):\n \"\"\"\n Draw bar chart in dataframe cells.\n \"\"\"\n # Get input value range.\n smin = s.min() if vmin is None else vmin\n if isinstance(smin, ABCSeries):\n smin = smin.min()\n smax = s.max() if vmax is None else vmax\n if isinstance(smax, ABCSeries):\n smax = smax.max()\n if align == 'mid':\n smin = min(0, smin)\n smax = max(0, smax)\n elif align == 'zero':\n # For \"zero\" mode, we want the range to be symmetrical around zero.\n smax = max(abs(smin), abs(smax))\n smin = -smax\n # Transform to percent-range of linear-gradient\n normed = width * (s.values - smin) / (smax - smin + 1e-12)\n zero = -width * smin / (smax - smin + 1e-12)\n\n def css_bar(start, end, color):\n \"\"\"\n Generate CSS code to draw a bar 
from start to end.\n \"\"\"\n css = 'width: 10em; height: 80%;'\n if end > start:\n css += 'background: linear-gradient(90deg,'\n if start > 0:\n css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(\n s=start, c=color\n )\n css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(\n e=min(end, width), c=color,\n )\n return css\n\n def css(x):\n if pd.isna(x):\n return ''\n\n # avoid deprecated indexing `colors[x > zero]`\n color = colors[1] if x > zero else colors[0]\n\n if align == 'left':\n return css_bar(0, x, color)\n else:\n return css_bar(min(x, zero), max(x, zero), color)\n\n if s.ndim == 1:\n return [css(x) for x in normed]\n else:\n return pd.DataFrame(\n [[css(x) for x in row] for row in normed],\n index=s.index, columns=s.columns\n )\n\n def bar(self, subset=None, axis=0, color='#d65f5f', width=100,\n align='left', vmin=None, vmax=None):\n \"\"\"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : IndexSlice, optional\n A valid slice for `data` to limit the style application to.\n axis : int, str or None, default 0\n Apply to each column (`axis=0` or `'index'`)\n or to each row (`axis=1` or `'columns'`) or\n to the entire DataFrame at once with `axis=None`.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n\n .. versionadded:: 0.20.0\n\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n if align not in ('left', 'zero', 'mid'):\n raise ValueError(\"`align` must be one of {'left', 'zero',' mid'}\")\n\n if not (is_list_like(color)):\n color = [color, color]\n elif len(color) == 1:\n color = [color[0], color[0]]\n elif len(color) > 2:\n raise ValueError(\"`color` must be string or a list-like\"\n \" of length 2: [`color_neg`, `color_pos`]\"\n \" (eg: color=['#d65f5f', '#5fba7d'])\")\n\n subset = _maybe_numeric_slice(self.data, subset)\n subset = _non_reducing_slice(subset)\n self.apply(self._bar, subset=subset, axis=axis,\n align=align, colors=color, width=width,\n vmin=vmin, vmax=vmax)\n\n return self\n\n def highlight_max(self, subset=None, color='yellow', axis=0):\n \"\"\"\n Highlight the maximum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(subset=subset, color=color, axis=axis,\n max_=True)\n\n def highlight_min(self, subset=None, color='yellow', axis=0):\n \"\"\"\n Highlight the minimum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(subset=subset, color=color, axis=axis,\n max_=False)\n\n def _highlight_handler(self, subset=None, color='yellow', axis=None,\n max_=True):\n subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))\n self.apply(self._highlight_extrema, color=color, axis=axis,\n subset=subset, max_=max_)\n return self\n\n @staticmethod\n def _highlight_extrema(data, color='yellow', max_=True):\n \"\"\"\n Highlight the min or max in a Series or DataFrame.\n \"\"\"\n attr = 'background-color: {0}'.format(color)\n if data.ndim == 1: # Series from .apply\n if max_:\n extrema = data == data.max()\n else:\n extrema = data == data.min()\n return [attr if v else '' for v in extrema]\n else: # DataFrame from .tee\n if max_:\n extrema = data == data.max().max()\n else:\n extrema = data == data.min().min()\n return pd.DataFrame(np.where(extrema, attr, ''),\n index=data.index, columns=data.columns)\n\n @classmethod\n def from_custom_template(cls, searchpath, name):\n \"\"\"\n Factory function for creating a subclass of ``Styler``\n with a custom template and Jinja environment.\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates\n name : str\n Name of your custom template to use for rendering\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env`` and ``template`` class attributes set.\n \"\"\"\n loader = ChoiceLoader([\n FileSystemLoader(searchpath),\n cls.loader,\n ])\n\n class MyStyler(cls):\n env = Environment(loader=loader)\n template = env.get_template(name)\n\n return MyStyler\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. 
Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args, **kwargs :\n Arguments passed to `func`.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a function row-wise, column-wise, or table-wise to\n modify the dataframe's styling.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style \"transformations\" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{'text-align': 'right'})\n ... .format({'conversion': '{:.1%}'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({'trial': list(range(5)),\n ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=['conversion'], color='yellow')\n ... .pipe(format_conversion)\n ... .set_caption(\"Results with minimum conversion highlighted.\"))\n \"\"\"\n return com._pipe(self, func, *args, **kwargs)\n\n\ndef _is_visible(idx_row, idx_col, lengths):\n \"\"\"\n Index -> {(idx_row, idx_col): bool}).\n \"\"\"\n return (idx_col, idx_row) in lengths\n\n\ndef _get_level_lengths(index, hidden_elements=None):\n \"\"\"\n Given an index, find the level length for each element.\n\n Optional argument is a list of index positions which\n should not be visible.\n\n Result is a dictionary of (level, inital_position): span\n \"\"\"\n sentinel = object()\n levels = index.format(sparsify=sentinel, adjoin=False, names=False)\n\n if hidden_elements is None:\n hidden_elements = []\n\n lengths = {}\n if index.nlevels == 1:\n for i, value in enumerate(levels):\n if(i not in hidden_elements):\n lengths[(0, i)] = 1\n return lengths\n\n for i, lvl in enumerate(levels):\n for j, row in enumerate(lvl):\n if not get_option('display.multi_sparse'):\n lengths[(i, j)] = 1\n elif (row != sentinel) and (j not in hidden_elements):\n last_label = j\n lengths[(i, last_label)] = 1\n elif (row != sentinel):\n # even if its hidden, keep track of it in case\n # length >1 and later elements are visible\n last_label = j\n lengths[(i, last_label)] = 0\n elif(j not in hidden_elements):\n lengths[(i, last_label)] += 1\n\n non_zero_lengths = {\n element: length for element, length in lengths.items() if length >= 1}\n\n return non_zero_lengths\n\n\ndef _maybe_wrap_formatter(formatter):\n if is_string_like(formatter):\n return lambda x: formatter.format(x)\n elif callable(formatter):\n return formatter\n else:\n msg = (\"Expected a template string or callable, got {formatter} \"\n \"instead\".format(formatter=formatter))\n raise TypeError(msg)\n", "\"\"\" common type operations \"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import 
algos, lib\nfrom pandas._libs.tslibs import conversion\nfrom pandas.compat import PY3, PY36, string_types\n\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype,\n PandasExtensionDtype, PeriodDtype, registry)\nfrom pandas.core.dtypes.generic import (\n ABCCategorical, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass,\n ABCPeriodArray, ABCPeriodIndex, ABCSeries)\nfrom pandas.core.dtypes.inference import ( # noqa:F401\n is_array_like, is_bool, is_complex, is_decimal, is_dict_like, is_file_like,\n is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like,\n is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable,\n is_scalar, is_sequence, is_string_like)\n\n_POSSIBLY_CAST_DTYPES = {np.dtype(t).name\n for t in ['O', 'int8', 'uint8', 'int16', 'uint16',\n 'int32', 'uint32', 'int64', 'uint64']}\n\n_NS_DTYPE = conversion.NS_DTYPE\n_TD_DTYPE = conversion.TD_DTYPE\n_INT64_DTYPE = np.dtype(np.int64)\n\n# oh the troubles to reduce import time\n_is_scipy_sparse = None\n\nensure_float64 = algos.ensure_float64\nensure_float32 = algos.ensure_float32\n\n_ensure_datetime64ns = conversion.ensure_datetime64ns\n_ensure_timedelta64ns = conversion.ensure_timedelta64ns\n\n\ndef ensure_float(arr):\n \"\"\"\n Ensure that an array object has a float dtype if possible.\n\n Parameters\n ----------\n arr : array-like\n The array whose data type we want to enforce as float.\n\n Returns\n -------\n float_arr : The original array cast to the float dtype if\n possible. Otherwise, the original array is returned.\n \"\"\"\n\n if issubclass(arr.dtype.type, (np.integer, np.bool_)):\n arr = arr.astype(float)\n return arr\n\n\nensure_uint64 = algos.ensure_uint64\nensure_int64 = algos.ensure_int64\nensure_int32 = algos.ensure_int32\nensure_int16 = algos.ensure_int16\nensure_int8 = algos.ensure_int8\nensure_platform_int = algos.ensure_platform_int\nensure_object = algos.ensure_object\n\n\ndef ensure_categorical(arr):\n \"\"\"\n Ensure that an array-like object is a Categorical (if not already).\n\n Parameters\n ----------\n arr : array-like\n The array that we want to convert into a Categorical.\n\n Returns\n -------\n cat_arr : The original array cast as a Categorical. 
If it already\n is a Categorical, we return as is.\n \"\"\"\n\n if not is_categorical(arr):\n from pandas import Categorical\n arr = Categorical(arr)\n return arr\n\n\ndef ensure_int64_or_float64(arr, copy=False):\n \"\"\"\n Ensure that an dtype array of some integer dtype\n has an int64 dtype if possible\n If it's not possible, potentially because of overflow,\n convert the array to float64 instead.\n\n Parameters\n ----------\n arr : array-like\n The array whose data type we want to enforce.\n copy: boolean\n Whether to copy the original array or reuse\n it in place, if possible.\n\n Returns\n -------\n out_arr : The input array cast as int64 if\n possible without overflow.\n Otherwise the input array cast to float64.\n \"\"\"\n try:\n return arr.astype('int64', copy=copy, casting='safe')\n except TypeError:\n return arr.astype('float64', copy=copy)\n\n\ndef classes(*klasses):\n \"\"\" evaluate if the tipo is a subclass of the klasses \"\"\"\n return lambda tipo: issubclass(tipo, klasses)\n\n\ndef classes_and_not_datetimelike(*klasses):\n \"\"\"\n evaluate if the tipo is a subclass of the klasses\n and not a datetimelike\n \"\"\"\n return lambda tipo: (issubclass(tipo, klasses) and\n not issubclass(tipo, (np.datetime64, np.timedelta64)))\n\n\ndef is_object_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the object dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the object dtype.\n\n Examples\n --------\n >>> is_object_dtype(object)\n True\n >>> is_object_dtype(int)\n False\n >>> is_object_dtype(np.array([], dtype=object))\n True\n >>> is_object_dtype(np.array([], dtype=int))\n False\n >>> is_object_dtype([1, 2, 3])\n False\n \"\"\"\n return _is_dtype_type(arr_or_dtype, classes(np.object_))\n\n\ndef is_sparse(arr):\n \"\"\"\n Check whether an array-like is a 1-D pandas sparse array.\n\n Check that the one-dimensional array-like is a pandas sparse array.\n Returns True if it is a pandas sparse array, not another type of\n sparse array.\n\n Parameters\n ----------\n arr : array-like\n Array-like to check.\n\n Returns\n -------\n bool\n Whether or not the array-like is a pandas sparse array.\n\n See Also\n --------\n DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.\n Series.to_sparse : Convert Series to SparseSeries.\n Series.to_dense : Return dense representation of a Series.\n\n Examples\n --------\n Returns `True` if the parameter is a 1-D pandas sparse array.\n\n >>> is_sparse(pd.SparseArray([0, 0, 1, 0]))\n True\n >>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))\n True\n\n Returns `False` if the parameter is not sparse.\n\n >>> is_sparse(np.array([0, 0, 1, 0]))\n False\n >>> is_sparse(pd.Series([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter is not a pandas sparse array.\n\n >>> from scipy.sparse import bsr_matrix\n >>> is_sparse(bsr_matrix([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter has more than one dimension.\n\n >>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],\n columns=['max_speed'],\n index=['falcon', 'parrot', 'lion', 'monkey'])\n >>> is_sparse(df)\n False\n >>> is_sparse(df.max_speed)\n True\n \"\"\"\n from pandas.core.arrays.sparse import SparseDtype\n\n dtype = getattr(arr, 'dtype', arr)\n return isinstance(dtype, SparseDtype)\n\n\ndef is_scipy_sparse(arr):\n \"\"\"\n Check whether an array-like is a scipy.sparse.spmatrix instance.\n\n Parameters\n ----------\n arr : array-like\n 
The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a scipy.sparse.spmatrix instance.\n\n Notes\n -----\n If scipy is not installed, this function will always return False.\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix\n >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))\n True\n >>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))\n False\n >>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))\n False\n \"\"\"\n\n global _is_scipy_sparse\n\n if _is_scipy_sparse is None:\n try:\n from scipy.sparse import issparse as _is_scipy_sparse\n except ImportError:\n _is_scipy_sparse = lambda _: False\n\n return _is_scipy_sparse(arr)\n\n\ndef is_categorical(arr):\n \"\"\"\n Check whether an array-like is a Categorical instance.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a Categorical instance.\n\n Examples\n --------\n >>> is_categorical([1, 2, 3])\n False\n\n Categoricals, Series Categoricals, and CategoricalIndex will return True.\n\n >>> cat = pd.Categorical([1, 2, 3])\n >>> is_categorical(cat)\n True\n >>> is_categorical(pd.Series(cat))\n True\n >>> is_categorical(pd.CategoricalIndex([1, 2, 3]))\n True\n \"\"\"\n\n return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)\n\n\ndef is_datetimetz(arr):\n \"\"\"\n Check whether an array-like is a datetime array-like with a timezone\n component in its dtype.\n\n .. deprecated:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime array-like with a\n timezone component in its dtype.\n\n Examples\n --------\n >>> is_datetimetz([1, 2, 3])\n False\n\n Although the following examples are both DatetimeIndex objects,\n the first one returns False because it has no timezone component\n unlike the second one, which returns True.\n\n >>> is_datetimetz(pd.DatetimeIndex([1, 2, 3]))\n False\n >>> is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n\n The object need not be a DatetimeIndex object. It just needs to have\n a dtype which has a timezone component.\n\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetimetz(s)\n True\n \"\"\"\n\n warnings.warn(\"'is_datetimetz' is deprecated and will be removed in a \"\n \"future version. Use 'is_datetime64tz_dtype' instead.\",\n FutureWarning, stacklevel=2)\n return is_datetime64tz_dtype(arr)\n\n\ndef is_offsetlike(arr_or_obj):\n \"\"\"\n Check if obj or all elements of list-like is DateOffset\n\n Parameters\n ----------\n arr_or_obj : object\n\n Returns\n -------\n boolean\n Whether the object is a DateOffset or listlike of DatetOffsets\n\n Examples\n --------\n >>> is_offsetlike(pd.DateOffset(days=1))\n True\n >>> is_offsetlike('offset')\n False\n >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])\n True\n >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))\n False\n \"\"\"\n if isinstance(arr_or_obj, ABCDateOffset):\n return True\n elif (is_list_like(arr_or_obj) and len(arr_or_obj) and\n is_object_dtype(arr_or_obj)):\n return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)\n return False\n\n\ndef is_period(arr):\n \"\"\"\n Check whether an array-like is a periodical index.\n\n .. 
deprecated:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a periodical index.\n\n Examples\n --------\n >>> is_period([1, 2, 3])\n False\n >>> is_period(pd.Index([1, 2, 3]))\n False\n >>> is_period(pd.PeriodIndex([\"2017-01-01\"], freq=\"D\"))\n True\n \"\"\"\n\n warnings.warn(\"'is_period' is deprecated and will be removed in a future \"\n \"version. Use 'is_period_dtype' or is_period_arraylike' \"\n \"instead.\", FutureWarning, stacklevel=2)\n\n return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)\n\n\ndef is_datetime64_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime64_dtype(object)\n False\n >>> is_datetime64_dtype(np.datetime64)\n True\n >>> is_datetime64_dtype(np.array([], dtype=int))\n False\n >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))\n True\n >>> is_datetime64_dtype([1, 2, 3])\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.datetime64))\n\n\ndef is_datetime64tz_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of a DatetimeTZDtype dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.\n\n Examples\n --------\n >>> is_datetime64tz_dtype(object)\n False\n >>> is_datetime64tz_dtype([1, 2, 3])\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetime64tz_dtype(dtype)\n True\n >>> is_datetime64tz_dtype(s)\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return DatetimeTZDtype.is_dtype(arr_or_dtype)\n\n\ndef is_timedelta64_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the timedelta64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the timedelta64 dtype.\n\n Examples\n --------\n >>> is_timedelta64_dtype(object)\n False\n >>> is_timedelta64_dtype(np.timedelta64)\n True\n >>> is_timedelta64_dtype([1, 2, 3])\n False\n >>> is_timedelta64_dtype(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>> is_timedelta64_dtype('0 days')\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))\n\n\ndef is_period_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the Period dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Period dtype.\n\n Examples\n --------\n >>> is_period_dtype(object)\n False\n >>> is_period_dtype(PeriodDtype(freq=\"D\"))\n True\n >>> is_period_dtype([1, 2, 3])\n False\n >>> is_period_dtype(pd.Period(\"2017-01-01\"))\n False\n >>> is_period_dtype(pd.PeriodIndex([], freq=\"A\"))\n True\n \"\"\"\n\n # TODO: Consider making Period an instance of PeriodDtype\n if arr_or_dtype is None:\n return False\n return 
PeriodDtype.is_dtype(arr_or_dtype)\n\n\ndef is_interval_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the Interval dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Interval dtype.\n\n Examples\n --------\n >>> is_interval_dtype(object)\n False\n >>> is_interval_dtype(IntervalDtype())\n True\n >>> is_interval_dtype([1, 2, 3])\n False\n >>>\n >>> interval = pd.Interval(1, 2, closed=\"right\")\n >>> is_interval_dtype(interval)\n False\n >>> is_interval_dtype(pd.IntervalIndex([interval]))\n True\n \"\"\"\n\n # TODO: Consider making Interval an instance of IntervalDtype\n if arr_or_dtype is None:\n return False\n return IntervalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_categorical_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the Categorical dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Categorical dtype.\n\n Examples\n --------\n >>> is_categorical_dtype(object)\n False\n >>> is_categorical_dtype(CategoricalDtype())\n True\n >>> is_categorical_dtype([1, 2, 3])\n False\n >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))\n True\n >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return CategoricalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_string_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the string dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the string dtype.\n\n Examples\n --------\n >>> is_string_dtype(str)\n True\n >>> is_string_dtype(object)\n True\n >>> is_string_dtype(int)\n False\n >>>\n >>> is_string_dtype(np.array(['a', 'b']))\n True\n >>> is_string_dtype(pd.Series([1, 2]))\n False\n \"\"\"\n\n # TODO: gh-15585: consider making the checks stricter.\n def condition(dtype):\n return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)\n return _is_dtype(arr_or_dtype, condition)\n\n\ndef is_period_arraylike(arr):\n \"\"\"\n Check whether an array-like is a periodical array-like or PeriodIndex.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a periodical array-like or\n PeriodIndex instance.\n\n Examples\n --------\n >>> is_period_arraylike([1, 2, 3])\n False\n >>> is_period_arraylike(pd.Index([1, 2, 3]))\n False\n >>> is_period_arraylike(pd.PeriodIndex([\"2017-01-01\"], freq=\"D\"))\n True\n \"\"\"\n\n if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):\n return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return is_period_dtype(arr.dtype)\n return getattr(arr, 'inferred_type', None) == 'period'\n\n\ndef is_datetime_arraylike(arr):\n \"\"\"\n Check whether an array-like is a datetime array-like or DatetimeIndex.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime array-like or\n DatetimeIndex.\n\n Examples\n --------\n >>> is_datetime_arraylike([1, 2, 3])\n False\n >>> is_datetime_arraylike(pd.Index([1, 2, 3]))\n False\n >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))\n True\n \"\"\"\n\n if isinstance(arr, ABCDatetimeIndex):\n 
return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return (is_object_dtype(arr.dtype)\n and lib.infer_dtype(arr, skipna=False) == 'datetime')\n return getattr(arr, 'inferred_type', None) == 'datetime'\n\n\ndef is_datetimelike(arr):\n \"\"\"\n Check whether an array-like is a datetime-like array-like.\n\n Acceptable datetime-like objects are (but not limited to) datetime\n indices, periodic indices, and timedelta indices.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime-like array-like.\n\n Examples\n --------\n >>> is_datetimelike([1, 2, 3])\n False\n >>> is_datetimelike(pd.Index([1, 2, 3]))\n False\n >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))\n True\n >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n >>> is_datetimelike(pd.PeriodIndex([], freq=\"A\"))\n True\n >>> is_datetimelike(np.array([], dtype=np.datetime64))\n True\n >>> is_datetimelike(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>>\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetimelike(s)\n True\n \"\"\"\n\n return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or\n is_timedelta64_dtype(arr) or\n isinstance(arr, ABCPeriodIndex))\n\n\ndef is_dtype_equal(source, target):\n \"\"\"\n Check if two dtypes are equal.\n\n Parameters\n ----------\n source : The first dtype to compare\n target : The second dtype to compare\n\n Returns\n ----------\n boolean\n Whether or not the two dtypes are equal.\n\n Examples\n --------\n >>> is_dtype_equal(int, float)\n False\n >>> is_dtype_equal(\"int\", int)\n True\n >>> is_dtype_equal(object, \"category\")\n False\n >>> is_dtype_equal(CategoricalDtype(), \"category\")\n True\n >>> is_dtype_equal(DatetimeTZDtype(), \"datetime64\")\n False\n \"\"\"\n\n try:\n source = _get_dtype(source)\n target = _get_dtype(target)\n return source == target\n except (TypeError, AttributeError):\n\n # invalid comparison\n # object == category will hit this\n return False\n\n\ndef is_dtype_union_equal(source, target):\n \"\"\"\n Check whether two arrays have compatible dtypes to do a union.\n numpy types are checked with ``is_dtype_equal``. Extension types are\n checked separately.\n\n Parameters\n ----------\n source : The first dtype to compare\n target : The second dtype to compare\n\n Returns\n ----------\n boolean\n Whether or not the two dtypes are equal.\n\n >>> is_dtype_equal(\"int\", int)\n True\n\n >>> is_dtype_equal(CategoricalDtype(['a', 'b'],\n ... CategoricalDtype(['b', 'c']))\n True\n\n >>> is_dtype_equal(CategoricalDtype(['a', 'b'],\n ... CategoricalDtype(['b', 'c'], ordered=True))\n False\n \"\"\"\n source = _get_dtype(source)\n target = _get_dtype(target)\n if is_categorical_dtype(source) and is_categorical_dtype(target):\n # ordered False for both\n return source.ordered is target.ordered\n return is_dtype_equal(source, target)\n\n\ndef is_any_int_dtype(arr_or_dtype):\n \"\"\"Check whether the provided array or dtype is of an integer dtype.\n\n In this function, timedelta64 instances are also considered \"any-integer\"\n type objects and will return True.\n\n This function is internal and should not be exposed in the public API.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an integer dtype.\n\n Examples\n --------\n >>> is_any_int_dtype(str)\n False\n >>> is_any_int_dtype(int)\n True\n >>> is_any_int_dtype(float)\n False\n >>> is_any_int_dtype(np.uint64)\n True\n >>> is_any_int_dtype(np.datetime64)\n False\n >>> is_any_int_dtype(np.timedelta64)\n True\n >>> is_any_int_dtype(np.array(['a', 'b']))\n False\n >>> is_any_int_dtype(pd.Series([1, 2]))\n True\n >>> is_any_int_dtype(np.array([], dtype=np.timedelta64))\n True\n >>> is_any_int_dtype(pd.Index([1, 2.])) # float\n False\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes(np.integer, np.timedelta64))\n\n\ndef is_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of an integer dtype.\n\n Unlike in `in_any_int_dtype`, timedelta64 instances will return False.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an integer dtype and\n not an instance of timedelta64.\n\n Examples\n --------\n >>> is_integer_dtype(str)\n False\n >>> is_integer_dtype(int)\n True\n >>> is_integer_dtype(float)\n False\n >>> is_integer_dtype(np.uint64)\n True\n >>> is_integer_dtype('int8')\n True\n >>> is_integer_dtype('Int8')\n True\n >>> is_integer_dtype(pd.Int8Dtype)\n True\n >>> is_integer_dtype(np.datetime64)\n False\n >>> is_integer_dtype(np.timedelta64)\n False\n >>> is_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_integer_dtype(pd.Index([1, 2.])) # float\n False\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.integer))\n\n\ndef is_signed_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a signed integer dtype.\n\n Unlike in `in_any_int_dtype`, timedelta64 instances will return False.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a signed integer dtype\n and not an instance of timedelta64.\n\n Examples\n --------\n >>> is_signed_integer_dtype(str)\n False\n >>> is_signed_integer_dtype(int)\n True\n >>> is_signed_integer_dtype(float)\n False\n >>> is_signed_integer_dtype(np.uint64) # unsigned\n False\n >>> is_signed_integer_dtype('int8')\n True\n >>> is_signed_integer_dtype('Int8')\n True\n >>> is_signed_dtype(pd.Int8Dtype)\n True\n >>> is_signed_integer_dtype(np.datetime64)\n False\n >>> is_signed_integer_dtype(np.timedelta64)\n False\n >>> is_signed_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_signed_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned\n False\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.signedinteger))\n\n\ndef is_unsigned_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of an unsigned integer dtype.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also\n considered as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an unsigned integer dtype.\n\n Examples\n --------\n >>> is_unsigned_integer_dtype(str)\n False\n >>> is_unsigned_integer_dtype(int) # signed\n False\n >>> is_unsigned_integer_dtype(float)\n False\n >>> is_unsigned_integer_dtype(np.uint64)\n True\n >>> is_unsigned_integer_dtype('uint8')\n True\n >>> is_unsigned_integer_dtype('UInt8')\n True\n >>> is_unsigned_integer_dtype(pd.UInt8Dtype)\n True\n >>> is_unsigned_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed\n False\n >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))\n True\n \"\"\"\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger))\n\n\ndef is_int64_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the int64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the int64 dtype.\n\n Notes\n -----\n Depending on system architecture, the return value of `is_int64_dtype(\n int)` will be True if the OS uses 64-bit integers and False if the OS\n uses 32-bit integers.\n\n Examples\n --------\n >>> is_int64_dtype(str)\n False\n >>> is_int64_dtype(np.int32)\n False\n >>> is_int64_dtype(np.int64)\n True\n >>> is_int64_dtype('int8')\n False\n >>> is_int64_dtype('Int8')\n False\n >>> is_int64_dtype(pd.Int64Dtype)\n True\n >>> is_int64_dtype(float)\n False\n >>> is_int64_dtype(np.uint64) # unsigned\n False\n >>> is_int64_dtype(np.array(['a', 'b']))\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.int64))\n True\n >>> is_int64_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.int64))\n\n\ndef 
is_datetime64_any_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime64_any_dtype(str)\n False\n >>> is_datetime64_any_dtype(int)\n False\n >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive\n True\n >>> is_datetime64_any_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_any_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_any_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_any_dtype(np.array([], dtype=np.datetime64))\n True\n >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],\n dtype=np.datetime64))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return (is_datetime64_dtype(arr_or_dtype) or\n is_datetime64tz_dtype(arr_or_dtype))\n\n\ndef is_datetime64_ns_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n >>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([],\n dtype=\"datetime64[ps]\")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],\n dtype=np.datetime64)) # has 'ns' unit\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n try:\n tipo = _get_dtype(arr_or_dtype)\n except TypeError:\n if is_datetime64tz_dtype(arr_or_dtype):\n tipo = _get_dtype(arr_or_dtype.dtype)\n else:\n return False\n return tipo == _NS_DTYPE or getattr(tipo, 'base', None) == _NS_DTYPE\n\n\ndef is_timedelta64_ns_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the timedelta64[ns] dtype.\n\n This is a very specific dtype, so generic ones like `np.timedelta64`\n will return False if passed into this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the timedelta64[ns] dtype.\n\n Examples\n --------\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency\n False\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))\n False\n \"\"\"\n return _is_dtype(arr_or_dtype, lambda dtype: dtype == _TD_DTYPE)\n\n\ndef is_datetime_or_timedelta_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of\n a timedelta64 or datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a timedelta64,\n or datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime_or_timedelta_dtype(str)\n False\n >>> is_datetime_or_timedelta_dtype(int)\n False\n >>> 
is_datetime_or_timedelta_dtype(np.datetime64)\n True\n >>> is_datetime_or_timedelta_dtype(np.timedelta64)\n True\n >>> is_datetime_or_timedelta_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime_or_timedelta_dtype(pd.Series([1, 2]))\n False\n >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64))\n True\n >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64))\n True\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes(np.datetime64, np.timedelta64))\n\n\ndef _is_unorderable_exception(e):\n \"\"\"\n Check if the exception raised is an unorderable exception.\n\n The error message differs for 3 <= PY <= 3.5 and PY >= 3.6, so\n we need to condition based on Python version.\n\n Parameters\n ----------\n e : Exception or sub-class\n The exception object to check.\n\n Returns\n -------\n boolean\n Whether or not the exception raised is an unorderable exception.\n \"\"\"\n\n if PY36:\n return \"'>' not supported between instances of\" in str(e)\n\n elif PY3:\n return 'unorderable' in str(e)\n return False\n\n\ndef is_numeric_v_string_like(a, b):\n \"\"\"\n Check if we are comparing a string-like object to a numeric ndarray.\n\n NumPy doesn't like to compare such objects, especially numeric arrays\n and scalar string-likes.\n\n Parameters\n ----------\n a : array-like, scalar\n The first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a string-like object to a numeric array.\n\n Examples\n --------\n >>> is_numeric_v_string_like(1, 1)\n False\n >>> is_numeric_v_string_like(\"foo\", \"foo\")\n False\n >>> is_numeric_v_string_like(1, \"foo\") # non-array numeric\n False\n >>> is_numeric_v_string_like(np.array([1]), \"foo\")\n True\n >>> is_numeric_v_string_like(\"foo\", np.array([1])) # symmetric check\n True\n >>> is_numeric_v_string_like(np.array([1, 2]), np.array([\"foo\"]))\n True\n >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([1, 2]))\n True\n >>> is_numeric_v_string_like(np.array([1]), np.array([2]))\n False\n >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([\"foo\"]))\n False\n \"\"\"\n\n is_a_array = isinstance(a, np.ndarray)\n is_b_array = isinstance(b, np.ndarray)\n\n is_a_numeric_array = is_a_array and is_numeric_dtype(a)\n is_b_numeric_array = is_b_array and is_numeric_dtype(b)\n is_a_string_array = is_a_array and is_string_like_dtype(a)\n is_b_string_array = is_b_array and is_string_like_dtype(b)\n\n is_a_scalar_string_like = not is_a_array and is_string_like(a)\n is_b_scalar_string_like = not is_b_array and is_string_like(b)\n\n return ((is_a_numeric_array and is_b_scalar_string_like) or\n (is_b_numeric_array and is_a_scalar_string_like) or\n (is_a_numeric_array and is_b_string_array) or\n (is_b_numeric_array and is_a_string_array))\n\n\ndef is_datetimelike_v_numeric(a, b):\n \"\"\"\n Check if we are comparing a datetime-like object to a numeric object.\n\n By \"numeric,\" we mean an object that is either of an int or float dtype.\n\n Parameters\n ----------\n a : array-like, scalar\n The first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a datetime-like to a numeric object.\n\n Examples\n --------\n >>> dt = np.datetime64(pd.datetime(2017, 1, 1))\n >>>\n >>> is_datetimelike_v_numeric(1, 1)\n False\n >>> is_datetimelike_v_numeric(dt, dt)\n False\n >>> is_datetimelike_v_numeric(1, dt)\n True\n >>> is_datetimelike_v_numeric(dt, 1) # symmetric 
check\n True\n >>> is_datetimelike_v_numeric(np.array([dt]), 1)\n True\n >>> is_datetimelike_v_numeric(np.array([1]), dt)\n True\n >>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))\n True\n >>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))\n False\n >>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))\n False\n \"\"\"\n\n if not hasattr(a, 'dtype'):\n a = np.asarray(a)\n if not hasattr(b, 'dtype'):\n b = np.asarray(b)\n\n def is_numeric(x):\n \"\"\"\n Check if an object has a numeric dtype (i.e. integer or float).\n \"\"\"\n return is_integer_dtype(x) or is_float_dtype(x)\n\n is_datetimelike = needs_i8_conversion\n return ((is_datetimelike(a) and is_numeric(b)) or\n (is_datetimelike(b) and is_numeric(a)))\n\n\ndef is_datetimelike_v_object(a, b):\n \"\"\"\n Check if we are comparing a datetime-like object to an object instance.\n\n Parameters\n ----------\n a : array-like, scalar\n The first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a datetime-like to an object instance.\n\n Examples\n --------\n >>> obj = object()\n >>> dt = np.datetime64(pd.datetime(2017, 1, 1))\n >>>\n >>> is_datetimelike_v_object(obj, obj)\n False\n >>> is_datetimelike_v_object(dt, dt)\n False\n >>> is_datetimelike_v_object(obj, dt)\n True\n >>> is_datetimelike_v_object(dt, obj) # symmetric check\n True\n >>> is_datetimelike_v_object(np.array([dt]), obj)\n True\n >>> is_datetimelike_v_object(np.array([obj]), dt)\n True\n >>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))\n True\n >>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))\n False\n >>> is_datetimelike_v_object(np.array([dt]), np.array([1]))\n False\n >>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))\n False\n \"\"\"\n\n if not hasattr(a, 'dtype'):\n a = np.asarray(a)\n if not hasattr(b, 'dtype'):\n b = np.asarray(b)\n\n is_datetimelike = needs_i8_conversion\n return ((is_datetimelike(a) and is_object_dtype(b)) or\n (is_datetimelike(b) and is_object_dtype(a)))\n\n\ndef needs_i8_conversion(arr_or_dtype):\n \"\"\"\n Check whether the array or dtype should be converted to int64.\n\n An array-like or dtype \"needs\" such a conversion if the array-like\n or dtype is of a datetime-like dtype\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype should be converted to int64.\n\n Examples\n --------\n >>> needs_i8_conversion(str)\n False\n >>> needs_i8_conversion(np.int64)\n False\n >>> needs_i8_conversion(np.datetime64)\n True\n >>> needs_i8_conversion(np.array(['a', 'b']))\n False\n >>> needs_i8_conversion(pd.Series([1, 2]))\n False\n >>> needs_i8_conversion(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return (is_datetime_or_timedelta_dtype(arr_or_dtype) or\n is_datetime64tz_dtype(arr_or_dtype) or\n is_period_dtype(arr_or_dtype))\n\n\ndef is_numeric_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a numeric dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a numeric dtype.\n\n Examples\n --------\n >>> is_numeric_dtype(str)\n False\n >>> is_numeric_dtype(int)\n True\n >>> is_numeric_dtype(float)\n True\n >>> 
is_numeric_dtype(np.uint64)\n True\n >>> is_numeric_dtype(np.datetime64)\n False\n >>> is_numeric_dtype(np.timedelta64)\n False\n >>> is_numeric_dtype(np.array(['a', 'b']))\n False\n >>> is_numeric_dtype(pd.Series([1, 2]))\n True\n >>> is_numeric_dtype(pd.Index([1, 2.]))\n True\n >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))\n False\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_))\n\n\ndef is_string_like_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a string-like dtype.\n\n Unlike `is_string_dtype`, the object dtype is excluded because it\n is a mixed dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the string dtype.\n\n Examples\n --------\n >>> is_string_like_dtype(str)\n True\n >>> is_string_like_dtype(object)\n False\n >>> is_string_like_dtype(np.array(['a', 'b']))\n True\n >>> is_string_like_dtype(pd.Series([1, 2]))\n False\n \"\"\"\n\n return _is_dtype(\n arr_or_dtype, lambda dtype: dtype.kind in ('S', 'U'))\n\n\ndef is_float_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a float dtype.\n\n This function is internal and should not be exposed in the public API.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a float dtype.\n\n Examples\n --------\n >>> is_float_dtype(str)\n False\n >>> is_float_dtype(int)\n False\n >>> is_float_dtype(float)\n True\n >>> is_float_dtype(np.array(['a', 'b']))\n False\n >>> is_float_dtype(pd.Series([1, 2]))\n False\n >>> is_float_dtype(pd.Index([1, 2.]))\n True\n \"\"\"\n return _is_dtype_type(arr_or_dtype, classes(np.floating))\n\n\ndef is_bool_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a boolean dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a boolean dtype.\n\n Notes\n -----\n An ExtensionArray is considered boolean when the ``_is_boolean``\n attribute is set to True.\n\n Examples\n --------\n >>> is_bool_dtype(str)\n False\n >>> is_bool_dtype(int)\n False\n >>> is_bool_dtype(bool)\n True\n >>> is_bool_dtype(np.bool)\n True\n >>> is_bool_dtype(np.array(['a', 'b']))\n False\n >>> is_bool_dtype(pd.Series([1, 2]))\n False\n >>> is_bool_dtype(np.array([True, False]))\n True\n >>> is_bool_dtype(pd.Categorical([True, False]))\n True\n >>> is_bool_dtype(pd.SparseArray([True, False]))\n True\n \"\"\"\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except TypeError:\n return False\n\n if isinstance(arr_or_dtype, CategoricalDtype):\n arr_or_dtype = arr_or_dtype.categories\n # now we use the special definition for Index\n\n if isinstance(arr_or_dtype, ABCIndexClass):\n\n # TODO(jreback)\n # we don't have a boolean Index class\n # so its object, we need to infer to\n # guess this\n return (arr_or_dtype.is_object and\n arr_or_dtype.inferred_type == 'boolean')\n elif is_extension_array_dtype(arr_or_dtype):\n dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)\n return dtype._is_boolean\n\n return issubclass(dtype.type, np.bool_)\n\n\ndef is_extension_type(arr):\n \"\"\"\n Check whether an array-like is of a pandas extension class instance.\n\n Extension classes include categoricals, pandas sparse objects (i.e.\n 
classes represented within the pandas library and not ones external\n to it like scipy sparse matrices), and datetime-like arrays.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a pandas extension class instance.\n\n Examples\n --------\n >>> is_extension_type([1, 2, 3])\n False\n >>> is_extension_type(np.array([1, 2, 3]))\n False\n >>>\n >>> cat = pd.Categorical([1, 2, 3])\n >>>\n >>> is_extension_type(cat)\n True\n >>> is_extension_type(pd.Series(cat))\n True\n >>> is_extension_type(pd.SparseArray([1, 2, 3]))\n True\n >>> is_extension_type(pd.SparseSeries([1, 2, 3]))\n True\n >>>\n >>> from scipy.sparse import bsr_matrix\n >>> is_extension_type(bsr_matrix([1, 2, 3]))\n False\n >>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))\n False\n >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n >>>\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_extension_type(s)\n True\n \"\"\"\n\n if is_categorical(arr):\n return True\n elif is_sparse(arr):\n return True\n elif is_datetime64tz_dtype(arr):\n return True\n return False\n\n\ndef is_extension_array_dtype(arr_or_dtype):\n \"\"\"\n Check if an object is a pandas extension array type.\n\n See the :ref:`Use Guide <extending.extension-types>` for more.\n\n Parameters\n ----------\n arr_or_dtype : object\n For array-like input, the ``.dtype`` attribute will\n be extracted.\n\n Returns\n -------\n bool\n Whether the `arr_or_dtype` is an extension array type.\n\n Notes\n -----\n This checks whether an object implements the pandas extension\n array interface. In pandas, this includes:\n\n * Categorical\n * Sparse\n * Interval\n * Period\n * DatetimeArray\n * TimedeltaArray\n\n Third-party libraries may implement arrays or types satisfying\n this interface as well.\n\n Examples\n --------\n >>> from pandas.api.types import is_extension_array_dtype\n >>> arr = pd.Categorical(['a', 'b'])\n >>> is_extension_array_dtype(arr)\n True\n >>> is_extension_array_dtype(arr.dtype)\n True\n\n >>> arr = np.array(['a', 'b'])\n >>> is_extension_array_dtype(arr.dtype)\n False\n \"\"\"\n dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)\n return (isinstance(dtype, ExtensionDtype) or\n registry.find(dtype) is not None)\n\n\ndef is_complex_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a complex dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a compex dtype.\n\n Examples\n --------\n >>> is_complex_dtype(str)\n False\n >>> is_complex_dtype(int)\n False\n >>> is_complex_dtype(np.complex)\n True\n >>> is_complex_dtype(np.array(['a', 'b']))\n False\n >>> is_complex_dtype(pd.Series([1, 2]))\n False\n >>> is_complex_dtype(np.array([1 + 1j, 5]))\n True\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))\n\n\ndef _is_dtype(arr_or_dtype, condition):\n \"\"\"\n Return a boolean if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtype]]\n\n Returns\n -------\n bool\n\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except (TypeError, ValueError, UnicodeEncodeError):\n return 
False\n return condition(dtype)\n\n\ndef _get_dtype(arr_or_dtype):\n \"\"\"\n Get the dtype instance associated with an array\n or dtype object.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype object whose dtype we want to extract.\n\n Returns\n -------\n obj_dtype : The extract dtype instance from the\n passed in array or dtype object.\n\n Raises\n ------\n TypeError : The passed in object is None.\n \"\"\"\n\n if arr_or_dtype is None:\n raise TypeError(\"Cannot deduce dtype from null object\")\n\n # fastpath\n elif isinstance(arr_or_dtype, np.dtype):\n return arr_or_dtype\n elif isinstance(arr_or_dtype, type):\n return np.dtype(arr_or_dtype)\n\n # if we have an array-like\n elif hasattr(arr_or_dtype, 'dtype'):\n arr_or_dtype = arr_or_dtype.dtype\n\n return pandas_dtype(arr_or_dtype)\n\n\ndef _is_dtype_type(arr_or_dtype, condition):\n \"\"\"\n Return a boolean if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtypeType]]\n\n Returns\n -------\n bool : if the condition is satisifed for the arr_or_dtype\n \"\"\"\n\n if arr_or_dtype is None:\n return condition(type(None))\n\n # fastpath\n if isinstance(arr_or_dtype, np.dtype):\n return condition(arr_or_dtype.type)\n elif isinstance(arr_or_dtype, type):\n if issubclass(arr_or_dtype, (PandasExtensionDtype, ExtensionDtype)):\n arr_or_dtype = arr_or_dtype.type\n return condition(np.dtype(arr_or_dtype).type)\n elif arr_or_dtype is None:\n return condition(type(None))\n\n # if we have an array-like\n if hasattr(arr_or_dtype, 'dtype'):\n arr_or_dtype = arr_or_dtype.dtype\n\n # we are not possibly a dtype\n elif is_list_like(arr_or_dtype):\n return condition(type(None))\n\n try:\n tipo = pandas_dtype(arr_or_dtype).type\n except (TypeError, ValueError, UnicodeEncodeError):\n if is_scalar(arr_or_dtype):\n return condition(type(None))\n\n return False\n\n return condition(tipo)\n\n\ndef infer_dtype_from_object(dtype):\n \"\"\"\n Get a numpy dtype.type-style object for a dtype object.\n\n This methods also includes handling of the datetime64[ns] and\n datetime64[ns, TZ] objects.\n\n If no dtype can be found, we return ``object``.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype object whose numpy dtype.type-style\n object we want to extract.\n\n Returns\n -------\n dtype_object : The extracted numpy dtype.type-style object.\n \"\"\"\n\n if isinstance(dtype, type) and issubclass(dtype, np.generic):\n # Type object from a dtype\n return dtype\n elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):\n # dtype object\n try:\n _validate_date_like_dtype(dtype)\n except TypeError:\n # Should still pass if we don't have a date-like\n pass\n return dtype.type\n\n try:\n dtype = pandas_dtype(dtype)\n except TypeError:\n pass\n\n if is_extension_array_dtype(dtype):\n return dtype.type\n elif isinstance(dtype, string_types):\n\n # TODO(jreback)\n # should deprecate these\n if dtype in ['datetimetz', 'datetime64tz']:\n return DatetimeTZDtype.type\n elif dtype in ['period']:\n raise NotImplementedError\n\n if dtype == 'datetime' or dtype == 'timedelta':\n dtype += '64'\n try:\n return infer_dtype_from_object(getattr(np, dtype))\n except (AttributeError, TypeError):\n # Handles cases like _get_dtype(int) i.e.,\n # Python objects that are valid dtypes\n # (unlike user-defined types, in general)\n #\n # TypeError handles the float16 type 
code of 'e'\n # further handle internal types\n pass\n\n return infer_dtype_from_object(np.dtype(dtype))\n\n\ndef _validate_date_like_dtype(dtype):\n \"\"\"\n Check whether the dtype is a date-like dtype. Raises an error if invalid.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype to check.\n\n Raises\n ------\n TypeError : The dtype could not be casted to a date-like dtype.\n ValueError : The dtype is an illegal date-like dtype (e.g. the\n the frequency provided is too specific)\n \"\"\"\n\n try:\n typ = np.datetime_data(dtype)[0]\n except ValueError as e:\n raise TypeError('{error}'.format(error=e))\n if typ != 'generic' and typ != 'ns':\n msg = '{name!r} is too specific of a frequency, try passing {type!r}'\n raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))\n\n\ndef pandas_dtype(dtype):\n \"\"\"\n Converts input into a pandas only dtype object or a numpy dtype object.\n\n Parameters\n ----------\n dtype : object to be converted\n\n Returns\n -------\n np.dtype or a pandas dtype\n\n Raises\n ------\n TypeError if not a dtype\n \"\"\"\n # short-circuit\n if isinstance(dtype, np.ndarray):\n return dtype.dtype\n elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):\n return dtype\n\n # registered extension types\n result = registry.find(dtype)\n if result is not None:\n return result\n\n # try a numpy dtype\n # raise a consistent TypeError if failed\n try:\n npdtype = np.dtype(dtype)\n except Exception:\n # we don't want to force a repr of the non-string\n if not isinstance(dtype, string_types):\n raise TypeError(\"data type not understood\")\n raise TypeError(\"data type '{}' not understood\".format(\n dtype))\n\n # Any invalid dtype (such as pd.Timestamp) should raise an error.\n # np.dtype(invalid_type).kind = 0 for such objects. However, this will\n # also catch some valid dtypes such as object, np.object_ and 'object'\n # which we safeguard against by catching them earlier and returning\n # np.dtype(valid_dtype) before this condition is evaluated.\n if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:\n # check hashability to avoid errors/DeprecationWarning when we get\n # here and `dtype` is an array\n return npdtype\n elif npdtype.kind == 'O':\n raise TypeError(\"dtype '{}' not understood\".format(dtype))\n\n return npdtype\n" ]
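The second code listing above (pandas' dtype utilities) funnels every is_*_dtype check through _is_dtype / _is_dtype_type, which normalize the argument with _get_dtype / pandas_dtype before applying a predicate built by classes() or classes_and_not_datetimelike(). A minimal, hedged sketch of these checkers via the public pandas.api.types re-exports (pandas ~0.24 assumed, matching the deprecation notes in the listing):

import pandas as pd
from pandas.api.types import (
    is_integer_dtype, is_datetime64_any_dtype, is_extension_array_dtype,
    pandas_dtype,
)

s = pd.Series([1, 2, 3])
print(is_integer_dtype(s))                # True: underlying dtype is int64
print(is_integer_dtype(s.astype(float)))  # False: float64 is not integer
print(is_datetime64_any_dtype(pd.DatetimeIndex(['2017-01-01'])))  # True

# pandas_dtype() normalizes strings/types into dtype objects and raises
# TypeError for anything it cannot understand (see its docstring above).
print(pandas_dtype('datetime64[ns]'))     # dtype('<M8[ns]')
print(is_extension_array_dtype(pd.Categorical(['a', 'b'])))       # True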
[ [ "pandas.isna", "pandas.io.formats.excel.ExcelFormatter", "pandas.api.types.is_list_like", "pandas.core.indexing._maybe_numeric_slice", "pandas.core.indexing._non_reducing_slice", "matplotlib.colors.rgb2hex", "pandas.core.common._pipe", "numpy.where", "pandas.core.dtypes.common.is_string_like", "matplotlib.colors.Normalize", "matplotlib.pyplot.cm.get_cmap", "pandas.core.common._any_not_none", "pandas.compat.range", "pandas.api.types.is_dict_like", "pandas.core.dtypes.common.is_float", "pandas.core.config.get_option" ], [ "scipy.sparse.issparse", "pandas._libs.lib.infer_dtype", "numpy.asarray", "numpy.datetime_data", "pandas.core.dtypes.inference.is_list_like", "pandas.core.dtypes.inference.is_scalar", "pandas.core.dtypes.dtypes.registry.find", "pandas.core.dtypes.dtypes.DatetimeTZDtype.is_dtype", "pandas.core.dtypes.dtypes.PeriodDtype.is_dtype", "pandas.Categorical", "pandas.core.dtypes.inference.is_string_like", "pandas.core.dtypes.dtypes.IntervalDtype.is_dtype", "pandas.core.dtypes.inference.is_hashable", "numpy.dtype", "pandas.core.dtypes.dtypes.CategoricalDtype.is_dtype" ] ]
achaiah/pywick
[ "9d663faf0c1660a9b8359a6472c164f658dfc8cb", "9d663faf0c1660a9b8359a6472c164f658dfc8cb" ]
[ "pywick/optimizers/ralamb.py", "pywick/optimizers/madgrad.py" ]
[ "# Source: https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20\n\nfrom torch.optim.optimizer import Optimizer\nimport torch\nimport math\n\n\nclass Ralamb(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(Ralamb, self).__init__(params, defaults)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Ralamb does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n\n if state['step'] == buffered[0]:\n N_sma, radam_step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n radam_step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n radam_step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = radam_step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n radam_step = p_data_fp32.clone()\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n radam_step.addcdiv_(-radam_step_size, exp_avg, denom)\n else:\n radam_step.add_(-radam_step_size, exp_avg)\n\n radam_norm = radam_step.pow(2).sum().sqrt()\n weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)\n if 0 in (weight_norm, radam_norm):\n trust_ratio = 1\n else:\n trust_ratio = weight_norm / radam_norm\n\n state['weight_norm'] = weight_norm\n state['adam_norm'] = radam_norm\n state['trust_ratio'] = trust_ratio\n\n if N_sma >= 5:\n p_data_fp32.addcdiv_(-radam_step_size * trust_ratio, exp_avg, denom)\n else:\n p_data_fp32.add_(-radam_step_size * trust_ratio, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss", "\"\"\" PyTorch MADGRAD optimizer\nMADGRAD: https://arxiv.org/abs/2101.11075\nCode from: https://github.com/facebookresearch/madgrad\n\"\"\"\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import TYPE_CHECKING, Any, Callable, Optional\n\nimport torch\nimport torch.optim\n\nif TYPE_CHECKING:\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass MADGRAD(torch.optim.Optimizer):\n \"\"\"\n MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic\n Optimization.\n .. _MADGRAD: https://arxiv.org/abs/2101.11075\n MADGRAD is a general purpose optimizer that can be used in place of SGD or\n Adam may converge faster and generalize better. Currently GPU-only.\n Typically, the same learning rate schedule that is used for SGD or Adam may\n be used. The overall learning rate is not comparable to either method and\n should be determined by a hyper-parameter sweep.\n MADGRAD requires less weight decay than other methods, often as little as\n zero. Momentum values used for SGD or Adam's beta1 should work here also.\n On sparse problems both weight_decay and momentum should be set to 0.\n Arguments:\n params (iterable):\n Iterable of parameters to optimize or dicts defining parameter groups.\n lr (float):\n Learning rate (default: 1e-2).\n momentum (float):\n Momentum value in the range [0,1) (default: 0.9).\n weight_decay (float):\n Weight decay, i.e. a L2 penalty (default: 0).\n eps (float):\n Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).\n \"\"\"\n\n def __init__(\n self,\n params: _params_t,\n lr: float = 1e-2,\n momentum: float = 0.9,\n weight_decay: float = 0,\n eps: float = 1e-6,\n decoupled_decay: bool = False,\n ):\n if momentum < 0 or momentum >= 1:\n raise ValueError(f\"Momentum {momentum} must be in the range [0,1]\")\n if lr <= 0:\n raise ValueError(f\"Learning rate {lr} must be positive\")\n if weight_decay < 0:\n raise ValueError(f\"Weight decay {weight_decay} must be non-negative\")\n if eps < 0:\n raise ValueError(f\"Eps must be non-negative\")\n\n defaults = dict(\n lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)\n super().__init__(params, defaults)\n\n @property\n def supports_memory_efficient_fp16(self) -> bool:\n return False\n\n @property\n def supports_flat_params(self) -> bool:\n return True\n\n @torch.no_grad()\n def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n eps = group['eps']\n lr = group['lr'] + eps\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n ck = 1 - momentum\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad\n if momentum != 0.0 and grad.is_sparse:\n raise RuntimeError(\"momentum != 0 is not compatible with sparse gradients\")\n\n state = self.state[p]\n if len(state) == 0:\n state['step'] = 0\n state['grad_sum_sq'] = torch.zeros_like(p)\n state['s'] = torch.zeros_like(p)\n if momentum != 0:\n state['x0'] = torch.clone(p).detach()\n\n state['step'] += 1\n grad_sum_sq = state['grad_sum_sq']\n s = state['s']\n lamb = lr * math.sqrt(state['step'])\n\n # Apply weight decay\n if weight_decay != 0:\n if group['decoupled_decay']:\n p.mul_(1.0 - group['lr'] * 
weight_decay)\n else:\n if grad.is_sparse:\n raise RuntimeError(\"weight_decay option is not compatible with sparse gradients\")\n grad.add_(p, alpha=weight_decay)\n\n if grad.is_sparse:\n grad = grad.coalesce()\n grad_val = grad._values()\n\n p_masked = p.sparse_mask(grad)\n grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)\n s_masked = s.sparse_mask(grad)\n\n # Compute x_0 from other known quantities\n rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)\n x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)\n\n # Dense + sparse op\n grad_sq = grad * grad\n grad_sum_sq.add_(grad_sq, alpha=lamb)\n grad_sum_sq_masked.add_(grad_sq, alpha=lamb)\n\n rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)\n\n s.add_(grad, alpha=lamb)\n s_masked._values().add_(grad_val, alpha=lamb)\n\n # update masked copy of p\n p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)\n # Copy updated masked p to dense p using an add operation\n p_masked._values().add_(p_kp1_masked_vals, alpha=-1)\n p.add_(p_masked, alpha=-1)\n else:\n if momentum == 0:\n # Compute x_0 from other known quantities\n rms = grad_sum_sq.pow(1 / 3).add_(eps)\n x0 = p.addcdiv(s, rms, value=1)\n else:\n x0 = state['x0']\n\n # Accumulate second moments\n grad_sum_sq.addcmul_(grad, grad, value=lamb)\n rms = grad_sum_sq.pow(1 / 3).add_(eps)\n\n # Update s\n s.add_(grad, alpha=lamb)\n\n # Step\n if momentum == 0:\n p.copy_(x0.addcdiv(s, rms, value=-1))\n else:\n z = x0.addcdiv(s, rms, value=-1)\n\n # p is a moving average of z\n p.mul_(1 - ck).add_(z, alpha=ck)\n\n return loss" ]
[ [ "torch.zeros_like" ], [ "torch.zeros_like", "torch.no_grad", "torch.clone", "torch.enable_grad" ] ]
peimengsui/ml_from_scratch
[ "5f5d276fee8f25ab91fd4342434aa23eb154a405" ]
[ "ml_from_scratch/logistic_regression.py" ]
[ "import numpy as np\nimport math\nfrom ml_from_scratch.activation_functions import Sigmoid\nfrom ml_from_scratch.utils import make_diagonal\n\n\nclass LogisticRegression():\n \"\"\" Logistic Regression classifier.\n Parameters:\n -----------\n n_iters: int\n Number of iterations running gradient descent, default is 1000\n lr: float\n learning rate\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If\n false then we use Newton Method.\n \"\"\"\n def __init__(self, n_iters=1000, lr=.1, gradient_descent=True):\n self.param = None\n self.n_iters = n_iters\n self.lr = lr\n self.gradient_descent = gradient_descent\n self.sigmoid = Sigmoid()\n\n def _initialize_parameters(self, X):\n n_features = np.shape(X)[1]\n # Initialize parameters between [-1/sqrt(N), 1/sqrt(N)]\n limit = 1 / math.sqrt(n_features)\n self.param = np.random.uniform(-limit, limit, (n_features,))\n\n def fit(self, X, y):\n self._initialize_parameters(X)\n # Tune parameters for n iterations\n for i in range(self.n_iters):\n # Make a new prediction\n y_pred = self.sigmoid(X.dot(self.param))\n if self.gradient_descent:\n # Move against the gradient of the loss function with\n # respect to the parameters to minimize the loss\n self.param -= self.lr * (y_pred - y).dot(X)\n else:\n # Make a diagonal matrix of the sigmoid gradient column vector\n diag_gradient = make_diagonal(self.sigmoid.gradient(X.dot(self.param)))\n # Batch opt:\n self.param = np.linalg.pinv(X.T.dot(diag_gradient).dot(X)).\\\n dot(X.T).dot(diag_gradient.dot(X).dot(self.param) + y - y_pred)\n\n def predict(self, X):\n y_pred = np.round(self.sigmoid(X.dot(self.param))).astype(int)\n return y_pred\n\n def predict_proba(self, X):\n p_pred = self.sigmoid(X.dot(self.param))\n return p_pred\n" ]
[ [ "numpy.random.uniform", "numpy.shape" ] ]
IharBakhanovich/DeepHyperion
[ "f7f696ba95124125dfe967ea4890d944a9958d77" ]
[ "DeepHyperion-BNG/self_driving/beamng_member.py" ]
[ "import hashlib\nimport random\nfrom typing import Tuple, Dict\n\nfrom self_driving.beamng_config import BeamNGConfig\nfrom self_driving.beamng_evaluator import BeamNGEvaluator\nfrom core.member import Member\nfrom self_driving.catmull_rom import catmull_rom\nfrom self_driving.road_bbox import RoadBoundingBox\nfrom self_driving.road_polygon import RoadPolygon\nfrom self_driving.edit_distance_polyline import iterative_levenshtein\n\nTuple4F = Tuple[float, float, float, float]\nTuple2F = Tuple[float, float]\n\n\nclass BeamNGMember(Member):\n \"\"\"A class representing a road returned by the RoadGenerator.\"\"\"\n counter = 0\n\n def __init__(self, control_nodes: Tuple4F, sample_nodes: Tuple4F, num_spline_nodes: int,\n road_bbox: RoadBoundingBox):\n super().__init__()\n BeamNGMember.counter += 1\n self.name = f'mbr{str(BeamNGMember.counter)}'\n self.name_ljust = self.name.ljust(7)\n self.control_nodes = control_nodes\n self.sample_nodes = sample_nodes\n self.num_spline_nodes = num_spline_nodes\n self.road_bbox = road_bbox\n self.config: BeamNGConfig = None\n self.problem: 'BeamNGProblem' = None\n self._evaluator: BeamNGEvaluator = None\n\n def clone(self):\n res = BeamNGMember(list(self.control_nodes), list(self.sample_nodes), self.num_spline_nodes, self.road_bbox)\n res.config = self.config\n res.problem = self.problem\n res.distance_to_boundary = self.distance_to_boundary\n return res\n\n def to_dict(self) -> dict:\n return {\n 'control_nodes': self.control_nodes,\n 'sample_nodes': self.sample_nodes,\n 'num_spline_nodes': self.num_spline_nodes,\n 'road_bbox_size': self.road_bbox.bbox.bounds,\n 'distance_to_boundary': self.distance_to_boundary\n }\n\n @classmethod\n def from_dict(cls, dict: Dict):\n road_bbox = RoadBoundingBox(dict['road_bbox_size'])\n res = BeamNGMember([tuple(t) for t in dict['control_nodes']],\n [tuple(t) for t in dict['sample_nodes']],\n dict['num_spline_nodes'], road_bbox)\n res.distance_to_boundary = dict['distance_to_boundary']\n return res\n\n def evaluate(self):\n if self.needs_evaluation():\n self.simulation = self.problem._get_evaluator().evaluate([self])\n print('eval mbr', self)\n\n #assert not self.needs_evaluation()\n\n def needs_evaluation(self):\n return self.distance_to_boundary is None or self.simulation is None\n\n def clear_evaluation(self):\n self.distance_to_boundary = None\n\n def is_valid(self):\n return (RoadPolygon.from_nodes(self.sample_nodes).is_valid() and\n self.road_bbox.contains(RoadPolygon.from_nodes(self.control_nodes[1:-1])))\n\n def distance(self, other: 'BeamNGMember'):\n #TODO\n #return frechet_dist(self.sample_nodes, other.sample_nodes)\n return iterative_levenshtein(self.sample_nodes, other.sample_nodes)\n #return frechet_dist(self.sample_nodes[0::3], other.sample_nodes[0::3])\n\n def to_tuple(self):\n import numpy as np\n barycenter = np.mean(self.control_nodes, axis=0)[:2]\n return barycenter\n\n def mutate(self) -> 'BeamNGMember':\n RoadMutator(self, lower_bound=-int(self.problem.config.MUTATION_EXTENT), upper_bound=int(self.problem.config.MUTATION_EXTENT)).mutate()\n self.distance_to_boundary = None\n return self\n\n def __repr__(self):\n eval_boundary = 'na'\n if self.distance_to_boundary:\n eval_boundary = str(self.distance_to_boundary)\n if self.distance_to_boundary > 0:\n eval_boundary = '+' + eval_boundary\n eval_boundary = '~' + eval_boundary\n eval_boundary = eval_boundary[:7].ljust(7)\n h = hashlib.sha256(str([tuple(node) for node in self.control_nodes]).encode('UTF-8')).hexdigest()[-5:]\n return f'{self.name_ljust} 
h={h} b={eval_boundary}'\n\n\nclass RoadMutator:\n NUM_UNDO_ATTEMPTS = 20\n\n def __init__(self, road: BeamNGMember, lower_bound=-2, upper_bound=2):\n self.road = road\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def mutate_gene(self, index, xy_prob=0.5) -> Tuple[int, int]:\n gene = list(self.road.control_nodes[index])\n # Choose the mutation extent\n candidate_mut_values = [i for i in range(self.lower_bound, self.upper_bound) if i !=0]\n mut_value = random.choice(candidate_mut_values)\n #mut_value = random.randint(self.lower_bound, self.upper_bound)\n # Avoid to choose 0\n #if mut_value == 0:\n # mut_value += 1\n\n # Select coordinate to mutate\n if random.random() < xy_prob:\n c = 1\n else:\n c = 0\n gene[c] += mut_value\n\n self.road.control_nodes[index] = tuple(gene)\n self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)\n return c, mut_value\n\n def undo_mutation(self, index, c, mut_value):\n gene = list(self.road.control_nodes[index])\n gene[c] -= mut_value\n self.road.control_nodes[index] = tuple(gene)\n self.road.sample_nodes = catmull_rom(self.road.control_nodes, self.road.num_spline_nodes)\n\n def mutate(self, num_undo_attempts=10):\n backup_nodes = list(self.road.control_nodes)\n attempted_genes = set()\n n = len(self.road.control_nodes) - 2\n seglength = 3\n candidate_length = n - (2 * seglength)\n assert(candidate_length > 0)\n\n def next_gene_index() -> int:\n if len(attempted_genes) == candidate_length:\n return -1\n i = None\n condition = False\n while not condition:\n i = random.randint(seglength, n - seglength)\n if i not in attempted_genes:\n condition = True\n assert(i is not None)\n assert seglength <= i <= n - seglength\n\n # i = random.randint(3, n - 3)\n # while i in attempted_genes:\n # i = random.randint(3, n-3)\n\n attempted_genes.add(i)\n return i\n\n gene_index = next_gene_index()\n while gene_index != -1:\n c, mut_value = self.mutate_gene(gene_index)\n attempt = 0\n is_valid = self.road.is_valid()\n while not is_valid and attempt < num_undo_attempts:\n self.undo_mutation(gene_index, c, mut_value)\n c, mut_value = self.mutate_gene(gene_index)\n attempt += 1\n is_valid = self.road.is_valid()\n if is_valid:\n break\n else:\n gene_index = next_gene_index()\n if gene_index == -1:\n raise ValueError(\"No gene can be mutated\")\n\n assert self.road.is_valid()\n assert self.road.control_nodes != backup_nodes" ]
[ [ "numpy.mean" ] ]
friedrichromstedt/moviemaker3
[ "7941a06d43bbbb63e45496044040a163ab97d78d" ]
[ "moviemaker3/math/angle.py" ]
[ "import numpy\nfrom fframework import asfunction, OpFunction\n\n__all__ = ['Angle']\n\nclass Angle(OpFunction):\n \"\"\"Transforms a mesh into the angle of the mesh to the x axis.\"\"\"\n\n def __init__(self, mesh):\n \"\"\"*mesh* is the mesh Function.\"\"\"\n\n self.mesh = asfunction(mesh)\n\n def __call__(self, ps):\n \"\"\"Returns the arctan2. The (y, x) coordinate is in the last \n dimension.\"\"\"\n\n meshT = self.mesh(ps).T\n return numpy.arctan2(meshT[0], meshT[1]).T\n" ]
[ [ "numpy.arctan2" ] ]
motional/polarstream
[ "74af9548cad69a4f546b83dae7b87454bc590c9e" ]
[ "det3d/core/bbox/box_np_ops.py" ]
[ "from pathlib import Path\r\n\r\nimport numba\r\nimport numpy as np\r\nfrom det3d.core.bbox.geometry import (\r\n points_count_convex_polygon_3d_jit,\r\n points_in_convex_polygon_3d_jit,\r\n)\r\ntry:\r\n from spconv.utils import rbbox_intersection, rbbox_iou\r\nexcept:\r\n print(\"Import spconv fail, no support for sparse convolution!\")\r\n\r\n\r\ndef points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):\r\n rbbox_corners = center_to_corner_box3d(\r\n rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis\r\n )\r\n surfaces = corner_to_surfaces_3d(rbbox_corners)\r\n return points_count_convex_polygon_3d_jit(points[:, :3], surfaces)\r\n\r\n\r\ndef riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):\r\n # less than 50ms when used in second one thread. 10x slower than gpu\r\n boxes_corners = center_to_corner_box2d(\r\n rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]\r\n )\r\n boxes_standup = corner_to_standup_nd(boxes_corners)\r\n qboxes_corners = center_to_corner_box2d(\r\n qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]\r\n )\r\n qboxes_standup = corner_to_standup_nd(qboxes_corners)\r\n # if standup box not overlapped, rbbox not overlapped too.\r\n standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)\r\n return rbbox_iou(boxes_corners, qboxes_corners, standup_iou, standup_thresh)\r\n\r\n\r\ndef rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):\r\n # less than 50ms when used in second one thread. 10x slower than gpu\r\n boxes_corners = center_to_corner_box2d(\r\n rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]\r\n )\r\n boxes_standup = corner_to_standup_nd(boxes_corners)\r\n qboxes_corners = center_to_corner_box2d(\r\n qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]\r\n )\r\n qboxes_standup = corner_to_standup_nd(qboxes_corners)\r\n # if standup box not overlapped, rbbox not overlapped too.\r\n standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)\r\n return rbbox_intersection(\r\n boxes_corners, qboxes_corners, standup_iou, standup_thresh\r\n )\r\n\r\n\r\ndef corners_nd(dims, origin=0.5):\r\n \"\"\"generate relative box corners based on length per dim and\r\n origin point.\r\n\r\n Args:\r\n dims (float array, shape=[N, ndim]): array of length per dim\r\n origin (list or array or float): origin point relate to smallest point.\r\n\r\n Returns:\r\n float array, shape=[N, 2 ** ndim, ndim]: returned corners.\r\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\r\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\r\n where x0 < x1, y0 < y1, z0 < z1\r\n \"\"\"\r\n ndim = int(dims.shape[1])\r\n corners_norm = np.stack(\r\n np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1\r\n ).astype(dims.dtype)\r\n # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1\r\n # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\r\n # so need to convert to a format which is convenient to do other computing.\r\n # for 2d boxes, format is clockwise start with minimum point\r\n # for 3d boxes, please draw lines by your hand.\r\n if ndim == 2:\r\n # generate clockwise box corners\r\n corners_norm = corners_norm[[0, 1, 3, 2]]\r\n elif ndim == 3:\r\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\r\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\r\n corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim])\r\n return corners\r\n\r\n\r\n@numba.njit\r\ndef corners_2d_jit(dims, origin=0.5):\r\n ndim = 2\r\n corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], 
dtype=dims.dtype)\r\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\r\n corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))\r\n return corners\r\n\r\n\r\n@numba.njit\r\ndef corners_3d_jit(dims, origin=0.5):\r\n ndim = 3\r\n corners_norm = np.array(\r\n [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],\r\n dtype=dims.dtype,\r\n ).reshape((8, 3))\r\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\r\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\r\n corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))\r\n return corners\r\n\r\n\r\n@numba.njit\r\ndef corner_to_standup_nd_jit(boxes_corner):\r\n num_boxes = boxes_corner.shape[0]\r\n ndim = boxes_corner.shape[-1]\r\n result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)\r\n for i in range(num_boxes):\r\n for j in range(ndim):\r\n result[i, j] = np.min(boxes_corner[i, :, j])\r\n for j in range(ndim):\r\n result[i, j + ndim] = np.max(boxes_corner[i, :, j])\r\n return result\r\n\r\n\r\ndef corner_to_standup_nd(boxes_corner):\r\n assert len(boxes_corner.shape) == 3\r\n standup_boxes = []\r\n standup_boxes.append(np.min(boxes_corner, axis=1))\r\n standup_boxes.append(np.max(boxes_corner, axis=1))\r\n return np.concatenate(standup_boxes, -1)\r\n\r\n\r\ndef rbbox2d_to_near_bbox(rbboxes):\r\n \"\"\"convert rotated bbox to nearest 'standing' or 'lying' bbox.\r\n Args:\r\n rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes\r\n Returns:\r\n bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes\r\n \"\"\"\r\n rots = rbboxes[..., -1]\r\n rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))\r\n cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]\r\n bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])\r\n bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])\r\n return bboxes\r\n\r\n\r\ndef rotation_3d_in_axis(points, angles, axis=0):\r\n # points: [N, point_size, 3]\r\n rot_sin = np.sin(angles)\r\n rot_cos = np.cos(angles)\r\n ones = np.ones_like(rot_cos)\r\n zeros = np.zeros_like(rot_cos)\r\n if axis == 1:\r\n rot_mat_T = np.stack(\r\n [\r\n [rot_cos, zeros, -rot_sin],\r\n [zeros, ones, zeros],\r\n [rot_sin, zeros, rot_cos],\r\n ]\r\n )\r\n elif axis == 2 or axis == -1:\r\n rot_mat_T = np.stack(\r\n [\r\n [rot_cos, -rot_sin, zeros],\r\n [rot_sin, rot_cos, zeros],\r\n [zeros, zeros, ones],\r\n ]\r\n )\r\n elif axis == 0:\r\n rot_mat_T = np.stack(\r\n [\r\n [zeros, rot_cos, -rot_sin],\r\n [zeros, rot_sin, rot_cos],\r\n [ones, zeros, zeros],\r\n ]\r\n )\r\n else:\r\n raise ValueError(\"axis should in range\")\r\n\r\n return np.einsum(\"aij,jka->aik\", points, rot_mat_T)\r\n\r\n\r\ndef rotation_points_single_angle(points, angle, axis=0):\r\n # points: [N, 3]\r\n rot_sin = np.sin(angle)\r\n rot_cos = np.cos(angle)\r\n if axis == 1:\r\n rot_mat_T = np.array(\r\n [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],\r\n dtype=points.dtype,\r\n )\r\n elif axis == 2 or axis == -1:\r\n rot_mat_T = np.array(\r\n [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],\r\n dtype=points.dtype,\r\n )\r\n elif axis == 0:\r\n rot_mat_T = np.array(\r\n [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],\r\n dtype=points.dtype,\r\n )\r\n else:\r\n raise ValueError(\"axis should in range\")\r\n\r\n return points @ rot_mat_T\r\n\r\n\r\ndef rotation_2d(points, angles):\r\n \"\"\"rotation 2d points based on origin point clockwise when angle positive.\r\n\r\n Args:\r\n points (float 
array, shape=[N, point_size, 2]): points to be rotated.\r\n angles (float array, shape=[N]): rotation angle.\r\n\r\n Returns:\r\n float array: same shape as points\r\n \"\"\"\r\n rot_sin = np.sin(angles)\r\n rot_cos = np.cos(angles)\r\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\r\n return np.einsum(\"aij,jka->aik\", points, rot_mat_T)\r\n\r\n\r\ndef rotation_box(box_corners, angle):\r\n \"\"\"rotation 2d points based on origin point clockwise when angle positive.\r\n\r\n Args:\r\n points (float array, shape=[N, point_size, 2]): points to be rotated.\r\n angle (float): rotation angle.\r\n\r\n Returns:\r\n float array: same shape as points\r\n \"\"\"\r\n rot_sin = np.sin(angle)\r\n rot_cos = np.cos(angle)\r\n rot_mat_T = np.array(\r\n [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype\r\n )\r\n return box_corners @ rot_mat_T\r\n\r\n\r\ndef center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):\r\n \"\"\"convert kitti locations, dimensions and angles to corners\r\n\r\n Args:\r\n centers (float array, shape=[N, 3]): locations in kitti label file.\r\n dims (float array, shape=[N, 3]): dimensions in kitti label file.\r\n angles (float array, shape=[N]): rotation_y in kitti label file.\r\n origin (list or array or float): origin point relate to smallest point.\r\n use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.\r\n axis (int): rotation axis. 1 for camera and 2 for lidar.\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n # 'length' in kitti format is in x axis.\r\n # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\r\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\r\n corners = corners_nd(dims, origin=origin)\r\n # corners: [N, 8, 3]\r\n if angles is not None:\r\n corners = rotation_3d_in_axis(corners, angles, axis=axis)\r\n corners += centers.reshape([-1, 1, 3])\r\n return corners\r\n\r\n\r\ndef center_to_corner_box2d(centers, dims, angles=None, origin=0.5):\r\n \"\"\"convert kitti locations, dimensions and angles to corners.\r\n format: center(xy), dims(xy), angles(clockwise when positive)\r\n\r\n Args:\r\n centers (float array, shape=[N, 2]): locations in kitti label file.\r\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\r\n angles (float array, shape=[N]): rotation_y in kitti label file.\r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n # 'length' in kitti format is in x axis.\r\n # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\r\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\r\n corners = corners_nd(dims, origin=origin)\r\n # corners: [N, 4, 2]\r\n if angles is not None:\r\n corners = rotation_2d(corners, angles)\r\n corners += centers.reshape([-1, 1, 2])\r\n return corners\r\n\r\n\r\n@numba.jit(nopython=True)\r\ndef box2d_to_corner_jit(boxes):\r\n num_box = boxes.shape[0]\r\n corners_norm = np.zeros((4, 2), dtype=boxes.dtype)\r\n corners_norm[1, 1] = 1.0\r\n corners_norm[2] = 1.0\r\n corners_norm[3, 0] = 1.0\r\n corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)\r\n corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)\r\n rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)\r\n box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)\r\n for i in range(num_box):\r\n rot_sin = np.sin(boxes[i, -1])\r\n rot_cos = np.cos(boxes[i, -1])\r\n rot_mat_T[0, 0] = rot_cos\r\n rot_mat_T[0, 1] = -rot_sin\r\n rot_mat_T[1, 0] = rot_sin\r\n rot_mat_T[1, 1] = rot_cos\r\n box_corners[i] = corners[i] @ rot_mat_T + 
boxes[i, :2]\r\n return box_corners\r\n\r\n\r\ndef rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.5], axis=2):\r\n return center_to_corner_box3d(\r\n rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis\r\n )\r\n\r\n\r\ndef rbbox3d_to_bev_corners(rbboxes, origin=0.5):\r\n return center_to_corner_box2d(\r\n rbboxes[..., :2], rbboxes[..., 3:5], rbboxes[..., 6], origin\r\n )\r\n\r\n\r\ndef minmax_to_corner_2d(minmax_box):\r\n ndim = minmax_box.shape[-1] // 2\r\n center = minmax_box[..., :ndim]\r\n dims = minmax_box[..., ndim:] - center\r\n return center_to_corner_box2d(center, dims, origin=0.0)\r\n\r\n\r\ndef minmax_to_corner_2d_v2(minmax_box):\r\n # N, 4 -> N 4 2\r\n return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)\r\n\r\n\r\ndef minmax_to_corner_3d(minmax_box):\r\n ndim = minmax_box.shape[-1] // 2\r\n center = minmax_box[..., :ndim]\r\n dims = minmax_box[..., ndim:] - center\r\n return center_to_corner_box3d(center, dims, origin=0.0)\r\n\r\n\r\ndef minmax_to_center_2d(minmax_box):\r\n ndim = minmax_box.shape[-1] // 2\r\n center_min = minmax_box[..., :ndim]\r\n dims = minmax_box[..., ndim:] - center_min\r\n center = center_min + 0.5 * dims\r\n return np.concatenate([center, dims], axis=-1)\r\n\r\n\r\ndef center_to_minmax_2d_0_5(centers, dims):\r\n return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)\r\n\r\n\r\ndef center_to_minmax_2d(centers, dims, origin=0.5):\r\n if origin == 0.5:\r\n return center_to_minmax_2d_0_5(centers, dims)\r\n corners = center_to_corner_box2d(centers, dims, origin=origin)\r\n return corners[:, [0, 2]].reshape([-1, 4])\r\n\r\n\r\ndef limit_period(val, offset=0.5, period=np.pi):\r\n return val - np.floor(val / period + offset) * period\r\n\r\n\r\ndef projection_matrix_to_CRT_kitti(proj):\r\n # P = C @ [R|T]\r\n # C is upper triangular matrix, so we need to inverse CR and use QR\r\n # stable for all kitti camera projection matrix\r\n CR = proj[0:3, 0:3]\r\n CT = proj[0:3, 3]\r\n RinvCinv = np.linalg.inv(CR)\r\n Rinv, Cinv = np.linalg.qr(RinvCinv)\r\n C = np.linalg.inv(Cinv)\r\n R = np.linalg.inv(Rinv)\r\n T = Cinv @ CT\r\n return C, R, T\r\n\r\n\r\ndef get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):\r\n fku = C[0, 0]\r\n fkv = -C[1, 1]\r\n u0v0 = C[0:2, 2]\r\n z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]\r\n b = bbox_image\r\n box_corners = np.array(\r\n [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype\r\n )\r\n near_box_corners = (box_corners - u0v0) / np.array(\r\n [fku / near_clip, -fkv / near_clip], dtype=C.dtype\r\n )\r\n far_box_corners = (box_corners - u0v0) / np.array(\r\n [fku / far_clip, -fkv / far_clip], dtype=C.dtype\r\n )\r\n ret_xy = np.concatenate([near_box_corners, far_box_corners], axis=0) # [8, 2]\r\n ret_xyz = np.concatenate([ret_xy, z_points], axis=1)\r\n return ret_xyz\r\n\r\n\r\ndef get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):\r\n fku = C[0, 0]\r\n fkv = -C[1, 1]\r\n u0v0 = C[0:2, 2]\r\n num_box = bboxes.shape[0]\r\n z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[\r\n np.newaxis, :, np.newaxis\r\n ]\r\n z_points = np.tile(z_points, [num_box, 1, 1])\r\n box_corners = minmax_to_corner_2d_v2(bboxes)\r\n near_box_corners = (box_corners - u0v0) / np.array(\r\n [fku / near_clip, -fkv / near_clip], dtype=C.dtype\r\n )\r\n far_box_corners = (box_corners - u0v0) / np.array(\r\n [fku / far_clip, -fkv / far_clip], dtype=C.dtype\r\n )\r\n ret_xy = np.concatenate([near_box_corners, 
far_box_corners], axis=1) # [8, 2]\r\n ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)\r\n return ret_xyz\r\n\r\n\r\n@numba.njit\r\ndef _add_rgb_to_points_kernel(points_2d, image, points_rgb):\r\n num_points = points_2d.shape[0]\r\n image_h, image_w = image.shape[:2]\r\n for i in range(num_points):\r\n img_pos = np.floor(points_2d[i]).astype(np.int32)\r\n if img_pos[0] >= 0 and img_pos[0] < image_w:\r\n if img_pos[1] >= 0 and img_pos[1] < image_h:\r\n points_rgb[i, :] = image[img_pos[1], img_pos[0], :]\r\n # image[img_pos[1], img_pos[0]] = 0\r\n\r\n\r\ndef add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=[5, 5]):\r\n kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)\r\n # image = cv2.filter2D(image, -1, kernel)\r\n points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)\r\n points_2d = project_to_image(points_cam, P2)\r\n points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)\r\n _add_rgb_to_points_kernel(points_2d, image, points_rgb)\r\n return points_rgb\r\n\r\n\r\ndef project_to_image(points_3d, proj_mat):\r\n points_shape = list(points_3d.shape)\r\n points_shape[-1] = 1\r\n points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1)\r\n point_2d = points_4 @ proj_mat.T\r\n point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]\r\n return point_2d_res\r\n\r\n\r\ndef camera_to_lidar(points, r_rect, velo2cam):\r\n points_shape = list(points.shape[0:-1])\r\n if points.shape[-1] == 3:\r\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\r\n lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)\r\n return lidar_points[..., :3]\r\n\r\n\r\ndef lidar_to_camera(points, r_rect, velo2cam):\r\n points_shape = list(points.shape[:-1])\r\n if points.shape[-1] == 3:\r\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\r\n camera_points = points @ (r_rect @ velo2cam).T\r\n return camera_points[..., :3]\r\n\r\n\r\ndef box_camera_to_lidar(data, r_rect, velo2cam):\r\n xyz = data[:, 0:3]\r\n l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]\r\n r = data[:, 6:7]\r\n xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)\r\n return np.concatenate([xyz_lidar, w, l, h, r], axis=1)\r\n\r\n\r\ndef box_lidar_to_camera(data, r_rect, velo2cam):\r\n xyz_lidar = data[:, 0:3]\r\n w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]\r\n r = data[:, 6:7]\r\n xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)\r\n return np.concatenate([xyz, l, h, w, r], axis=1)\r\n\r\n\r\ndef remove_outside_points(points, rect, Trv2c, P2, image_shape):\r\n # 5x faster than remove_outside_points_v1(2ms vs 10ms)\r\n C, R, T = projection_matrix_to_CRT_kitti(P2)\r\n image_bbox = [0, 0, image_shape[1], image_shape[0]]\r\n frustum = get_frustum(image_bbox, C)\r\n frustum -= T\r\n frustum = np.linalg.inv(R) @ frustum.T\r\n frustum = camera_to_lidar(frustum.T, rect, Trv2c)\r\n frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])\r\n indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)\r\n points = points[indices.reshape([-1])]\r\n return points\r\n\r\n\r\n@numba.jit(nopython=True)\r\ndef iou_jit(boxes, query_boxes, eps=1.0):\r\n \"\"\"calculate box iou. 
note that jit version runs 2x faster than cython in\r\n my machine!\r\n Parameters\r\n ----------\r\n boxes: (N, 4) ndarray of float\r\n query_boxes: (K, 4) ndarray of float\r\n Returns\r\n -------\r\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\r\n \"\"\"\r\n N = boxes.shape[0]\r\n K = query_boxes.shape[0]\r\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\r\n for k in range(K):\r\n box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (\r\n query_boxes[k, 3] - query_boxes[k, 1] + eps\r\n )\r\n for n in range(N):\r\n iw = (\r\n min(boxes[n, 2], query_boxes[k, 2])\r\n - max(boxes[n, 0], query_boxes[k, 0])\r\n + eps\r\n )\r\n if iw > 0:\r\n ih = (\r\n min(boxes[n, 3], query_boxes[k, 3])\r\n - max(boxes[n, 1], query_boxes[k, 1])\r\n + eps\r\n )\r\n if ih > 0:\r\n ua = (\r\n (boxes[n, 2] - boxes[n, 0] + eps)\r\n * (boxes[n, 3] - boxes[n, 1] + eps)\r\n + box_area\r\n - iw * ih\r\n )\r\n overlaps[n, k] = iw * ih / ua\r\n return overlaps\r\n\r\n\r\n@numba.jit(nopython=True)\r\ndef iou_3d_jit(boxes, query_boxes, add1=True):\r\n \"\"\"calculate box iou3d,\r\n ----------\r\n boxes: (N, 6) ndarray of float\r\n query_boxes: (K, 6) ndarray of float\r\n Returns\r\n -------\r\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\r\n \"\"\"\r\n N = boxes.shape[0]\r\n K = query_boxes.shape[0]\r\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\r\n if add1:\r\n add1 = 1.0\r\n else:\r\n add1 = 0.0\r\n for k in range(K):\r\n box_area = (\r\n (query_boxes[k, 3] - query_boxes[k, 0] + add1)\r\n * (query_boxes[k, 4] - query_boxes[k, 1] + add1)\r\n * (query_boxes[k, 5] - query_boxes[k, 2] + add1)\r\n )\r\n for n in range(N):\r\n iw = (\r\n min(boxes[n, 3], query_boxes[k, 3])\r\n - max(boxes[n, 0], query_boxes[k, 0])\r\n + add1\r\n )\r\n if iw > 0:\r\n ih = (\r\n min(boxes[n, 4], query_boxes[k, 4])\r\n - max(boxes[n, 1], query_boxes[k, 1])\r\n + add1\r\n )\r\n if ih > 0:\r\n il = (\r\n min(boxes[n, 5], query_boxes[k, 5])\r\n - max(boxes[n, 2], query_boxes[k, 2])\r\n + add1\r\n )\r\n if il > 0:\r\n ua = float(\r\n (boxes[n, 3] - boxes[n, 0] + add1)\r\n * (boxes[n, 4] - boxes[n, 1] + add1)\r\n * (boxes[n, 5] - boxes[n, 2] + add1)\r\n + box_area\r\n - iw * ih * il\r\n )\r\n overlaps[n, k] = iw * ih * il / ua\r\n return overlaps\r\n\r\n\r\n@numba.jit(nopython=True)\r\ndef iou_nd_jit(boxes, query_boxes, add1=True):\r\n \"\"\"calculate box iou nd, 2x slower than iou_jit.\r\n ----------\r\n boxes: (N, ndim * 2) ndarray of float\r\n query_boxes: (K, ndim * 2) ndarray of float\r\n Returns\r\n -------\r\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\r\n \"\"\"\r\n N = boxes.shape[0]\r\n K = query_boxes.shape[0]\r\n ndim = boxes.shape[1] // 2\r\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\r\n side_lengths = np.zeros((ndim,), dtype=boxes.dtype)\r\n if add1:\r\n add1 = 1.0\r\n else:\r\n add1 = 0.0\r\n invalid = False\r\n for k in range(K):\r\n qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1\r\n for i in range(1, ndim):\r\n qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1\r\n for n in range(N):\r\n invalid = False\r\n for i in range(ndim):\r\n side_length = (\r\n min(boxes[n, i + ndim], query_boxes[k, i + ndim])\r\n - max(boxes[n, i], query_boxes[k, i])\r\n + add1\r\n )\r\n if side_length <= 0:\r\n invalid = True\r\n break\r\n side_lengths[i] = side_length\r\n if not invalid:\r\n box_area = boxes[n, ndim] - boxes[n, 0] + add1\r\n for i in range(1, ndim):\r\n box_area *= boxes[n, ndim + i] - boxes[n, i] + add1\r\n inter = 
side_lengths[0]\r\n for i in range(1, ndim):\r\n inter *= side_lengths[i]\r\n # inter = np.prod(side_lengths)\r\n ua = float(box_area + qbox_area - inter)\r\n overlaps[n, k] = inter / ua\r\n\r\n return overlaps\r\n\r\n\r\ndef points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):\r\n rbbox_corners = center_to_corner_box3d(\r\n rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis\r\n )\r\n surfaces = corner_to_surfaces_3d(rbbox_corners)\r\n indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)\r\n return indices\r\n\r\n\r\ndef corner_to_surfaces_3d(corners):\r\n \"\"\"convert 3d box corners from corner function above\r\n to surfaces that normal vectors all direct to internal.\r\n\r\n Args:\r\n corners (float array, [N, 8, 3]): 3d box corners.\r\n Returns:\r\n surfaces (float array, [N, 6, 4, 3]):\r\n \"\"\"\r\n # box_corners: [N, 8, 3], must from corner functions in this module\r\n surfaces = np.array(\r\n [\r\n [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],\r\n [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],\r\n [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],\r\n [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],\r\n [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],\r\n [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],\r\n ]\r\n ).transpose([2, 0, 1, 3])\r\n return surfaces\r\n\r\n\r\n@numba.jit(nopython=True)\r\ndef corner_to_surfaces_3d_jit(corners):\r\n \"\"\"convert 3d box corners from corner function above\r\n to surfaces that normal vectors all direct to internal.\r\n\r\n Args:\r\n corners (float array, [N, 8, 3]): 3d box corners.\r\n Returns:\r\n surfaces (float array, [N, 6, 4, 3]):\r\n \"\"\"\r\n # box_corners: [N, 8, 3], must from corner functions in this module\r\n num_boxes = corners.shape[0]\r\n surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)\r\n corner_idxes = np.array(\r\n [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]\r\n ).reshape(6, 4)\r\n for i in range(num_boxes):\r\n for j in range(6):\r\n for k in range(4):\r\n surfaces[i, j, k] = corners[i, corner_idxes[j, k]]\r\n return surfaces\r\n\r\n\r\ndef assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):\r\n \"\"\"assign a 0/1 label to each voxel based on whether\r\n the center of voxel is in gt_box. LIDAR.\r\n \"\"\"\r\n voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)\r\n coors_range = np.array(coors_range, dtype=gt_boxes.dtype)\r\n shift = coors_range[:3]\r\n voxel_origins = coors[:, ::-1] * voxel_size + shift\r\n voxel_centers = voxel_origins + voxel_size * 0.5\r\n gt_box_corners = center_to_corner_box3d(\r\n gt_boxes[:, :3] - voxel_size * 0.5,\r\n gt_boxes[:, 3:6] + voxel_size,\r\n gt_boxes[:, 6],\r\n origin=[0.5, 0.5, 0.5],\r\n axis=2,\r\n )\r\n gt_surfaces = corner_to_surfaces_3d(gt_box_corners)\r\n ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)\r\n return np.any(ret, axis=1).astype(np.int64)\r\n\r\n\r\ndef assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):\r\n \"\"\"assign a 0/1 label to each voxel based on whether\r\n the center of voxel is in gt_box. 
LIDAR.\r\n \"\"\"\r\n voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)\r\n coors_range = np.array(coors_range, dtype=gt_boxes.dtype)\r\n shift = coors_range[:3]\r\n voxel_origins = coors[:, ::-1] * voxel_size + shift\r\n voxel_maxes = voxel_origins + voxel_size\r\n voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)\r\n voxel_corners = minmax_to_corner_3d(voxel_minmax)\r\n gt_box_corners = center_to_corner_box3d(\r\n gt_boxes[:, :3],\r\n gt_boxes[:, 3:6],\r\n gt_boxes[:, 6],\r\n origin=[0.5, 0.5, 0.5],\r\n axis=2,\r\n )\r\n gt_surfaces = corner_to_surfaces_3d(gt_box_corners)\r\n voxel_corners_flat = voxel_corners.reshape([-1, 3])\r\n ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)\r\n ret = ret.reshape([-1, 8, ret.shape[-1]])\r\n return ret.any(-1).any(-1).astype(np.int64)\r\n\r\n\r\ndef image_box_region_area(img_cumsum, bbox):\r\n \"\"\"check a 2d voxel is contained by a box. used to filter empty\r\n anchors.\r\n Summed-area table algorithm:\r\n ==> W\r\n ------------------\r\n | | |\r\n |------A---------B\r\n | | |\r\n | | |\r\n |----- C---------D\r\n Iabcd = ID-IB-IC+IA\r\n Args:\r\n img_cumsum: [M, H, W](yx) cumsumed image.\r\n bbox: [N, 4](xyxy) bounding box,\r\n \"\"\"\r\n N = bbox.shape[0]\r\n M = img_cumsum.shape[0]\r\n ret = np.zeros([N, M], dtype=img_cumsum.dtype)\r\n ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]\r\n IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]\r\n IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]\r\n IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]\r\n ret = ID - IB - IC + IA\r\n return ret\r\n\r\n\r\ndef get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):\r\n x_vsize = voxel_size[0]\r\n y_vsize = voxel_size[1]\r\n max_x = points[:, 0].max()\r\n max_y = points[:, 1].max()\r\n min_x = points[:, 0].min()\r\n min_y = points[:, 1].min()\r\n max_x = np.floor(max_x / (x_vsize * downsample) + 1) * (x_vsize * downsample)\r\n max_y = np.floor(max_y / (y_vsize * downsample) + 1) * (y_vsize * downsample)\r\n min_x = np.floor(min_x / (x_vsize * downsample)) * (x_vsize * downsample)\r\n min_y = np.floor(min_y / (y_vsize * downsample)) * (y_vsize * downsample)\r\n max_x = np.minimum(max_x + margin, bound[2])\r\n max_y = np.minimum(max_y + margin, bound[3])\r\n min_x = np.maximum(min_x - margin, bound[0])\r\n min_y = np.maximum(min_y - margin, bound[1])\r\n return np.array([min_x, min_y, max_x, max_y])\r\n \r\n\r\ndef box3d_to_bbox(box3d, rect, Trv2c, P2):\r\n box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)\r\n box_corners = center_to_corner_box3d(\r\n box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1\r\n )\r\n box_corners_in_image = project_to_image(box_corners, P2)\r\n # box_corners_in_image: [N, 8, 2]\r\n minxy = np.min(box_corners_in_image, axis=1)\r\n maxxy = np.max(box_corners_in_image, axis=1)\r\n bbox = np.concatenate([minxy, maxxy], axis=1)\r\n return bbox\r\n\r\n\r\ndef change_box3d_center_(box3d, src, dst):\r\n dst = np.array(dst, dtype=box3d.dtype)\r\n src = np.array(src, dtype=box3d.dtype)\r\n box3d[..., :3] += box3d[..., 3:6] * (dst - src)\r\n\r\ndef encode_parts(relative_shifts):\r\n parts = np.zeros((len(relative_shifts),), dtype=np.int32)\r\n mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] >= 0)\r\n parts[mask] = 0\r\n mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] >= 0)\r\n parts[mask] = 1\r\n mask = (relative_shifts[:, 0] < 0) & (relative_shifts[:, 1] < 0)\r\n parts[mask] = 2\r\n mask = (relative_shifts[:, 0] >= 0) & (relative_shifts[:, 1] < 0)\r\n 
parts[mask] = 3\r\n return parts" ]
[ [ "numpy.ones_like", "numpy.minimum", "numpy.tile", "numpy.min", "numpy.linalg.qr", "numpy.where", "numpy.cos", "numpy.concatenate", "numpy.max", "numpy.sin", "numpy.zeros_like", "numpy.prod", "numpy.arange", "numpy.linalg.inv", "numpy.array", "numpy.zeros", "numpy.stack", "numpy.einsum", "numpy.floor", "numpy.ones", "numpy.any", "numpy.maximum" ] ]
laphisboy/mvsnerf
[ "ea1aecd7d653b04a7f4bec27ad978f64a038bc92" ]
[ "renderer_blender_src.py" ]
[ "import argparse\nimport re\n\n####\n# # Box 1\n####\n\nimport sys,os,imageio,lpips\nroot = '/home/youngsun/documents/mvs/mvsnerf_timing'\nos.chdir(root)\nsys.path.append(root)\n\nfrom opt_src import config_parser\nfrom data import dataset_dict\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\n\n# models\nfrom models_src import *\nfrom renderer_src import *\nfrom data.ray_utils import get_rays\n\nfrom tqdm import tqdm\n\n\nfrom skimage.metrics import structural_similarity\n\n# pytorch-lightning\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import LightningModule, Trainer, loggers\n\n\nfrom data.ray_utils import ray_marcher\n\nimport torch\n\n\ntorch.cuda.set_device(0)\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\n####\n# # Box 2\n####\ndef decode_batch(batch):\n rays = batch['rays'] # (B, 8)\n rgbs = batch['rgbs'] # (B, 3)\n return rays, rgbs\n\ndef unpreprocess(data, shape=(1,1,3,1,1)):\n # to unnormalize image for visualization\n # data N V C H W\n device = data.device\n mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)\n std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)\n\n return (data - mean) / std\n\ndef read_depth(filename):\n depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)\n depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,\n interpolation=cv2.INTER_NEAREST) # (600, 800)\n depth_h = depth_h[44:556, 80:720] # (512, 640)\n# depth = cv2.resize(depth_h, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_NEAREST)#!!!!!!!!!!!!!!!!!!!!!!!!!\n mask = depth>0\n return depth_h,mask\n\nloss_fn_vgg = lpips.LPIPS(net='vgg') \nmse2psnr = lambda x : -10. * np.log(x) / np.log(10.)\n\n\n####\n# # Box 3\n####\n\n# create function for returning dense, sparse, far views\n\ndef get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest', \n fixed_idxs=None,\n is_source_target_overlap=False):\n \n pair_idx = get_pair_idx(source_dataset, target_position, N_views, view_type, fixed_idxs, is_source_target_overlap)\n \n imgs_source, proj_mats, near_far_source, pose_source = source_dataset.read_source_views(pair_idx=pair_idx,device=device)\n \n return imgs_source, proj_mats, near_far_source, pose_source\n \n\ndef get_pair_idx(source_dataset, target_position, N_views, view_type='nearest', \n fixed_idxs=None,\n is_source_target_overlap=False):\n \n positions = source_dataset.poses[:,:3,3]\n dis = np.sum(np.abs(positions - target_position), axis=-1)\n \n dis_sort = np.argsort(dis)\n \n if is_source_target_overlap:\n dis_sort = dis_sort[1:]\n \n \n if view_type == 'nearest': # or \"as dense as possible ㅎㅎ\"\n pair_idx = dis_sort[:N_views]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n if view_type == 'dense':\n idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort()[0]\n pair_idx = dis_sort[idxs]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n if view_type == 'random': # i know its unnecessarily long... 
\n idxs = torch.randperm(len(dis_sort))[:N_views]\n pair_idx = dis_sort[idxs]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n if view_type == 'sparse':\n idxs = torch.linspace(0, len(dis_sort), steps=N_views+1).round()\n idxs = [np.random.choice(range(int(idxs[i]), int(idxs[i+1]))) for i in range(len(idxs)-1)]\n pair_idx = dis_sort[idxs]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n if view_type == 'far':\n idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort(descending=True)[0]\n pair_idx = dis_sort[::-1][idxs]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n if view_type == 'farthest':\n pair_idx = dis_sort[::-1][:N_views]\n pair_idx = [source_dataset.img_idx[item] for item in pair_idx]\n \n # return index for the case of 'fixed'\n if view_type == 'fixed':\n pair_idx = fixed_idxs\n \n return pair_idx\n\n####\n# # Box 4\n####\n\ndef render_blender(view_type='nearest', \n scenes=['ficus'], \n num_src_views=3, \n ckpt='base-3src-dense.tar', \n source_split='train',\n target_split='val',\n select_index=None, \n is_fixed=False, \n is_source_target_overlap=False\n ):\n \n psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]\n # for i_scene, scene in enumerate(['ship','mic','chair','lego','drums','ficus','materials','hotdog']):#\n for i_scene, scene in enumerate(scenes):#\n psnr,ssim,LPIPS_vgg = [],[],[]\n cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \\\n --dataset_name blender_src --white_bkgd \\\n --net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'\n \n save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'\n\n if is_fixed:\n save_dir += 'fixed-'\n \n save_dir += f'{view_type}-'\n\n save_dir += f'{source_split}-{target_split}/{scene}'\n\n args = config_parser(cmd.split())\n args.use_viewdirs = True\n\n args.N_samples = 128\n # args.feat_dim = 8+12\n args.feat_dim = 8+4*num_src_views\n\n # create models\n if 0==i_scene:\n render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)\n filter_keys(render_kwargs_train)\n\n MVSNet = render_kwargs_train['network_mvs']\n render_kwargs_train.pop('network_mvs')\n\n\n datadir = args.datadir\n datatype = 'train'\n pad = 16\n args.chunk = 5120\n\n\n print('============> rendering dataset <===================')\n dataset_source = dataset_dict[args.dataset_name](args, split=source_split)\n dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)\n target_idx = dataset_target.img_idx\n\n save_as_image = True\n\n os.makedirs(save_dir, exist_ok=True)\n MVSNet.train()\n MVSNet = MVSNet.cuda()\n\n with torch.no_grad():\n\n try:\n tqdm._instances.clear() \n except Exception: \n pass\n\n for i, batch in enumerate(tqdm(dataset_target)):\n torch.cuda.empty_cache()\n\n rays, img = decode_batch(batch)\n rays = rays.squeeze().to(device) # (H*W, 3)\n img = img.squeeze().cpu().numpy() # (H, W, 3)\n \n \n if is_fixed:\n \n if i == 0:\n if select_index is not None:\n pair_idx = get_pair_idx(source_dataset=dataset_source,\n target_position=dataset_target.poses[[len(select_index)//2],:3,3],\n N_views=args.num_src_views, \n view_type=view_type)\n else:\n pair_idx = get_pair_idx(source_dataset=dataset_source,\n target_position=dataset_target.poses[[50],:3,3],\n N_views=args.num_src_views, \n view_type=view_type)\n \n imgs_source, proj_mats, near_far_source, pose_source = 
dataset_source.read_source_views(pair_idx=pair_idx,\n device=device)\n \n else:\n # created fixed image_source\n imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source, \n target_position=dataset_target.poses[[i],:3,3], \n N_views=args.num_src_views, device=device, \n view_type=view_type)\n \n\n volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)\n imgs_source = unpreprocess(imgs_source)\n\n N_rays_all = rays.shape[0]\n rgb_rays, depth_rays_preds = [],[]\n for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):\n\n xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],\n N_samples=args.N_samples)\n\n # Converting world coordinate to ndc coordinate\n H, W = img.shape[:2]\n inv_scale = torch.tensor([W - 1, H - 1]).to(device)\n w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()\n intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train\n xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,\n near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)\n\n\n # rendering\n rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,\n xyz_NDC, z_vals, rays_o, rays_d,\n volume_feature,imgs_source, **render_kwargs_train)\n\n\n rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()\n rgb_rays.append(rgb)\n depth_rays_preds.append(depth_pred)\n\n\n depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)\n depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)\n\n rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)\n img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)\n\n img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)\n\n if save_as_image:\n imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))\n else:\n rgbs.append(img_vis.astype('uint8'))\n\n # quantity\n # center crop 0.8 ratio\n H_crop, W_crop = np.array(rgb_rays.shape[:2])//10\n img = img[H_crop:-H_crop,W_crop:-W_crop]\n rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]\n\n psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))\n ssim.append( structural_similarity(rgb_rays, img, multichannel=True))\n\n img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0 # image should be RGB, IMPORTANT: normalized to [-1,1]\n img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0\n LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())\n\n print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}') \n psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)\n\n if not save_as_image:\n imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)\n\n print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}') \n\n####\n# # Box 5\n####\n\ndef render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar',source_split='train', target_split='val', select_index=[30,60,90], view_types=[1]):\n \n if 1 in view_types: \n render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)\n\n if 2 in view_types: \n 
render_blender('dense', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)\n\n if 3 in view_types: \n render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)\n\n if 4 in view_types: \n render_blender('far', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)\n\n if 5 in view_types: \n render_blender('random', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)\n \n\n if 6 in view_types: \n render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)\n\n if 7 in view_types: \n render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)\n\n\n if 8 in view_types: \n render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)\n\n if 9 in view_types: \n render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)\n\n return None\n\n####\n# # Box 6\n####\n\n####\n# # Box 7\n####\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--view_types', nargs=\"+\", type=int,\n help= 'Enter list of view types to render:' \\\n ' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \\\n '6 - fixed nearset, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')\n\n parser.add_argument('--view_indexes', nargs=\"+\", type=int, const=None, default=None, \n help= 'default - all views (100)')\n\n parser.add_argument('--scenes', nargs='+', default=[])\n\n parser.add_argument('--ckpts', nargs='+', default=[])\n\n parser.add_argument('--source', type=str, default='train')\n\n parser.add_argument('--target', type=str, default='val')\n\n args = parser.parse_args()\n \n for ckpt in args.ckpts:\n\n num_src_views = int(re.findall('[0-9]+', ckpt)[0])\n\n render_blender_all_settings(scenes=args.scenes, \n num_src_views=num_src_views, \n ckpt=ckpt,\n source_split=args.source,\n target_split=args.target,\n select_index=args.view_indexes, \n view_types=args.view_types)\n torch.cuda.empty_cache()" ]
[ [ "torch.no_grad", "torch.split", "torch.from_numpy", "torch.cuda.set_device", "torch.cuda.empty_cache", "torch.tensor" ] ]
Pandinosaurus/KungFu
[ "80dfa463450330e920b413f65cc49d8e013b84a9" ]
[ "examples/mnist_elastic_docker/mnist_slp_estimator.py" ]
[ "import argparse\nimport functools\nimport operator\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom kungfu.tensorflow.v1.helpers.mnist import load_datasets\nfrom tensorflow.python.util import deprecation\n\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n\n\ndef parse_args():\n p = argparse.ArgumentParser(description='Example.')\n p.add_argument('--data-dir', type=str, default='.', help='')\n p.add_argument('--model-dir', type=str, default='.', help='')\n p.add_argument('--kf-optimizer', type=str, default='sync_sgd', help='')\n p.add_argument('--batch-size', type=int, default=100, help='')\n p.add_argument('--num-epochs', type=int, default=1, help='')\n p.add_argument('--learning-rate', type=float, default=0.01, help='')\n return p.parse_args()\n\n\ndef slp(x, logits):\n n = functools.reduce(operator.mul, [int(d) for d in x.shape[1:]], 1)\n output = tf.layers.dense(inputs=tf.reshape(x, [-1, n]), units=logits)\n return output, tf.argmax(output, axis=1)\n\n\ndef model_fn(features, labels, mode):\n output, predictions = slp(features['x'], 10)\n loss = tf.losses.sparse_softmax_cross_entropy(tf.cast(labels, tf.int32),\n output)\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)\n }\n optimizer = tf.train.GradientDescentOptimizer(0.1)\n from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer\n optimizer = SynchronousSGDOptimizer(optimizer)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops)\n\n\ndef input_fn(ds, batch_size, epochs=1, shuffle=True):\n features = {'x': ds.images}\n return tf.estimator.inputs.numpy_input_fn(x=features,\n y=ds.labels,\n batch_size=batch_size,\n num_epochs=epochs,\n shuffle=shuffle)\n\n\ndef get_model_dir(args):\n from kungfu.python import uid\n x = uid()\n port = (x >> 16) & 0xffff\n version = x & 0xffff\n suffix = '%d.%d' % (port, version)\n return os.path.join(args.model_dir, suffix)\n\n\nMNIST_DATA_SIZE = 60000\n\n\ndef main(do_eval=True):\n args = parse_args()\n model_dir = get_model_dir(args)\n\n data = load_datasets(args.data_dir, normalize=True)\n classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir)\n\n from kungfu.tensorflow.experimental.hook import ElasticHook\n hooks = [ElasticHook(args.batch_size, args.num_epochs, MNIST_DATA_SIZE)]\n\n classifier.train(input_fn(data.train,\n args.batch_size,\n epochs=args.num_epochs),\n hooks=hooks)\n\n if not do_eval:\n import time\n time.sleep(1)\n return\n results = classifier.evaluate(input_fn(data.test,\n args.batch_size,\n shuffle=False),\n hooks=[],\n steps=1)\n print('results: %s' % (results, ))\n\n\nif __name__ == '__main__':\n print('main started')\n main(False)\n print('main finished')\n" ]
[ [ "tensorflow.estimator.EstimatorSpec", "tensorflow.argmax", "tensorflow.reshape", "tensorflow.estimator.inputs.numpy_input_fn", "tensorflow.metrics.accuracy", "tensorflow.estimator.Estimator", "tensorflow.train.get_global_step", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer" ] ]
JetBrains-Research/pubtrends
[ "5352bec2cca3321f8554d8e60728fe6d8494edcb" ]
[ "pysrc/papers/analysis/topics.py" ]
[ "import logging\nfrom collections import Counter\nfrom itertools import chain\n\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import pairwise_distances\n\nfrom pysrc.papers.analysis.text import get_frequent_tokens\n\nlogger = logging.getLogger(__name__)\n\n\ndef compute_topics_similarity_matrix(papers_vectors, comps):\n logger.debug('Computing mean similarity between topics embeddings')\n n_comps = len(set(comps))\n distances = pairwise_distances(papers_vectors)\n similarity_matrix = np.zeros(shape=(n_comps, n_comps))\n indx = {i: np.flatnonzero([c == i for c in comps]).tolist() for i in range(n_comps)}\n for i in range(n_comps):\n for j in range(i, n_comps):\n mean_distance = np.mean(distances[indx[i], :][:, indx[j]])\n similarity_matrix[i, j] = similarity_matrix[j, i] = 1 / (1 + mean_distance)\n return similarity_matrix\n\n\ndef cluster_and_sort(x, max_clusters, min_cluster_size):\n \"\"\"\n :param x: object representations (X x Features)\n :param max_clusters:\n :param min_cluster_size:\n :return: List[cluster], Hierarchical dendrogram of splits.\n \"\"\"\n logger.debug('Looking for an appropriate number of clusters,'\n f'min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')\n if x.shape[1] == 0:\n return [0] * x.shape[0], None\n r = min(int(x.shape[0] / min_cluster_size), max_clusters) + 1\n l = 1\n\n if l >= r - 2:\n return [0] * x.shape[0], None\n\n prev_min_size = None\n while l < r - 1:\n n_clusters = int((l + r) / 2)\n model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)\n clusters_counter = Counter(model.labels_)\n min_size = clusters_counter.most_common()[-1][1]\n logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, '\n f'prev_min_size={prev_min_size}, min_size={min_size}')\n if min_size < min_cluster_size:\n if prev_min_size is not None and min_size <= prev_min_size:\n break\n r = n_clusters + 1\n else:\n l = n_clusters\n prev_min_size = min_size\n\n logger.debug(f'Number of clusters = {n_clusters}')\n logger.debug(f'Min cluster size = {prev_min_size}')\n logger.debug('Reorder clusters by size descending')\n reorder_map = {c: i for i, (c, _) in enumerate(clusters_counter.most_common())}\n return [reorder_map[c] for c in model.labels_], model.children_\n\n\ndef get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):\n \"\"\"\n Get words from abstracts that describe the components the best way\n using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance\n \"\"\"\n logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')\n # Since some of the components may be skipped, use this dict for continuous indexes'\n comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}\n # In cases with less than 2 components, return frequencies\n if len(comp_idx) < 2:\n comp = list(comp_idx.keys())[0]\n if ignore_comp is None:\n most_frequent = get_frequent_tokens(chain(*chain(*corpus)))\n return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words]}\n else:\n most_frequent = get_frequent_tokens(\n chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))]))\n )\n return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words],\n ignore_comp: []}\n\n # Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids\n comps_ids = {comp: 
list(np.flatnonzero(df['id'].isin(comp_pids))) for comp, comp_pids in comps.items()}\n result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)\n kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs])) for comp, vs in result.items()]\n logger.debug('Description\\n' + '\\n'.join(f'{comp}: {kwd}' for comp, kwd in kwds))\n\n return result\n\n\ndef _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):\n \"\"\"\n Select words with the frequency vector that is the closest to the 'ideal' frequency vector\n ([0, ..., 0, 1, 0, ..., 0]) in terms of cosine distance\n \"\"\"\n logger.debug('Compute summed token counts per component')\n # Since some of the components may be skipped, use this dict for continuous indexes\n comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}\n tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=float) # np.float is deprecated; use the builtin float\n for comp, comp_ids in comps.items():\n if comp != ignore_comp: # Not ignored\n tokens_freqs_per_comp[comp_idx[comp], :] = \\\n np.sum(corpus_counts[comp_ids, :], axis=0)\n\n # Calculate total number of occurrences for each word\n tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)\n\n # Normalize frequency vector for each word to have length of 1\n tokens_freqs_norm = np.sqrt(np.diag(tokens_freqs_per_comp.T @ tokens_freqs_per_comp))\n tokens_freqs_per_comp = tokens_freqs_per_comp / tokens_freqs_norm\n\n logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')\n # Cosine similarity between each token's normalized frequency vector and the one-hot vector of each cluster\n cluster_mask = np.eye(len(comp_idx))\n distance = tokens_freqs_per_comp.T @ cluster_mask\n # Add some weight for more frequent tokens to get rid of extremely rare ones in the top\n adjusted_distance = distance.T * np.log(tokens_freqs_total)\n\n result = {}\n for comp in comps.keys():\n if comp == ignore_comp:\n result[comp] = [] # Ignored component\n continue\n\n c = comp_idx[comp] # Get the continuous index\n cluster_tokens_idx = np.argsort(-adjusted_distance[c, :])[:n_words].tolist()\n result[comp] = [(corpus_tokens[i], adjusted_distance[c, i]) for i in cluster_tokens_idx]\n\n return result\n" ]
[ [ "sklearn.cluster.AgglomerativeClustering", "numpy.log", "numpy.zeros", "numpy.sum", "numpy.mean", "sklearn.metrics.pairwise_distances", "numpy.argsort", "numpy.diag", "numpy.flatnonzero" ] ]
fanwu8/sf
[ "8ce5671a3f8c2e8f3425aabc373fc58954f5bdbf", "8ce5671a3f8c2e8f3425aabc373fc58954f5bdbf" ]
[ "seisflows/tools/graphics.py", "seisflows/plugins/writers.py" ]
[ "\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\nfrom obspy.core.stream import Stream\n\n\ndef plot_gll(x, y, z):\n \"\"\" Plots values on 2D unstructured GLL mesh\n \"\"\"\n r = (max(x) - min(x))/(max(y) - min(y))\n rx = r/np.sqrt(1 + r**2)\n ry = 1/np.sqrt(1 + r**2)\n\n f = plt.figure(figsize=(10*rx, 10*ry))\n p = plt.tricontourf(x, y, z, 125)\n plt.axis('image')\n return f, p\n\n\ndef plot_vector(t, v, xlabel='', ylabel='', title=''):\n \"\"\" Plots a vector or time series.\n\n Parameters\n ----------\n v: ndarray, ndims = 1/2\n Vector or time series to plot\n xlabel: str\n x axis label\n ylabel: str\n y axis label\n title: str\n plot title\n\n Raises\n ------\n ValueError\n If dimensions of v are greater than 2\n \"\"\"\n\n # check input dimension\n if v.ndim > 2:\n raise ValueError('v must be a vector or a time series')\n\n if v.ndim == 1:\n x = list(range(len(v)))\n y = v\n else:\n x = v[:, 0]\n y = v[:, 1]\n\n # plot\n plt.plot(t, v)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n\n\ndef plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):\n \"\"\" Plots a seismic section from an obspy stream.\n\n Parameters\n ----------\n stream: Obspy stream object\n Obspy stream object created from a SU data file\n ax: Matplotlib Axes object\n Optional axis object\n cmap: str\n Matplotlib colormap option.\n clip: float\n Percentage value (0-100) for amplitude clipping\n title: str\n plot title\n x_interval: float\n Offset axis tick interval in km\n y_interval: float\n Time axis tick interval in km\n\n Raises\n ------\n NotImplementedError\n If stream object does not have SU format\n \"\"\"\n\n # check format of stream\n if stream[0].stats._format != 'SU':\n raise NotImplemented('plot_section currently only supports streams for SU data files.')\n\n # get dimensions\n nr = len(stream)\n nt = len(stream[0].data)\n dt = stream[0].stats.delta\n d_aspect = nr / float(nt)\n\n # convert stream to image array\n data = _convert_to_array(stream)\n\n # default values\n fsize = 6\n scale_factor = 1.5\n\n if ax is None:\n fig, ax = plt.subplots(figsize=(fsize, scale_factor*fsize))\n\n im = ax.imshow(data, aspect=scale_factor*d_aspect, clim=_cscale(data, clip=clip))\n im.set_cmap(cmap)\n\n # labels\n ax.set_title(title)\n ax.set_xlabel('Offset [km]')\n ax.set_ylabel('Time [s]')\n\n #set ticks\n t = _get_time(stream)\n yticks, ytick_labels = get_regular_ticks(t, y_interval)\n ax.set_yticks(yticks)\n ax.set_yticklabels(ytick_labels)\n\n offsets =_get_offsets(stream)\n xticks, xtick_labels = get_regular_ticks(offsets, x_interval)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xtick_labels)\n\n return ax\n\n\ndef _convert_to_array(stream):\n \"\"\" Extracts trace data from an obspy stream and returns a 2D array.\n\n Parameters\n ----------\n stream: Obspy stream object\n Stream storing trace data\n\n Returns\n -------\n output: ndarray, ndim=2\n Returns an (nt*nr) array. nt and nr are the number of sample points\n and number of traces respectively. 
Assumes trace lengths are equal\n for all traces.\n\n Raises\n ------\n TypeError\n If stream is not an obspy stream\n \"\"\"\n if not isinstance(stream, Stream):\n raise TypeError('Input object should be an obspy stream.')\n\n nt = len(stream.traces[0].data)\n nr = len(stream)\n output = np.zeros((nt, nr))\n\n for i, trace in enumerate(stream):\n output[:, i] = trace.data[:]\n\n return output\n\n\ndef _cscale(v, clip=100):\n \"\"\" Return limits for colormap.\n \"\"\"\n perc = clip / 100.\n return -perc * abs(v).max(), perc * abs(v).max()\n\n\ndef _get_time(stream):\n \"\"\" Get fixed time vector for stream object.\n \"\"\"\n dt = stream[0].stats.delta\n nt = len(stream[0].data)\n return np.arange(0, nt*dt, dt)\n\n\ndef _get_offsets(stream):\n \"\"\" Return offsets.\n \"\"\"\n nr = len(stream)\n offsets = np.zeros(nr)\n scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates\n\n # set scale to km\n if scalco == 0:\n scalco = 1e-3 # assume coords are in m\n else:\n scalco = 1.0e-3 / scalco\n\n for i, tr in enumerate(stream):\n offsets[i] = (tr.stats.su.trace_header.group_coordinate_x -\n tr.stats.su.trace_header.source_coordinate_x) * scalco\n return offsets\n\n\ndef get_regular_ticks(v, interval):\n \"\"\" Returns regular tick intervals.\n \"\"\"\n f = interp1d(v, list(range(len(v))))\n begin = int(v[0] / interval) * interval\n end = v[-1]\n tick_labels = np.arange(begin, end, interval)\n ticks = f(tick_labels)\n\n return ticks, tick_labels\n", "\n# SeisFlows uses obspy stream objects for holding and processing seismic data. In some cases, obspy.read doesn't provide the desired behavior, so we introduce an additonal level of indirection\n\n# used by the PREPROCESS class and specified by the WRITER parameter\n\n\nimport numpy as np\n\ndef su(d, path, filename):\n for t in d:\n # work around obspy data type conversion\n\n t.data = t.data.astype(np.float32)\n\n\n max_delta = 0.065535\n dummy_delta = max_delta\n\n if d[0].stats.delta > max_delta:\n for t in d:\n t.stats.delta = dummy_delta\n\n # write data to file\n d.write(path+'/'+filename, format='SU')\n\n\ndef ascii(stream, path, filenames):\n for ir, tr in enumerate(stream):\n nt = tr.stats.npts\n t1 = float(tr.stats.starttime)\n t2 = t1 + tr.stats.npts*tr.stats.sampling_rate\n print(nt, t1, t2)\n\n t = np.linspace(t1, t2, nt)\n w = tr.data\n\n print(path +'/'+ tr.stats.filename)\n # print(times.shape, tr.data.shape)\n np.savetxt(path +'/'+ tr.stats.filename,\n np.column_stack((t, w)))\n\n" ]
[ [ "matplotlib.pyplot.tricontourf", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.sqrt", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ], [ "numpy.linspace", "numpy.column_stack" ] ]
Callidior/semantic-embeddings
[ "0d4177422bafbba685fb6a0f976675864f31e09f", "0d4177422bafbba685fb6a0f976675864f31e09f" ]
[ "sgdr_callback.py", "datasets/cifar.py" ]
[ "import numpy as np\nfrom keras.callbacks import Callback\nfrom keras import backend as K\n\n\nclass SGDR(Callback):\n \"\"\"This callback implements the learning rate schedule for\n Stochastic Gradient Descent with warm Restarts (SGDR),\n as proposed by Loshchilov & Hutter (https://arxiv.org/abs/1608.03983).\n \n The learning rate at each epoch is computed as:\n lr(i) = min_lr + 0.5 * (max_lr - min_lr) * (1 + cos(pi * i/num_epochs))\n \n Here, num_epochs is the number of epochs in the current cycle, which starts\n with base_epochs initially and is multiplied by mul_epochs after each cycle.\n \n # Example\n ```python\n sgdr = SGDR(min_lr=0.0, max_lr=0.05,\n base_epochs=10, mul_epochs=2)\n model.compile(optimizer=keras.optimizers.SGD(decay=1e-4, momentum=0.9),\n loss=loss)\n model.fit(X_train, Y_train, callbacks=[sgdr])\n ```\n \n # Arguments\n min_lr: minimum learning rate reached at the end of each cycle.\n max_lr: maximum learning rate used at the beginning of each cycle.\n base_epochs: number of epochs in the first cycle.\n mul_epochs: factor with which the number of epochs is multiplied\n after each cycle.\n \"\"\"\n\n def __init__(self, min_lr=0.0, max_lr=0.05, base_epochs=10, mul_epochs=2):\n super(SGDR, self).__init__()\n\n self.min_lr = min_lr\n self.max_lr = max_lr\n self.base_epochs = base_epochs\n self.mul_epochs = mul_epochs\n\n self.cycles = 0.\n self.cycle_iterations = 0.\n self.trn_iterations = 0.\n\n self._reset()\n\n def _reset(self, new_min_lr=None, new_max_lr=None,\n new_base_epochs=None, new_mul_epochs=None):\n \"\"\"Resets cycle iterations.\"\"\"\n \n if new_min_lr != None:\n self.min_lr = new_min_lr\n if new_max_lr != None:\n self.max_lr = new_max_lr\n if new_base_epochs != None:\n self.base_epochs = new_base_epochs\n if new_mul_epochs != None:\n self.mul_epochs = new_mul_epochs\n self.cycles = 0.\n self.cycle_iterations = 0.\n \n def sgdr(self):\n \n cycle_epochs = self.base_epochs * (self.mul_epochs ** self.cycles)\n return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(np.pi * (self.cycle_iterations + 1) / cycle_epochs))\n \n def on_train_begin(self, logs=None):\n \n if self.cycle_iterations == 0:\n K.set_value(self.model.optimizer.lr, self.max_lr)\n else:\n K.set_value(self.model.optimizer.lr, self.sgdr())\n \n def on_epoch_end(self, epoch, logs=None):\n \n logs = logs or {}\n logs['lr'] = K.get_value(self.model.optimizer.lr)\n \n self.trn_iterations += 1\n self.cycle_iterations += 1\n if self.cycle_iterations >= self.base_epochs * (self.mul_epochs ** self.cycles):\n self.cycles += 1\n self.cycle_iterations = 0\n K.set_value(self.model.optimizer.lr, self.max_lr)\n else:\n K.set_value(self.model.optimizer.lr, self.sgdr())\n", "import numpy as np\nimport pickle\nimport os\n\nfrom .common import TinyDatasetGenerator\n\n\n\nclass CifarGenerator(TinyDatasetGenerator):\n \"\"\" Data generator for CIFAR-10 and CIFAR-100. 
\"\"\"\n\n def __init__(self, root_dir, classes = None, reenumerate = False, cifar10 = False, **kwargs):\n \"\"\" Data generator for CIFAR-10 and CIFAR-100.\n\n # Arguments:\n\n - root_dir: Root directory of the dataset.\n\n - classes: List of classes to restrict the dataset to.\n If set to `None`, all available classes will be used.\n \n - reenumerate: If true, the classes given in `classes` will be re-enumerated in ascending order, beginning from 0.\n \n - cifar10: Set this to True for CIFAR-10 and to False for CIFAR-100.\n\n Further keyword arguments such as `generator_kwargs` and `train_generator_kwargs` will be\n forwarded to the constructor of `TinyDatasetGenerator`.\n \"\"\"\n \n self.root_dir = root_dir\n\n # Load dataset\n if cifar10:\n X_train, y_train = [], []\n for i in range(1, 6):\n with open(os.path.join(self.root_dir, 'data_batch_{}'.format(i)), 'rb') as pf:\n dump = pickle.load(pf, encoding='bytes')\n X_train.append(dump[b'data' if b'data' in dump else 'data'].astype(np.float32))\n y_train += dump[b'labels' if b'labels' in dump else 'labels']\n del dump\n X_train = np.concatenate(X_train)\n else:\n with open(os.path.join(self.root_dir, 'train'), 'rb') as pf:\n dump = pickle.load(pf, encoding='bytes')\n X_train = dump[b'data' if b'data' in dump else 'data'].astype(np.float32)\n y_train = dump[b'fine_labels' if b'fine_labels' in dump else 'fine_labels']\n del dump\n\n with open(os.path.join(self.root_dir, 'test_batch' if cifar10 else 'test'), 'rb') as pf:\n dump = pickle.load(pf, encoding='bytes')\n X_test = dump[b'data' if b'data' in dump else 'data'].astype(np.float32)\n if cifar10:\n y_test = dump[b'labels' if b'labels' in dump else 'labels']\n else:\n y_test = dump[b'fine_labels' if b'fine_labels' in dump else 'fine_labels']\n del dump\n \n # Restrict labels to the given classes and re-enumerate them\n if classes is not None:\n \n sel_train = np.array([lbl in classes for lbl in y_train])\n sel_test = np.array([lbl in classes for lbl in y_test])\n X_train = X_train[sel_train]\n y_train = [lbl for lbl, sel in zip(y_train, sel_train) if sel]\n X_test = X_test[sel_test]\n y_test = [lbl for lbl, sel in zip(y_test, sel_test) if sel]\n \n self.classes = classes\n if reenumerate:\n self.class_indices = dict(zip(self.classes, range(len(self.classes))))\n y_train = [self.class_indices[lbl] for lbl in y_train]\n y_test = [self.class_indices[lbl] for lbl in y_test]\n \n else:\n\n self.classes = np.arange(max(y_train) + 1)\n self.class_indices = dict(zip(self.classes, self.classes))\n\n # Reshape data to images\n X_train = X_train.reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))\n X_test = X_test.reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))\n\n # Call parent constructor\n super(CifarGenerator, self).__init__(X_train, X_test, y_train, y_test, **kwargs)\n" ]
[ [ "numpy.cos" ], [ "numpy.concatenate", "numpy.array" ] ]
zhuyuanxiang/tensorflow_cookbook
[ "57d7ee719385ddd249a67c3a85bd336e884a67e5" ]
[ "01_Introduction/C0106_operations.py" ]
[ "# -*- encoding: utf-8 -*- \n\"\"\"\n@Author : zYx.Tom\n@Contact : 526614962@qq.com\n@site : https://github.com/zhuyuanxiang/tensorflow_cookbook\n---------------------------\n@Software : PyCharm\n@Project : TensorFlow_Machine_Learning_Cookbook\n@File : C0106_operations.py\n@Version : v0.1\n@Time : 2019-10-29 14:11\n@License : (C)Copyright 2018-2019, zYx.Tom\n@Reference : 《TensorFlow机器学习实战指南,Nick McClure》, Sec0106,P110\n@Desc : TensorFlow 基础,声明操作\n\"\"\"\n# common imports\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np # pip install numpy<1.17,小于1.17就不会报错\nimport sklearn\nimport tensorflow as tf\nimport winsound\nfrom tensorflow.python.framework import ops\n\nfrom tools import show_values\n\n# 设置数据显示的精确度为小数点后3位\nnp.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)\n\n# 利用随机种子,保证随机数据的稳定性,使得每次随机测试的结果一样\nnp.random.seed(42)\n\n# 初始化默认的计算图\nops.reset_default_graph()\n# Python ≥3.5 is required\nassert sys.version_info >= (3, 5)\n# Scikit-Learn ≥0.20 is required\nassert sklearn.__version__ >= \"0.20\"\n# 屏蔽警告:Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# Open graph session\nsess = tf.Session()\n\nshow_values(tf.div(3, 4), \"tf.div(3,4) = 整数除\")\nshow_values(tf.truediv(3, 4), \"tf.truediv(3,4) = 浮点除\")\nshow_values(tf.floordiv(3.0, 4.0), \"tf.floordiv(3.0,4.0) = 浮点取整除\")\nshow_values(tf.mod(22.0, 5.0), \"tf.mod(22.0,5.0) = 取模\")\n# 张量点积--Compute the pairwise cross product\n# 张量点积:即两个向量的叉乘,又叫向量积、外积、叉积,叉乘的运算结果是一个向量而不是一个标量。\n# 两个向量的点积与这两个向量组成的坐标平面垂直。\nshow_values(tf.cross([1., 0., 0.], [0., 1., 0.]),\n \"tf.cross([1., 0., 0.], [0., 1., 0.]) = 张量点积\")\n# 张量点积必须是三维的\n# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),\n# \"tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = 张量点积\")\n\n# ToSee:P11,数学函数列表\n\nshow_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),\n \"tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))\")\n\ntest_nums = range(15)\n# What should we get with list comprehension\nexpected_output = [3 * x * x - x + 10 for x in test_nums]\nprint('-' * 50)\nprint(\"[3 * x ^ 2 - x + 10 for x in test_nums] = \")\nprint(expected_output)\n\n\n# 自定义函数\n# 3x^2-x+10,x=11,=>\ndef custom_polynomial(value):\n # return tf.subtract(3 * tf.square(value), value) + 10\n return 3 * tf.square(value) - value + 10\n\n\nshow_values(custom_polynomial(11), \"custom_polynomial(11) = 3x^2-x+10,x=11=>\")\nfor num in test_nums:\n show_values(custom_polynomial(num), \"custom_polynomial({})\".format(num))\n\n# -----------------------------------------------------------------\n# 运行结束的提醒\nwinsound.Beep(600, 500)\nif len(plt.get_fignums()) != 0:\n plt.show()\npass\n" ]
[ [ "numpy.random.seed", "numpy.set_printoptions", "tensorflow.cross", "tensorflow.Session", "tensorflow.cos", "tensorflow.mod", "tensorflow.truediv", "matplotlib.pyplot.get_fignums", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.div", "tensorflow.sin", "matplotlib.pyplot.show", "tensorflow.floordiv", "tensorflow.square" ] ]
kant/GlennOPT
[ "ca816c3708a2db5b98f8f1a7885305a8e18e179e" ]
[ "test/ProbePlacement_multi/parallel/optimization_setup.py" ]
[ "\"\"\"\n Simple, non parallel optimization set up example. \n\"\"\"\nimport sys,os\nsys.path.insert(0,'../../../')\nfrom glennopt.base import Parameter\nfrom glennopt.helpers import mutation_parameters, de_mutation_type\nfrom glennopt.optimizers import NSGA3\nfrom glennopt.DOE import Default,CCD,FullFactorial,LatinHyperCube\nimport numpy as np\nimport os\n\n# Initialize the DOE \ndoe = LatinHyperCube(samples=128,levels=4) # 128 random samples of the design space\n# These are also available for use\n# doe = FullFactorial(levels=2) \n# doe = Default(15) # Default\n# doe = CCD()\n\neval_parameters = list()\n\n# Define evaluation parameters \nnProbes = 10\nminSpacing = 3\nprobeSpacing = 360/nProbes\ntLo = np.zeros(nProbes)\ntHi = np.zeros(nProbes)\nfor i in range(nProbes):\n tLo[i] = probeSpacing*i\n if i != nProbes-1:\n tHi[i] = probeSpacing*(i+1) - minSpacing\n else:\n tHi[-1] = probeSpacing*(i+1) \n doe.add_parameter(name=\"x\"+str(i+1),min_value=tLo[i],max_value=tHi[i])\nconstraints = (tLo,tHi)\n\ndoe.add_objectives(name='objective1')\ndoe.add_objectives(name='objective2')\n\n# Define any performance parameters you want to keep track of (tracking only)\ndoe.add_perf_parameter(name='PearsonR')\ndoe.add_perf_parameter(name='RMS_Error')\n\n# Set up the optimizer\ncurrent_dir = os.getcwd()\npop_size = 48\nns = NSGA3(eval_command = \"python evaluation.py\", eval_folder=\"Evaluation\",pop_size=pop_size,optimization_folder=current_dir)\nns.add_eval_parameters(eval_params=doe.eval_parameters)\nns.add_objectives(objectives=doe.objectives)\nns.add_performance_parameters(performance_params= doe.perf_parameters)\n\n# Parallel Settings (You don't need to run this block if you only want serial execution)\nns.parallel_settings.concurrent_executions = 8 # Change to 1 for serial\nns.parallel_settings.cores_per_execution= 1 \nns.parallel_settings.execution_timeout = 0.2 # minutes\n\n# Start the optimizer\nns.mutation_params.mutation_type = de_mutation_type.de_rand_1_bin\nns.mutation_params.F = 0.6\nns.mutation_params.C = 0.7\n# Start the Design of Experiments\nns.start_doe(doe.generate_doe())" ]
[ [ "numpy.zeros" ] ]
eldrin/wmf
[ "7a4d72e47034f4289ea3c73d28886eabd6ab5762" ]
[ "test_batched_inv_mp.py" ]
[ "import numpy as np\nimport wmf\nimport batched_inv\nimport batched_inv_mp\nimport solve_mp\nimport solve_gpu\n\nnp.random.seed(123)\n\nB = np.load(\"test_matrix.pkl\")\n\nS = wmf.log_surplus_confidence_matrix(B, alpha=2.0, epsilon=1e-6)\n\n\nnum_factors = 40 + 1\nnum_iterations = 1\nbatch_size = 1000\n\nsolve = batched_inv.solve_sequential\n# solve = solve_mp.solve_mp\n# solve = solve_gpu.solve_gpu\n\n\nU, V = wmf.factorize(S, num_factors=num_factors, lambda_reg=1e-5, num_iterations=num_iterations, init_std=0.01, verbose=True, dtype='float32',\n recompute_factors=batched_inv_mp.recompute_factors_bias_batched_mp, batch_size=batch_size, solve=solve)\n" ]
[ [ "numpy.random.seed", "numpy.load" ] ]
mori97/U-Net_MUSDB18
[ "d452f0e6378c1d74e823dcb1e95d92307f4dea46", "d452f0e6378c1d74e823dcb1e95d92307f4dea46" ]
[ "src/convert_to_wav.py", "src/u_net.py" ]
[ "\"\"\"Convert MUSDB18 dataset to .wav format.\nOutput .wav files contain 5 channels\n- `0` - The mixture,\n- `1` - The drums,\n- `2` - The bass,\n- `3` - The rest of the accompaniment,\n- `4` - The vocals.\n\"\"\"\nimport argparse\nimport os\nimport subprocess\nimport tempfile\n\nimport librosa\nimport numpy as np\nimport soundfile as sf\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('origin_dataset_dir',\n help='Path of the original dataset (.mp4)',\n type=str)\n parser.add_argument('new_dataset_dir',\n help='Output path of .wav dataset',\n type=str)\n parser.add_argument('--sr',\n help='Sample rate. (Default: 22050) ',\n type=int, default=22050)\n args = parser.parse_args()\n\n origin_dataset_dir = args.origin_dataset_dir\n new_dataset_dir = args.new_dataset_dir\n\n if os.path.isdir(new_dataset_dir):\n raise FileExistsError(f'{new_dataset_dir} already exists.')\n else:\n os.mkdir(new_dataset_dir)\n\n os.mkdir(os.path.join(new_dataset_dir, 'train'))\n os.mkdir(os.path.join(new_dataset_dir, 'test'))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n for subdir in ('train', 'test'):\n origin_dir = os.path.join(origin_dataset_dir, subdir)\n files = [f for f in os.listdir(origin_dir)\n if os.path.splitext(f)[1] == '.mp4']\n for file in files:\n path = os.path.join(origin_dir, file)\n name = os.path.splitext(file)[0]\n wav_data = []\n # Extract & save the sound of `ch` channel to a temp directory\n # and then concatenate all channels to a single .wav file\n for ch in range(5):\n temp_fn = f'{name}.{ch}.wav'\n out_path = os.path.join(tmpdir, temp_fn)\n subprocess.run(['ffmpeg', '-i', path,\n '-map', f'0:{ch}', out_path])\n sound, _ = librosa.load(out_path, sr=args.sr, mono=True)\n wav_data.append(sound)\n wav_data = np.stack(wav_data, axis=1)\n out_path = os.path.join(\n new_dataset_dir, subdir, f'{name}.wav')\n sf.write(out_path, wav_data, args.sr)\n\n\nif __name__ == '__main__':\n main()\n", "import torch\nimport torch.nn.functional as F\n\nEPS = 1e-8\n\n\nclass UNet(torch.nn.Module):\n \"\"\"An implementation of U-Net for music source separation.\n It has been proposed in \"Singing Voice Separation with Deep U-Net\n Convolutional Networks\".\n (https://ismir2017.smcnus.org/wp-content/uploads/2017/10/171_Paper.pdf)\n\n Args:\n n_class (int): Number of output classes.\n \"\"\"\n def __init__(self, n_class):\n super(UNet, self).__init__()\n\n self.conv1 = torch.nn.Conv2d(\n 1, 16, kernel_size=5, stride=2, padding=2)\n self.conv_bn1 = torch.nn.BatchNorm2d(16)\n self.conv2 = torch.nn.Conv2d(\n 16, 32, kernel_size=5, stride=2, padding=2)\n self.conv_bn2 = torch.nn.BatchNorm2d(32)\n self.conv3 = torch.nn.Conv2d(\n 32, 64, kernel_size=5, stride=2, padding=2)\n self.conv_bn3 = torch.nn.BatchNorm2d(64)\n self.conv4 = torch.nn.Conv2d(\n 64, 128, kernel_size=5, stride=2, padding=2)\n self.conv_bn4 = torch.nn.BatchNorm2d(128)\n self.conv5 = torch.nn.Conv2d(\n 128, 256, kernel_size=5, stride=2, padding=2)\n self.conv_bn5 = torch.nn.BatchNorm2d(256)\n self.conv6 = torch.nn.Conv2d(\n 256, 512, kernel_size=5, stride=2, padding=2)\n self.conv_bn6 = torch.nn.BatchNorm2d(512)\n\n self.deconv1 = torch.nn.ConvTranspose2d(\n 512, 256, kernel_size=5, stride=2, padding=2, output_padding=1)\n self.deconv_bn1 = torch.nn.BatchNorm2d(256)\n self.dropout1 = torch.nn.Dropout2d(0.5)\n self.deconv2 = torch.nn.ConvTranspose2d(\n 512, 128, kernel_size=5, stride=2, padding=2, output_padding=1)\n self.deconv_bn2 = torch.nn.BatchNorm2d(128)\n self.dropout2 = 
torch.nn.Dropout2d(0.5)\n self.deconv3 = torch.nn.ConvTranspose2d(\n 256, 64, kernel_size=5, stride=2, padding=2, output_padding=1)\n self.deconv_bn3 = torch.nn.BatchNorm2d(64)\n self.dropout3 = torch.nn.Dropout2d(0.5)\n self.deconv4 = torch.nn.ConvTranspose2d(\n 128, 32, kernel_size=5, stride=2, padding=2, output_padding=1)\n self.deconv_bn4 = torch.nn.BatchNorm2d(32)\n self.deconv5 = torch.nn.ConvTranspose2d(\n 64, 16, kernel_size=5, stride=2, padding=2, output_padding=1)\n self.deconv_bn5 = torch.nn.BatchNorm2d(16)\n self.deconv6 = torch.nn.ConvTranspose2d(\n 32, n_class, kernel_size=5, stride=2, padding=2, output_padding=1)\n\n def forward(self, x):\n \"\"\"Compute the separation mask.\n\n Args:\n x (torch.Tensor): Shape of (n_batch, n_frequency, n_frame).\n The number of time frames should be a multiple of 64.\n\n Returns:\n torch.Tensor: Shape of (n_batch, n_part, n_frequency, n_frame).\n Separation mask.\n \"\"\"\n # Add channel dimension\n x = x.unsqueeze(1)\n\n x = torch.log(x + EPS)\n h1 = F.leaky_relu(self.conv_bn1(self.conv1(x)), 0.2)\n h2 = F.leaky_relu(self.conv_bn2(self.conv2(h1)), 0.2)\n h3 = F.leaky_relu(self.conv_bn3(self.conv3(h2)), 0.2)\n h4 = F.leaky_relu(self.conv_bn4(self.conv4(h3)), 0.2)\n h5 = F.leaky_relu(self.conv_bn5(self.conv5(h4)), 0.2)\n h = F.leaky_relu(self.conv_bn6(self.conv6(h5)), 0.2)\n\n h = self.dropout1(F.relu(self.deconv_bn1(self.deconv1(h))))\n h = torch.cat((h, h5), dim=1)\n h = self.dropout2(F.relu(self.deconv_bn2(self.deconv2(h))))\n h = torch.cat((h, h4), dim=1)\n h = self.dropout3(F.relu(self.deconv_bn3(self.deconv3(h))))\n h = torch.cat((h, h3), dim=1)\n h = F.relu(self.deconv_bn4(self.deconv4(h)))\n h = torch.cat((h, h2), dim=1)\n h = F.relu(self.deconv_bn5(self.deconv5(h)))\n h = torch.cat((h, h1), dim=1)\n h = F.softmax(self.deconv6(h), dim=1)\n return h\n\n\ndef padding(sound_stft):\n \"\"\"Apply reflection padding to ensure that number of time frames of\n `sound`'s STFT representation is multiple of 64.\n\n Args:\n sound_stft (torch.Tensor): Spectrogram to be padded.\n\n Returns:\n Tuple[torch.Tensor, Tuple[int, int]]: Reflection padded spectrogram and\n number of rows padded to left-side and right-side, respectively.\n \"\"\"\n n_frames = sound_stft.size(-1)\n n_pad = (64 - n_frames % 64) % 64\n if n_pad:\n left = n_pad // 2\n right = n_pad - left\n return F.pad(sound_stft, (left, right), mode='reflect'), (left, right)\n else:\n return sound_stft, (0, 0)\n" ]
[ [ "numpy.stack" ], [ "torch.cat", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.Conv2d", "torch.nn.functional.pad", "torch.log", "torch.nn.Dropout2d" ] ]
petercuret/woonfraude
[ "2602464f9b9a8bf901d89590b61205ba18fe697d" ]
[ "codebase/datasets/adres_dataset.py" ]
[ "####################################################################################################\n\"\"\"\nadres_dataset.py\n\nThis module implements several classes to perform dataset-specific downloading, saving and\ndata-transformation operations.\n\nWritten by Swaan Dekkers & Thomas Jongstra\n\"\"\"\n####################################################################################################\n\n#############\n## Imports ##\n#############\n\nfrom pathlib import Path\nimport pandas.io.sql as sqlio\nimport pandas as pd\nimport numpy as np\nimport requests\nimport psycopg2\nimport time\nimport os\nimport re\n\n# Import own modules.\nimport datasets, clean\n\n# Define HOME and DATA_PATH on a global level.\nHOME = Path.home() # Home path for old VAO.\n# USERNAME = os.path.basename(HOME)\n# HOME = os.path.join('/data', USERNAME) # Set home for new VAO.\nDATA_PATH = os.path.join(HOME, 'Documents/woonfraude/data/')\n\n\n########################\n## AdresDataset class ##\n########################\n\nclass AdresDataset(datasets.MyDataset):\n \"\"\"Create a dataset for the adres data.\"\"\"\n\n # Set the class attributes.\n name = 'adres'\n table_name = 'import_adres'\n id_column = 'adres_id'\n\n\n def extract_leegstand(self):\n \"\"\"Create a column indicating leegstand (no inhabitants on the address).\"\"\"\n self.data['leegstand'] = ~self.data.inwnrs.notnull()\n self.version += '_leegstand'\n self.save()\n\n\n def enrich_with_woning_id(self):\n \"\"\"Add woning ids to the adres dataframe.\"\"\"\n adres_periodes = datasets.download_dataset('bwv_adres_periodes', 'bwv_adres_periodes')\n self.data = self.data.merge(adres_periodes[['ads_id', 'wng_id']], how='left', left_on='adres_id', right_on='ads_id')\n self.version += '_woningId'\n self.save()\n\n\n def prepare_bag(self, bag):\n # To int\n bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].astype(int)\n bag['huisnummer_nummeraanduiding'] = bag['huisnummer_nummeraanduiding'].replace(0, -1)\n\n # Fillna and replace ''\n bag['huisletter_nummeraanduiding'] = bag['huisletter_nummeraanduiding'].replace('', 'None')\n\n # bag['_openbare_ruimte_naam@bag'] = bag['_openbare_ruimte_naam@bag'].fillna('None')\n bag['_openbare_ruimte_naam_nummeraanduiding'] = bag['_openbare_ruimte_naam_nummeraanduiding'].replace('', 'None')\n\n # bag['_huisnummer_toevoeging@bag'] = bag['_huisnummer_toevoeging@bag'].fillna('None')\n bag['huisnummer_toevoeging_nummeraanduiding'] = bag['huisnummer_toevoeging_nummeraanduiding'].replace('', 'None')\n return bag\n\n\n def prepare_adres(self, adres):\n # To int\n adres['hsnr'] = adres['hsnr'].astype(int)\n adres['hsnr'] = adres['hsnr'].replace(0, -1)\n\n return adres\n\n\n def replace_string_nan_adres(self, adres):\n adres['hsnr'] = adres['hsnr'].replace(-1, np.nan)\n adres['sttnaam'] = adres['sttnaam'].replace('None', np.nan)\n adres['hsltr'] = adres['hsltr'].replace('None', np.nan)\n adres['toev'] = adres['toev'].replace('None', np.nan)\n adres['huisnummer_nummeraanduiding'] = adres['huisnummer_nummeraanduiding'].replace(-1, np.nan)\n adres['huisletter_nummeraanduiding'] = adres['huisletter_nummeraanduiding'].replace('None', np.nan)\n adres['_openbare_ruimte_naam_nummeraanduiding'] = adres['_openbare_ruimte_naam_nummeraanduiding'].replace('None', np.nan)\n adres['huisnummer_toevoeging_nummeraanduiding'] = adres['huisnummer_toevoeging_nummeraanduiding'].replace('None', np.nan)\n return adres\n\n\n def match_bwv_bag(self, adres, bag):\n # Merge dataframes on adres dataframe.\n new_df = 
pd.merge(adres, bag, how='left', left_on=['sttnaam','hsnr'], right_on = ['_openbare_ruimte_naam_nummeraanduiding', 'huisnummer_nummeraanduiding'])\n\n # Find id's that have a direct match and that have multiple matches.\n g = new_df.groupby('adres_id')\n df_direct = g.filter(lambda x: len(x) == 1)\n df_multiple = g.filter(lambda x: len(x) > 1)\n\n # Make the multiple matches more specific to construct a perfect match.\n df_multiple = df_multiple[(df_multiple['hsltr'] == df_multiple['huisletter_nummeraanduiding']) & (df_multiple['toev'] == df_multiple['huisnummer_toevoeging_nummeraanduiding'])]\n\n # Concat df_direct and df_multiple.\n df_result = pd.concat([df_direct, df_multiple])\n\n # Because of the separation of an object there can be two matching objects. Keep the oldest object with a definitive coordinate status.\n df_result = df_result.sort_values(['adres_id', 'status_coordinaat_code'])\n df_result = df_result.drop_duplicates(subset='adres_id', keep='first')\n\n # Add addresses without a match.\n final_df = pd.merge(adres, df_result, how='left', on='adres_id', suffixes=('', '_y'))\n final_df.drop(list(final_df.filter(regex='_y$')), axis=1, inplace=True)\n\n # Set the name of the final adres dataframe again.\n final_df.name = 'adres'\n\n return final_df\n\n\n def impute_values_for_bagless_addresses(self, adres):\n \"\"\"Impute values for addresses where no BAG-match could be found.\"\"\"\n clean.impute_missing_values(adres)\n # clean.impute_missing_values_mode(adres, ['status_coordinaat_code@bag'])\n adres.fillna(value={'huisnummer_nummeraanduiding': 0,\n 'huisletter_nummeraanduiding': 'None',\n '_openbare_ruimte_naam_nummeraanduiding': 'None',\n 'huisnummer_toevoeging_nummeraanduiding': 'None',\n 'type_woonobject_omschrijving': 'None',\n 'eigendomsverhouding_id': 'None',\n 'financieringswijze_id': -1,\n 'gebruik_id': -1,\n 'reden_opvoer_id': -1,\n 'status_id_verblijfsobject': -1,\n 'toegang_id': 'None'}, inplace=True)\n return adres\n\n\n def enrich_with_bag(self, bag):\n \"\"\"Enrich the adres data with information from the BAG data. Uses the bag dataframe as input.\"\"\"\n bag = self.prepare_bag(bag)\n self.data = self.prepare_adres(self.data)\n self.data = self.match_bwv_bag(self.data, bag)\n self.data = self.replace_string_nan_adres(self.data)\n self.data = self.impute_values_for_bagless_addresses(self.data)\n self.version += '_bag'\n self.save()\n print(\"The adres dataset is now enriched with BAG data.\")\n\n\n def enrich_with_personen_features(self, personen):\n \"\"\"Add aggregated features relating to persons to the address dataframe. 
Uses the personen dataframe as input.\"\"\"\n\n # Create simple handle to the adres data.\n adres = self.data\n\n # Compute age of people in years (float)\n today = pd.to_datetime('today')\n # Set all dates within range allowed by Pandas (584 years?)\n personen['geboortedatum'] = pd.to_datetime(personen['geboortedatum'], errors='coerce')\n\n\n # Get the most frequent birthdate (mode).\n geboortedatum_mode = personen['geboortedatum'].mode()[0]\n # Compute the age (result is a TimeDelta).\n personen['leeftijd'] = today - personen['geboortedatum']\n # Convert the age to an approximation in years (\"smearing out\" the leap years).\n personen['leeftijd'] = personen['leeftijd'].apply(lambda x: x.days / 365.25)\n\n # Find the matching address ids between the adres df and the personen df.\n adres_ids = adres.adres_id\n personen_adres_ids = personen.ads_id_wa\n intersect = set(adres_ids).intersection(set(personen_adres_ids))\n\n # Iterate over all matching address ids and find all people at each address.\n inhabitant_locs = {}\n print(\"Now looping over all address ids that have a link with one or more inhabitants...\")\n for i, adres_id in enumerate(intersect):\n if i % 1000 == 0:\n print(i)\n inhabitant_locs[adres_id] = personen_adres_ids[personen_adres_ids == adres_id]\n\n # Create a new column in the dataframe showing the amount of people at each address.\n # TODO: this step currently takes a few minutes to complete, should still be optimized.\n adres['aantal_personen'] = 0\n adres['aantal_vertrokken_personen'] = -1\n adres['aantal_overleden_personen'] = -1\n adres['aantal_niet_uitgeschrevenen'] = -1\n adres['leegstand'] = True\n adres['leeftijd_jongste_persoon'] = -1.\n adres['leeftijd_oudste_persoon'] = -1.\n adres['aantal_kinderen'] = 0\n adres['percentage_kinderen'] = -1.\n adres['aantal_mannen'] = 0\n adres['percentage_mannen'] = -1.\n adres['gemiddelde_leeftijd'] = -1.\n adres['stdev_leeftijd'] = -1.\n adres['aantal_achternamen'] = 0\n adres['percentage_achternamen'] = -1.\n for i in range(1,8):\n adres[f'gezinsverhouding_{i}'] = 0\n adres[f'percentage_gezinsverhouding_{i}'] = 0.\n print(\"Now looping over all rows in the adres dataframe in order to add person information...\")\n for i in adres.index:\n if i % 1000 == 0:\n print(i)\n row = adres.iloc[i]\n adres_id = row['adres_id']\n try:\n # Get the inhabitants for the current address.\n inhab_locs = inhabitant_locs[adres_id].keys()\n inhab = personen.loc[inhab_locs]\n\n # Check whether any registered inhabitants have left Amsterdam or have passed away.\n # Use .at[i, ...]: assigning to the whole column here would overwrite every row on each iteration.\n aantal_vertrokken_personen = sum(inhab[\"vertrekdatum_adam\"].notnull())\n aantal_overleden_personen = sum(inhab[\"overlijdensdatum\"].notnull())\n aantal_niet_uitgeschrevenen = len(inhab[inhab[\"vertrekdatum_adam\"].notnull() | inhab[\"overlijdensdatum\"].notnull()])\n adres.at[i, 'aantal_vertrokken_personen'] = aantal_vertrokken_personen\n adres.at[i, 'aantal_overleden_personen'] = aantal_overleden_personen\n adres.at[i, 'aantal_niet_uitgeschrevenen'] = aantal_niet_uitgeschrevenen\n # If there are more inhabitants than people that are incorrectly still registered, then there is no 'leegstand'.\n if len(inhab) > aantal_niet_uitgeschrevenen:\n adres.at[i, 'leegstand'] = False\n\n # Totaal aantal personen (int).\n aantal_personen = len(inhab)\n adres.at[i, 'aantal_personen'] = aantal_personen\n\n # Leeftijd jongste persoon (float).\n leeftijd_jongste_persoon = min(inhab['leeftijd'])\n adres.at[i, 'leeftijd_jongste_persoon'] = leeftijd_jongste_persoon\n\n # Leeftijd oudste persoon (float).\n 
leeftijd_oudste_persoon = max(inhab['leeftijd'])\n adres.at[i, 'leeftijd_oudste_persoon'] = leeftijd_oudste_persoon\n\n # Aantal kinderen ingeschreven op adres (int/float).\n aantal_kinderen = sum(inhab['leeftijd'] < 18)\n adres.at[i, 'aantal_kinderen'] = aantal_kinderen\n adres.at[i, 'percentage_kinderen'] = aantal_kinderen / aantal_personen\n\n # Aantal mannen (int/float).\n aantal_mannen = sum(inhab.geslacht == 'M')\n adres.at[i, 'aantal_mannen'] = aantal_mannen\n adres.at[i, 'percentage_mannen'] = aantal_mannen / aantal_personen\n\n # Gemiddelde leeftijd (float).\n gemiddelde_leeftijd = inhab.leeftijd.mean()\n adres.at[i, 'gemiddelde_leeftijd'] = gemiddelde_leeftijd\n\n # Standardeviatie van leeftijd (float). Set to 0 when the sample size is 1.\n stdev_leeftijd = inhab.leeftijd.std()\n adres.at[i, 'stdev_leeftijd'] = stdev_leeftijd if aantal_personen > 1 else 0\n\n # Aantal verschillende achternamen (int/float).\n aantal_achternamen = inhab.naam.nunique()\n adres.at[i, 'aantal_achternamen'] = aantal_achternamen\n adres.at[i, 'percentage_achternamen'] = aantal_achternamen / aantal_personen\n\n # Gezinsverhouding (frequency count per klasse) (int/float).\n gezinsverhouding = inhab.gezinsverhouding.value_counts()\n for key in gezinsverhouding.keys():\n val = gezinsverhouding[key]\n adres.at[i, f'gezinsverhouding_{key}'] = val\n adres.at[i, f'percentage_gezinsverhouding_{key}'] = val / aantal_personen\n\n except (KeyError, ValueError) as e:\n pass\n\n print(\"...done!\")\n\n self.data = adres\n self.version += '_personen'\n self.save()\n print(\"The adres dataset is now enriched with personen data.\")\n\n\n def add_hotline_features(self, hotline):\n \"\"\"Add the hotline features to the adres dataframe.\"\"\"\n # Create a temporary merged df using the adres and hotline dataframes.\n merge = self.data.merge(hotline, on='wng_id', how='left')\n # Create a group for each adres_id\n adres_groups = merge.groupby(by='adres_id')\n # Count the number of hotline meldingen per group/adres_id.\n # 'id' should be the primary key of hotline df, so it is usable for hotline entry counting.\n hotline_counts = adres_groups['id'].agg(['count'])\n # Rename column\n hotline_counts.columns = ['aantal_hotline_meldingen']\n # Enrich the 'adres' dataframe with the computed hotline counts.\n self.data = self.data.merge(hotline_counts, on='adres_id', how='left')\n self.version += '_hotline'\n self.save()\n print(\"The adres dataset is now enriched with hotline data.\")" ]
[ [ "pandas.to_datetime", "pandas.merge", "pandas.concat" ] ]
ho-oto/jax
[ "e0f285fd218aa704fa65c47ab6e7695f4a38ddbd" ]
[ "jax/experimental/jax2tf/jax2tf.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental module transforms JAX functions to be executed by TensorFlow.\"\"\"\nimport functools\nimport re\nimport string\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport jax\nfrom jax import ad_util, api_util, config\nfrom jax._src import api\nfrom jax import core, custom_derivatives, dtypes\nfrom jax import linear_util as lu\nfrom jax import numpy as jnp\nfrom jax import random, tree_util\nfrom jax._src import util\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax._src.lax import fft as lax_fft\nfrom jax._src.lax import lax\nfrom jax._src.lax import linalg as lax_linalg\nimport jax._src.random\nfrom jax.api_util import flatten_fun\nfrom jax.interpreters import ad\nfrom jax.interpreters import pxla\nfrom jax.interpreters import sharded_jit\nfrom jax.interpreters import xla\nfrom jax.lib import xla_client\n\nfrom . import shape_poly\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\n# These don't have public equivalents.\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]\nfrom tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]\nfrom tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]\n# pylint: enable=g-direct-tensorflow-import\n\nPolyShape = shape_poly.PolyShape\n\n# The scope name need to be a valid TensorFlow name. See\n# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731\n_VALID_SCOPE_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\/>-]*$\")\n_INVALID_SCOPE_CHAR = re.compile(\"[^A-Za-z0-9_.\\\\/>-]\")\n\n\ndef _sanitize_scope_name(name):\n scope_name = _INVALID_SCOPE_CHAR.sub(\"_\", name)\n if not _VALID_SCOPE_REGEX.match(scope_name):\n scope_name = \".{}\".format(scope_name)\n return scope_name\n\n\n# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,\n# or Python scalar or numpy.ndarray. 
(A tf.EagerTensor is a tf.Tensor.)\nTfVal = Any\nDType = Any\nPrecisionType = int # Enum xla_data.PrecisionConfig.Precision\n\n\ndef _is_tfval(v: TfVal) -> bool:\n if isinstance(v, (tf.Tensor, tf.Variable)):\n return True\n try:\n # Note: this conversion is overkill and just intended as a type check; this\n # code is in principle only run if config.jax_enable_checks is True.\n # TODO: it is not true that this code is run only with jax_enable_checks.\n _safe_convert_to_tensor(v)\n return True\n except ValueError:\n return False\n\n\ndef _safe_convert_to_tensor(val, dtype=None) -> TfVal:\n dtype = dtype if dtype else (val.dtype if hasattr(val, \"dtype\") else None)\n conversion_type = to_tf_dtype(dtype) if dtype else None\n # The float0 type is not known to TF.\n if dtype and dtype == dtypes.float0:\n val = np.zeros(np.shape(val), conversion_type.as_numpy_dtype)\n return tf.convert_to_tensor(val, dtype=conversion_type)\n\n\n# The implementation rules for primitives. The rule will be called with the\n# arguments (TfVal) and must return TfVal (or a sequence thereof,\n# if primitive.multiple_results). The vast majority of primitives do not need\n# to worry about core.unit inputs or results. The exception are primarily the\n# control-flow primitives.\ntf_impl: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# Some primitive implementation rules need the abstract values of arguments\n# and the results. This is the case for the primitives implemented using\n# _convert_jax_impl and those that need to adjust the shape of the outputs\n# due to missing TF shape inference rules for TFXLA ops. The rules for these\n# primitives should be added to `tf_impl_with_avals`.\n# The abstract value are passed to the implementation as two special kwargs\n# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a\n# core.AbstractValue, or a tuple thereof when primitive.multiple_results).\ntf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# XLA is not linked in all environments; when converting a primitive, if this\n# variable is disabled, we try harder to use only standard TF ops if they are\n# applicable to the concrete use case; if the resulting conversion path ends up\n# requiring a TFXLA operation, an exception is thrown instead.\n_enable_xla = True\n\ndef _xla_disabled_error(primitive_name: str,\n extra_msg: Optional[str] = None) -> Exception:\n assert not _enable_xla\n msg = f\"Call to {primitive_name} cannot be converted with enable_xla=False.\"\n if extra_msg:\n msg += f\" {extra_msg}\"\n return NotImplementedError(msg)\n\n@functools.partial(api_util.api_hook, tag=\"jax2tf_convert\")\ndef convert(fun: Callable,\n *,\n polymorphic_shapes: Optional[Sequence[Any]] = None,\n with_gradient=True,\n enable_xla=True) -> Callable:\n \"\"\"Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change. 
It\n should be a Python object with the same pytree structure as, or a prefix\n of, the tuple of arguments to the function, but with a shape\n specification corresponding to each argument. The default value is\n `None`, which is a shortcut for a tuple of `None` one for each argument,\n denoting that all shapes are monomorphic.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string \"_\" denoting a monomorphic dimension\n whose size is given by the actual argument. As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of \"_\"\n placeholders. For convenience, a shape specification can also be given\n as a string\n representation, e.g.: \"batch, ...\", \"batch, height, width, _\", possibly\n with surrounding parentheses: \"(batch, ...)\".\n\n The conversion fails if it cannot ensure that it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set, will add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. Only first-order\n differentiation is supported for now. If the converted function is saved\n in a SavedModel, the custom gradients are currently lost and an error will\n be raised if a gradient computation is attempted. This is due to a current\n bug in TensorFlow.\n enable_xla: if unset, the converter will try harder to use pure TF ops to\n convert the function, and raise an error if it cannot be converted\n without resorting to XLA ops (default: True).\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs.\n \"\"\"\n api._check_callable(fun)\n\n def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:\n # TODO: is there a better way to check if we are inside a transformation?\n if not core.trace_state_clean():\n raise ValueError(\"convert must be used outside all JAX transformations.\" +\n f\" Trace state: {core.thread_local_state.trace_state}\")\n\n def check_arg(a):\n if not _is_tfval(a):\n msg = (f\"Argument {a} of type {type(a)} of jax2tf.convert(f) should \"\n \"be NumPy array, scalar, tf.Variable, or tf.Tensor\")\n raise TypeError(msg)\n\n tree_util.tree_map(check_arg, args)\n tree_util.tree_map(check_arg, list(kwargs.values()))\n\n # Name input tensors\n args = tuple(\n tree_util.tree_map(lambda x, i=i: tf.identity(x, f\"jax2tf_arg_{i}\"),\n a) # type: ignore\n for i, a in enumerate(args))\n kwargs = {k: tf.identity(v, f\"jax2tf_arg_{k}\") for k, v in kwargs.items()}\n\n # This function may take pytrees of TfVals. 
We can only set\n # tf.custom_gradient on functions that take a flat argument list.\n args_flat, in_tree = tree_util.tree_flatten((args, kwargs))\n\n if polymorphic_shapes is None:\n polymorphic_shapes_ = (None,) * len(args)\n else:\n if not isinstance(polymorphic_shapes, Sequence) or len(args) != len(polymorphic_shapes):\n msg = (\"polymorphic_shapes must be a sequence with the same length as the positional argument list \"\n f\"({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.\")\n raise TypeError(msg)\n polymorphic_shapes_ = tuple(polymorphic_shapes)\n\n # Expand the polymorphic_shapes to match the argument pytree\n polymorphic_shapes_flat = tuple(api_util.flatten_axes(\"jax2tf.convert polymorphic_shapes\",\n in_tree.children()[0],\n polymorphic_shapes_))\n # Add kwargs shapes.\n polymorphic_shapes_flat = polymorphic_shapes_flat + tuple(\n (None,) * (len(args_flat) - len(polymorphic_shapes_flat)))\n\n # Construct the abstract values for the flat arguments, possibly based on\n # the input shapes and the polymorphic_shapes if given. May create new shape\n # variables.\n args_avals_flat, shapeenv = _args_to_avals_and_env(args_flat,\n polymorphic_shapes_flat)\n\n f = lu.wrap_init(fun)\n # out_tree_thunk() will be the output tree, after running _interpret_fun.\n flat_fun, out_tree_thunk = flatten_fun(f, in_tree)\n\n # Prepare the grad_fn for tf.custom_gradient.\n def converted_grad_fn(*out_cts_flat: TfVal,\n _out_cts_avals: Sequence[core.AbstractValue],\n variables=None):\n if variables:\n raise ValueError(\n \"Unexpected variables used in forward pass. \"\n \"This should not happen for first-order differentiation. \"\n f\"variables={variables}\")\n\n def fun_vjp_jax(args_jax, out_cts_jax):\n # One may think that we can get the pullback while we are converting\n # the main function in the first place. That is problematic, because the\n # pullback may contain captured tracers from the conversion of the\n # main function. Those tracers will confuse the conversion of the\n # pullback. 
So, we construct the vjp anew.\n _, pullback_jax = jax.vjp(fun, *args_jax)\n return pullback_jax(out_cts_jax)\n\n if polymorphic_shapes is None:\n vjp_polymorphic_shapes = None\n else:\n args_polymorphic_shapes = tree_util.tree_unflatten(\n in_tree.children()[0], polymorphic_shapes_flat)\n out_cts_polymorphic_shapes = tree_util.tree_unflatten(\n out_tree_thunk(),\n tuple(str(out_aval.shape)\n for out_aval in _out_cts_avals)) # type: ignore\n vjp_polymorphic_shapes = [\n args_polymorphic_shapes, out_cts_polymorphic_shapes\n ]\n out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)\n # TODO: enable higher-order gradients\n with tf.name_scope(\"jax2tf_vjp\"):\n in_cts = convert(\n fun_vjp_jax,\n with_gradient=False,\n polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)\n return in_cts\n\n try:\n global _shape_env\n assert not _shape_env, f\"Unexpected shape environment {_shape_env}\"\n global _enable_xla\n prev_enable_xla = _enable_xla\n _enable_xla = enable_xla\n _shape_env = shapeenv\n\n if with_gradient:\n\n @tf.custom_gradient\n def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)\n outs, out_avals = util.unzip2(out_with_avals)\n return (tuple(outs),\n functools.partial(\n converted_grad_fn, _out_cts_avals=tuple(out_avals)))\n\n out_flat = converted_fun_flat_with_custom_gradient(*args_flat)\n else:\n out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)\n message = (\"The jax2tf-converted function does not support gradients. \"\n \"Use `with_gradient` parameter to enable gradients\")\n # We use PreventGradient, which is propagated through a SavedModel.\n out_flat = [\n tf.raw_ops.PreventGradient(input=o, message=message)\n for o, _ in out_flat_raw\n ]\n finally:\n _shape_env = {}\n _enable_xla = prev_enable_xla\n\n out_flat = [tf.identity(x, \"jax2tf_out\") for x in out_flat]\n out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)\n return out\n\n return converted_fun\n\n\n# Internals\n\n\ndef _interpret_fun(\n fun: lu.WrappedFun, in_vals: Sequence[TfVal],\n in_avals: Sequence[core.AbstractValue]\n) -> Sequence[Tuple[TfVal, core.AbstractValue]]:\n with core.new_base_main(TensorFlowTrace) as main: # type: ignore\n fun = _interpret_subtrace(fun, main, in_avals)\n with core.new_sublevel():\n out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = \\\n fun.call_wrapped(*in_vals)\n del main\n return tuple(out_vals)\n\n\ndef _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:\n \"\"\"Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. 
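(For example,\n `jax._src.random._gamma_impl`, which is converted with this wrapper near\n the end of this file, has such a signature.) 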
This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n \"\"\"\n\n def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:\n\n # We wrap the jax_impl under _interpret_fun to abstract the TF values\n # from jax_impl and turn them into JAX abstract values.\n def jax_impl_jax_args(*jax_args):\n jax_results = jax_impl(*jax_args, **kwargs)\n return jax_results if multiple_results else [jax_results]\n\n tf_results_with_avals = _interpret_fun(\n lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)\n tf_results, _ = util.unzip2(tf_results_with_avals)\n return tf_results if multiple_results else tf_results[0]\n\n return wrapped\n\n\n@lu.transformation\ndef _interpret_subtrace(main: core.MainTrace,\n in_avals: Sequence[core.AbstractValue],\n *in_vals: TfVal):\n trace = TensorFlowTrace(main, core.cur_sublevel())\n in_tracers = tuple(\n TensorFlowTracer(trace, val, aval)\n for val, aval in util.safe_zip(in_vals, in_avals))\n # The outs may be core.unit, see comment in TensorFlowTrace.pure.\n outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]\n out_tracers: Iterable[TensorFlowTracer] = (\n map(trace.full_raise, outs)) # type: ignore\n out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (\n tuple((t.val, t.aval) for t in out_tracers))\n yield out_vals_with_avals\n\n\ndef _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:\n \"\"\"Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n \"\"\"\n fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)\n return tuple(v for v, _ in out_with_avals)\n\n\n### tracer\n\n\ndef _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:\n \"\"\"Generate a TF shape, possibly containing None for polymorphic dimensions.\"\"\"\n return tuple(\n map(lambda d: None if isinstance(d, shape_poly.DimVar) else d,\n aval.shape)) # type: ignore[attr-defined]\n\n\ndef _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:\n \"\"\"Called for constants that occur in the program, or for input values to the converted function.\n\n The returned shape may have unknown components, but only when called for\n inputs.\n \"\"\"\n if isinstance(val, (tf.Tensor, tf.Variable)):\n # May be partially known\n return tuple(val.shape), to_jax_dtype(val.dtype)\n else: # Must be a numeric value\n assert not config.jax_enable_checks or _is_tfval(val), f\"Non TfVal: {val}\"\n raw_aval = xla.abstractify(val)\n return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]\n\n\n# A dimension environment maps dimension variables to TF expressions that\n# compute the value of the dimension. 
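For example, under\n# polymorphic_shapes=\"(batch, _)\" the variable for `batch` would map to\n# something like tf.shape(arg)[0] (an illustration; see input_aval below). 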
These expressions refer to the TF\n# function arguments.\n_ShapeEnv = Dict[shape_poly.DimVar, TfVal]\ndef _args_to_avals_and_env(args: Sequence[TfVal],\n polymorphic_shapes: Sequence[Optional[Union[str, PolyShape]]]) -> \\\n Tuple[Sequence[core.AbstractValue], _ShapeEnv]:\n \"\"\"Computes abstract values and a dimension environment for arguments.\n\n Args:\n args: the arguments, TF inputs.\n polymorphic_shapes: the polymorphic specifications for the arguments.\n Returns: a tuple of a sequence of abstract values corresponding to the\n arguments and a dimension environment.\n \"\"\"\n shapeenv: _ShapeEnv = {}\n\n def input_aval(arg: TfVal,\n polymorphic_shape: Optional[str]) -> core.AbstractValue:\n \"\"\"The abstract value for an input.\"\"\"\n raw_shape, dtype = _tfval_shape_dtype(arg)\n\n aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)\n\n for i, d in enumerate(aval_shape):\n if type(d) is int:\n assert d == np.shape(arg)[i]\n elif type(d) is shape_poly.DimVar and d not in shapeenv:\n # Even if the shape of `arg` is known, we still use `tf.shape` for\n # safety, because the promise is that we will convert the function\n # to work for any value of the dimension.\n shapeenv[d] = tf.shape(arg)[i] # type: ignore[index]\n else:\n # TODO: add an assertion tf.shape(arg)[i] == env[d]\n pass\n\n return core.ShapedArray(aval_shape, dtype)\n\n avals = tuple(map(input_aval, args, polymorphic_shapes)) # type: ignore\n return avals, shapeenv\n\n\n# A shape environment maps shape variables to TfVal.\n_shape_env = {} # type: _ShapeEnv\n\n\ndef _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:\n assert all(map(\n lambda x: x is not None,\n shape)), (f\"Argument shape should be a valid JAX shape but got {shape}\")\n return tuple(_shape_env[d] # type: ignore[index]\n if type(d) is shape_poly.DimVar else d\n for d in shape)\n\n\ndef shape_as_value(x):\n \"\"\"Injects the shape of `x` as an array value.\n\n **Experimental: please give feedback, and expect changes!**\n\n This allows the use of a shape expression as array argument to JAX functions.\n A typical example is for implementing a mean operation:\n\n jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))\n \"\"\"\n # return shape_as_value_p.bind(x)\n raise NotImplementedError(\"shape_as_value is deprecated\")\n\n\n# # TODO: move this to masking or to some common library, if approved\n# shape_as_value_p = core.Primitive(\"shape_as_value\")\n# shape_as_value_p.multiple_results = True\n# def _shape_as_value_impl(x):\n# x_shape = np.shape(x)\n# def dim_to_int(dim: shape_poly.DimSize) -> int:\n# dim_int = _poly_dim_to_tf_dim(dim)\n# if dim_int is None:\n# msg = (\"shape_as_value is not implemented for non-constant shapes \"\n# \"except for masking and jax2tf. 
\"\n# f\"Has shape: {x_shape}\")\n# raise TypeError(msg)\n# else:\n# return dim_int\n# return tuple(map(dim_to_int, x_shape))\n#\n# shape_as_value_p.def_impl(_shape_as_value_impl)\n#\n# def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:\n# rank = len(x_aval.shape) # type: ignore[attr-defined]\n# return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank\n#\n# shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)\n#\n# def _shape_as_value_translation(comp, x):\n# return xla_client._xla.ops.Tuple(comp,\n# tuple(xb.constant(comp, d)\n# for d in comp.GetShape(x).dimensions()))\n#\n# xla.translations[shape_as_value_p] = _shape_as_value_translation\n#\n# def _shape_as_value_jvp_rule(primals, tangents):\n# # The shape does not depend on the contents of the input\n# x, = primals\n# zero = ad.Zero.from_value(0.)\n# return shape_as_value(x), (zero,) * len(x.shape)\n#\n# ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule\n#\n# def _shape_as_value__batching_rule(batched_args, batch_dims):\n# xv, = batched_args\n# batch_dim, = batch_dims\n# batch_size = xv.shape[batch_dim]\n# batched_shape = shape_as_value(xv)\n# one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]\n# res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)\n# return res, (0,) * len(one_shape)\n#\n# batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule\n#\n# def _shape_as_value_masking_rule(operands, operands_logical_shapes):\n# x_logical_shape, = operands_logical_shapes\n# return tuple(x_logical_shape)\n#\n# masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule\n#\n# def _shape_as_value_tf(x: TfVal,\n# _in_avals: Sequence[core.AbstractValue],\n# _out_aval: core.AbstractValue) -> TfVal:\n# x_aval = _in_avals[0]\n# def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal:\n# dim_int = _poly_dim_to_tf_dim(dim)\n# if dim_int is not None:\n# return tf.convert_to_tensor(dim_int)\n# else:\n# return tf.shape(x)[dim_idx]\n# return tuple(dim_to_tfval(dim, dim_idx)\n# for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]\n#\n# tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf\n\n# TODO(b/26854495): pylint doesn't understand slots and inheritance.\n# pylint: disable=assigning-non-slot\n\n\nclass TensorFlowTracer(core.Tracer):\n \"\"\"Tracer class that boxes a TF value and a JAX abstract value.\n\n In addition to the TF value we carry the JAX abstract value because there are\n two cases when it cannot be recovered from the value: (a) when the abstract\n value is core.abstract_unit, in which case the value is tf.nan; (b) when we\n are converting with polymorphic shapes, in which case the shape of the value\n may have dimensions set to `None`, which the JAX abstract value may contain\n more precise information.\n\n When the value has a partially-known shape, the dimensions marked as `None`\n must correspond to non-constant dimensions in the abstract value.\n\n See README.md for details.\n \"\"\"\n # val: TfVal\n # _aval: core.AbstractValue\n __slots__ = [\"val\", \"_aval\"]\n\n def __init__(self, trace: \"TensorFlowTrace\", val: TfVal,\n aval: core.AbstractValue):\n self._trace = trace\n self._aval = aval\n if aval is core.abstract_unit:\n self.val = val\n elif isinstance(val, (tf.Tensor, tf.Variable)):\n val_shape, val_dtype = _tfval_shape_dtype(val)\n aval_dtype = np.dtype(self._aval.dtype) # type: ignore[attr-defined]\n if (val_dtype != aval_dtype and not 
config.x64_enabled and\n (val_dtype == tf.int32 and aval_dtype == jnp.int64 or\n val_dtype == tf.int64 and aval_dtype == jnp.int32 or\n val_dtype == tf.float32 and aval_dtype == jnp.float64 or\n val_dtype == tf.float64 and aval_dtype == jnp.float32 or\n val_dtype == tf.complex128 and aval_dtype == jnp.complex64)):\n # If JAX does not have x64 bit mode enabled, it will force the 64-bit\n # values to use 32-bit precision. In order to make the TF conversion\n # follow JAX's rules, we cast the TF values down to 32-bit mode.\n val = tf.cast(val, dtype=aval_dtype)\n val_dtype = aval_dtype\n\n if config.jax_enable_checks:\n assert aval_dtype == val_dtype, f\"expected {aval_dtype} == {val_dtype}\"\n for aval_dim, val_dim in util.safe_zip(\n self._aval.shape, val_shape): # type: ignore[attr-defined]\n if val_dim is None:\n assert isinstance(\n aval_dim, shape_poly.DimVar\n ), f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n elif not isinstance(aval_dim, shape_poly.DimVar):\n assert aval_dim == val_dim, f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n else:\n # We have a TF value with known shape, and the abstract shape is a shape variable.\n try:\n aval_int = int(_eval_shape([aval_dim])) # type: ignore\n except TypeError:\n continue\n assert aval_int == val_dim, f\"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}.\" # type: ignore\n\n self.val = val\n else: # Must be a numeric value\n self.val = _safe_convert_to_tensor(\n val, dtype=self._aval.dtype) # type: ignore[attr-defined]\n\n @property\n def aval(self):\n return self._aval\n\n def full_lower(self):\n return self\n\n\nclass TensorFlowTrace(core.Trace):\n \"\"\"Trace class that underlies the jax2tf transformation.\n\n We are going to ensure that jax2tf.convert is never nested inside other\n transformations. This is sufficient for intended use cases (converting\n fully-transformed JAX code). It also simplifies our job because we do not have\n to handle situations where we apply primitives on a mix of TF values and\n JAX tracers from an outer transformation. E.g., for addition both the TF\n values\n and the JAX tracers have an override and they get confused if they see values\n from the other world.\n\n Hence a TFT trace does not interact with non-TFT traces at lower-level. For\n higher-order control-flow primitives we invoke recursively\n _interpret_fun on the body of the conditional, which will create a nested TFT.\n\n We do want to allow transformations nested inside a TensorFlowTrace (TFT), but\n those will introduce their own MainTrace, and any operations involving those\n will be done on those traces, i.e., not a concern for TFT.\n \"\"\"\n\n def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:\n \"\"\"Lifts a non-Tracer into the TensorFlowTracer.\n\n This function may be called by way of trace.full_raise.\n\n The value may be a core.unit. During JAX transformations we sometimes\n produce a Jaxpr that has arguments of abstract value core.abstract_unit\n and results equal to core.unit. These are arguments and results that are\n not used in the computation.\n\n In TF world, we represent core.unit as NaN. 
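(Concretely, the branch below returns a\n TensorFlowTracer wrapping tf.constant(np.nan, tf.float32), with abstract\n value core.abstract_unit.) 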
This is safe, as these values\n should never be used.\n \"\"\"\n if val is core.unit:\n return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),\n core.abstract_unit)\n else:\n shape, dtype = _tfval_shape_dtype(val)\n return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))\n\n def lift(self, val: core.Tracer) -> TensorFlowTracer:\n # This would be called when we need to raise a tracer from a lower-level\n # main into the TensorFlowTrace. Since the TensorFlowTrace is never nested\n # inside another transform, there are no lower-level main traces.\n assert False\n\n def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:\n # This is called when we need to raise a tracer from the same master,\n # but a lower sublevel. This could come from a nested jit.\n return TensorFlowTracer(self, val.val, val._aval)\n\n def process_primitive(self, primitive: core.Primitive,\n tracers: Sequence[TensorFlowTracer],\n params) -> TensorFlowTracer:\n impl, impl_needs_avals = self.get_primitive_impl(primitive)\n args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)\n out_aval = primitive.abstract_eval(*args_avals, **params)\n args_tf: Sequence[TfVal] = [t.val for t in tracers]\n if impl_needs_avals:\n val_out: TfVal = impl(\n *args_tf,\n _in_avals=args_avals, # type: ignore\n _out_aval=out_aval,\n **params)\n else:\n val_out = impl(*args_tf, **params)\n\n if primitive.multiple_results:\n out = [\n TensorFlowTracer(self, v, a)\n for v, a in util.safe_zip(val_out, out_aval)\n ] # type: ignore\n else:\n out = TensorFlowTracer(self, val_out, out_aval) # type: ignore\n\n # Check that the impl rule returned a value of expected shape and dtype\n # TODO: adapt this to match polymorphic shapes\n if config.jax_enable_checks:\n if primitive.multiple_results:\n for o, expected_aval in zip(out, out_aval): # type: ignore\n assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (\n f\"{primitive}: out.aval = {o.aval}; expected {expected_aval}\")\n else:\n assert out.aval == out_aval, ( # type: ignore\n f\"{primitive}: out.aval = {out.aval}; expected {out_aval}\"\n ) # type: ignore\n return out # type: ignore\n\n def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,\n tracers: Sequence[TensorFlowTracer], params):\n assert call_primitive.multiple_results\n vals: Sequence[TfVal] = [t.val for t in tracers]\n f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))\n with core.new_sublevel():\n if call_primitive == core.named_call_p:\n with tf.name_scope(_sanitize_scope_name(params[\"name\"])):\n vals_out: Sequence[Tuple[TfVal, core.AbstractValue]] = \\\n f.call_wrapped(*vals)\n elif call_primitive == sharded_jit.sharded_call_p:\n vals_out = _sharded_call(f, vals, **params)\n else:\n vals_out = f.call_wrapped(*vals)\n return [TensorFlowTracer(self, v, a) for v, a in vals_out]\n\n def post_process_call(self, call_primitive: core.Primitive,\n out_tracers: Sequence[TensorFlowTracer], params):\n # We encountered a call primitive, e.g., remat_call_p, whose result\n # (out_tracers) include TensorFlowTracer that were not passed through\n # its arguments (captured from the environment).\n vals = tuple(t.val for t in out_tracers)\n main = self.main\n\n def todo(vals: Sequence[TfVal]):\n trace = TensorFlowTrace(main, core.cur_sublevel())\n return [\n TensorFlowTracer(trace, v, out_tracer.aval)\n for v, out_tracer in util.safe_zip(vals, out_tracers)\n ]\n\n return vals, todo\n\n def process_map(self, map_primitive, f, tracers, params):\n raise 
NotImplementedError(\"process_map\")\n\n def post_process_map(self, map_primitive, out_tracers, params):\n raise NotImplementedError(\"post_process_map\")\n\n def process_custom_jvp_call(self, prim, fun, jvp, tracers):\n # Drop the custom differentiation rule and act like a call primitive. This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del jvp # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_jvp_call(self, out_tracers, params):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):\n # Drop the custom differentiation rule and act like a call primitive. This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del fwd, bwd, out_trees # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_vjp_call(self, out_tracers, params):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:\n # Returns the primitive implementation and whether the implementation\n # takes abstract values (see definition of tf_impl_with_avals)\n try:\n return tf_impl[p], False\n except KeyError:\n try:\n return tf_impl_with_avals[p], True\n except KeyError as err:\n msg = \"TensorFlow interpretation rule for '{}' not implemented\"\n raise NotImplementedError(msg.format(p)) from err\n\n\ndef to_tf_dtype(jax_dtype):\n if jax_dtype == dtypes.float0:\n jax_dtype = dtypes.bfloat16\n return tf.dtypes.as_dtype(jax_dtype)\n\n\ndef to_jax_dtype(tf_dtype):\n return tf_dtype.as_numpy_dtype\n\n\ndef _unexpected_primitive(p: core.Primitive, *args, **kwargs):\n assert False, f\"Encountered unexpected primitive {p}\"\n\n\nfor unexpected in xla.call_translations: # Call primitives are inlined\n tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)\n\n# Primitives that are not yet implemented must be explicitly declared here.\ntf_not_yet_impl = [\n \"reduce\",\n \"rng_uniform\",\n \"clz\",\n \"igamma_grad_a\",\n \"random_gamma_grad\",\n \"reduce_precision\",\n\n # Not high priority?\n \"after_all\",\n \"all_to_all\",\n \"create_token\",\n \"infeed\",\n \"outfeed\",\n \"pmax_p\",\n \"pmin\",\n \"ppermute\",\n \"psum\",\n \"pmax\",\n \"pgather\",\n \"axis_index\",\n \"pdot\",\n \"all_gather\",\n \"lu_pivots_to_permutation\",\n \"rng_bit_generator\",\n \"xla_pmap\",\n \"call_tf\",\n]\n\ntf_impl[ad_util.stop_gradient_p] = tf.stop_gradient\ntf_impl[ad_util.zeros_like_p] = tf.zeros_like\n\n\ndef _add(x: TfVal, y: TfVal) -> TfVal:\n return tf.raw_ops.AddV2(x=x, y=y)\n\n\ntf_impl[ad_util.add_jaxvals_p] = _add\ntf_impl[xla.device_put_p] = lambda x, device=None: x\n\ntf_impl[lax.neg_p] = tf.math.negative\n\n\ndef _sign(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n # TF and XLA do not support tf.math.sign for unsigned types.\n return tf.where(\n tf.math.equal(x, 0), np.array(0, dtype=x.dtype),\n np.array(1, dtype=x.dtype))\n else:\n return tf.math.sign(x)\n\n\ntf_impl[lax.sign_p] = _sign\ntf_impl[lax.floor_p] = tf.math.floor\ntf_impl[lax.ceil_p] = tf.math.ceil\n\n\ndef _round(operand, *, rounding_method):\n if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:\n sign = _sign(operand)\n operand *= sign\n floor = tf.math.floor(operand)\n 
operand -= floor\n cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))\n return sign * (\n tf.where(cond, tf.constant(np.array(1), operand.dtype),\n tf.math.round(operand)) + floor)\n else:\n return tf.math.round(operand)\n\n\ntf_impl[lax.round_p] = _round\ntf_impl[lax.nextafter_p] = tf.math.nextafter\n\n\ndef _population_count(x):\n orig_dtype = x.dtype\n return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)\n\n\ntf_impl[lax.population_count_p] = _population_count\ntf_impl[lax.is_finite_p] = tf.math.is_finite\n\n\ndef _abs(x: TfVal) -> TfVal:\n # TF and XLA do not support tf.math.abs for unsigned types.\n return tf.math.abs(x) if not x.dtype.is_unsigned else x\n\n\ntf_impl[lax.abs_p] = _abs\ntf_impl[lax.pow_p] = tf.math.pow\n\n\ndef _integer_pow(x, *, y: int, _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n # Follows the implementation in lax._integer_pow_translation_rule\n if y == 0:\n return tf.broadcast_to(\n tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))\n is_reciprocal = y < 0\n if is_reciprocal:\n y = -y\n acc = None\n while y > 0:\n if y & 1:\n acc = x if acc is None else tf.math.multiply(acc, x)\n y >>= 1\n if y > 0:\n x = tf.math.multiply(x, x)\n return tf.math.reciprocal(acc) if is_reciprocal else acc\n\n\ntf_impl_with_avals[lax.integer_pow_p] = _integer_pow\ntf_impl[lax.exp_p] = tf.math.exp\ntf_impl[lax.expm1_p] = tf.math.expm1\ntf_impl[lax.log_p] = tf.math.log\ntf_impl[lax.log1p_p] = tf.math.log1p\ntf_impl[lax.tan_p] = tf.math.tan\ntf_impl[lax.tanh_p] = tf.math.tanh\ntf_impl[lax.sin_p] = tf.math.sin\ntf_impl[lax.sinh_p] = tf.math.sinh\ntf_impl[lax.cos_p] = tf.math.cos\ntf_impl[lax.cosh_p] = tf.math.cosh\ntf_impl[lax.acos_p] = tf.math.acos\ntf_impl[lax.asin_p] = tf.math.asin\ntf_impl[lax.atan_p] = tf.math.atan\ntf_impl[lax.atan2_p] = tf.math.atan2\ntf_impl[lax.acosh_p] = tf.math.acosh\ntf_impl[lax.atanh_p] = tf.math.atanh\ntf_impl[lax.asinh_p] = tf.math.asinh\n\ntf_impl[lax.sqrt_p] = tf.math.sqrt\ntf_impl[lax.rsqrt_p] = tf.math.rsqrt\n\ntf_impl[lax.lgamma_p] = tf.math.lgamma\ntf_impl[lax.digamma_p] = tf.math.digamma\ntf_impl[lax.igamma_p] = tf.math.igamma\ntf_impl[lax.igammac_p] = tf.math.igammac\ntf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc\ntf_impl[lax.erf_p] = tf.math.erf\ntf_impl[lax.erfc_p] = tf.math.erfc\ntf_impl[lax.erf_inv_p] = tf.math.erfinv\ntf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e\ntf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e\n\ntf_impl[lax.complex_p] = tf.complex\n\n\ndef _conj(x, **kwargs):\n # The only dtypes that are allowed are: float32, float64, complex64, and\n # complex128.\n if x.dtype == tf.float32:\n return tf.cast(x, tf.complex64)\n elif x.dtype == tf.float64:\n return tf.cast(x, tf.complex128)\n else:\n return tf.math.conj(x)\n\n\ntf_impl[lax.conj_p] = _conj\ntf_impl[lax.real_p] = tf.math.real\ntf_impl[lax.imag_p] = tf.math.imag\n\ntf_impl[lax.add_p] = _add\ntf_impl[lax.sub_p] = tf.math.subtract\ntf_impl[lax.mul_p] = tf.math.multiply\n\n\ndef _iota(*, dtype, shape, dimension):\n dtype = to_tf_dtype(dtype)\n # Some dtypes are unsupported, like uint32, so we just fall back to int32.\n # TODO(mattjj, necula): improve tf.range dtype handling\n shape_tf = _eval_shape(shape)\n vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)\n vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]\n return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)\n\n\ntf_impl[lax.iota_p] = _iota\n\n\ndef _div(lhs, rhs):\n if 
lhs.dtype.is_integer:\n quotient = tf.math.floordiv(lhs, rhs)\n select = tf.math.logical_and(\n tf.not_equal(_sign(lhs), _sign(rhs)),\n tf.not_equal(tf.math.floormod(lhs, rhs), 0))\n return tf.where(select, quotient + 1, quotient)\n else:\n return tf.math.truediv(lhs, rhs)\n\n\ndef _rem(lhs, rhs):\n return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))\n\n\ntf_impl[lax.div_p] = _div\ntf_impl[lax.rem_p] = _rem\n\ntf_impl[lax.max_p] = tf.math.maximum\ntf_impl[lax.min_p] = tf.math.minimum\n\n# Map from TF signed types to TF unsigned types.\n_SIGNED_TO_UNSIGNED_TABLE = {\n tf.int8: tf.uint8,\n tf.int16: tf.uint16,\n tf.int32: tf.uint32,\n tf.int64: tf.uint64,\n}\n\n# Map from TF unsigned types to TF signed types.\n_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}\n\n\n# Note: Bitwise operations only yield identical results on unsigned integers!\n# pylint: disable=protected-access\ndef _shift_right_arithmetic_raw(x, y):\n if x.dtype.is_unsigned:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]\n x = tf.cast(x, signed_dtype)\n y = tf.cast(y, signed_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n else:\n return tf.bitwise.right_shift(x, y)\n\n\ndef _shift_right_arithmetic(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA\n # semantics to return the shift by the max value (x_bits - 1).\n # TODO: it is likely better to add XlaOps for shifts\n x_bits = 8 * x.dtype.size\n clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)\n return _shift_right_arithmetic_raw(x, clamp_y)\n\n\ntf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic\n\n\ndef _shift_right_logical_raw(x, y):\n if x.dtype.is_unsigned:\n return tf.bitwise.right_shift(x, y)\n else:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]\n x = tf.cast(x, unsigned_dtype)\n y = tf.cast(y, unsigned_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n\n\ndef _shift_right_logical(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_right_logical_p] = _shift_right_logical\n\n\ndef _shift_left(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. 
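For example, shifting an int32 by 32\n # or more is out of bounds under this definition. 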
We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_left_p] = _shift_left\n\n\ndef _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:\n # Return the TF expression for when y is within bounds (0 <= y < |x|)\n x_bits = 8 * x.dtype.size\n # TF does not have comparisons for uint16 and uint32 (despite what the\n # documentation says)\n y_comp = tf.cast(\n y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y\n y_lt_x_bits = tf.math.less(y_comp, x_bits)\n y_ge_0 = tf.math.greater_equal(y_comp, 0)\n return tf.logical_and(y_lt_x_bits, y_ge_0)\n\n\ndef _not(x):\n \"\"\"Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexpected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply cast to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n \"\"\"\n if x.dtype == tf.bool:\n return tf.logical_not(x)\n else:\n return tf.bitwise.invert(x)\n\n\ntf_impl[lax.not_p] = _not\n\n\ndef bool_to_int8(f, argnums):\n \"\"\"Computes bool valued functions using int8.\"\"\"\n argnums = tf.nest.flatten(argnums)\n\n def wrapper(*args, **kwargs):\n if not any(args[i].dtype == tf.bool for i in argnums):\n return f(*args, **kwargs)\n else:\n args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)\n for i, a in enumerate(args)]\n if \"_in_avals\" in kwargs:\n\n def cast_aval(aval):\n return core.ShapedArray(aval.shape, np.int8)\n\n _in_avals_cast = [\n cast_aval(aval) if i in argnums else aval\n for i, aval in enumerate(kwargs[\"_in_avals\"])\n ]\n _out_aval_cast = tf.nest.map_structure(cast_aval, kwargs[\"_out_aval\"])\n kwargs = dict(\n kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)\n out = f(*args_cast, **kwargs)\n return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)\n\n return wrapper\n\n\ntf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))\ntf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))\ntf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))\n\ntf_impl[lax.eq_p] = tf.math.equal\ntf_impl[lax.ne_p] = tf.math.not_equal\ntf_impl[lax.ge_p] = tf.math.greater_equal\ntf_impl[lax.gt_p] = tf.math.greater\ntf_impl[lax.le_p] = tf.math.less_equal\ntf_impl[lax.lt_p] = tf.math.less\n\ntf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky\n\n\ndef _convert_element_type(operand, *, new_dtype, weak_type=False):\n old_dtype = operand.dtype.as_numpy_dtype\n if (dtypes.issubdtype(old_dtype, np.complexfloating) and\n not dtypes.issubdtype(new_dtype, np.complexfloating)):\n operand = tf.math.real(operand)\n if (dtypes.issubdtype(old_dtype, np.floating) and\n not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(\n new_dtype, np.complexfloating) or new_dtype == np.bool_)):\n sign = _sign(operand)\n operand = sign * tf.math.floor(sign * operand)\n return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.convert_element_type_p] = _convert_element_type\n\n\ndef _bitcast_convert_type(operand, new_dtype):\n return tf.bitcast(operand, to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type\n\n\ndef _clamp(minval, operand, maxval, *, 
_in_avals, _out_aval):\n # The below permits mirroring the behavior of JAX when maxval < minval\n op_shape_tf_val = _eval_shape(_in_avals[1].shape)\n maxval = tf.broadcast_to(maxval, op_shape_tf_val)\n minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)\n return tf.clip_by_value(operand, minval, maxval)\n\n\ntf_impl_with_avals[lax.clamp_p] = _clamp\n\n\ndef _concatenate(*operands, dimension):\n return tf.concat(operands, axis=dimension)\n\n\ntf_impl[lax.concatenate_p] = _concatenate\n\n\ndef _conv_general_dimension_numbers_proto(dimension_numbers):\n \"\"\"Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.\"\"\"\n assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n proto = xla_data_pb2.ConvolutionDimensionNumbers()\n proto.input_batch_dimension = lhs_spec[0]\n proto.input_feature_dimension = lhs_spec[1]\n proto.output_batch_dimension = out_spec[0]\n proto.output_feature_dimension = out_spec[1]\n proto.kernel_output_feature_dimension = rhs_spec[0]\n proto.kernel_input_feature_dimension = rhs_spec[1]\n proto.input_spatial_dimensions.extend(lhs_spec[2:])\n proto.kernel_spatial_dimensions.extend(rhs_spec[2:])\n proto.output_spatial_dimensions.extend(out_spec[2:])\n return proto\n\n\ndef _precision_config_proto(precision: Optional[Tuple[PrecisionType,\n PrecisionType]]):\n \"\"\"Convert an integer to an XLA.PrecisionConfig.\"\"\"\n if precision is None:\n return None\n\n proto = xla_data_pb2.PrecisionConfig()\n proto.operand_precision.append(int(precision[0]))\n proto.operand_precision.append(int(precision[1]))\n return proto\n\n\ndef _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count, batch_group_count,\n preferred_element_type: Optional[DType],\n out_shape) -> TfVal:\n\n def error(msg):\n suffix = (\"See source code for the precise conditions under which \"\n \"convolutions can be converted without XLA.\")\n return _xla_disabled_error(\"conv_general_dilated\", f\"{msg} - {suffix}\")\n\n # TODO(bchetioui): this function is not exhaustive wrt which convolution cases\n # can be translated into TF primitives. 
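(In particular, the checks below\n # require floating-point inputs, feature_group_count == batch_group_count\n # == 1, unit window strides, and 1-3 spatial dimensions.) 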
Further investigation is needed to\n # fully flesh it out.\n if lhs.dtype not in [tf.float16, tf.float32, tf.float64]:\n raise error(f\"tf.nn.convolution is not supported for dtype {lhs.dtype}\")\n if feature_group_count != 1:\n raise error(\"tf.nn.convolution does not support grouped convolutions\")\n # TODO(bchetioui): is there something to do with batch_group_count?\n if batch_group_count != 1:\n raise error(\"Unimplemented support for batch_group_count != 1\")\n nb_spatial_dimensions = len(lhs.shape) - 2\n # TF can only deal with 1D, 2D and 3D convolution\n if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:\n raise error(\"TensorFlow can only handle convolutions with 1, 2, or 3 \"\n \"spatial dimensions\")\n # TODO(bchetioui): handle different stride cases\n if list(window_strides) != [1] * nb_spatial_dimensions:\n raise error(\"Unimplemented support for window_strides != \"\n f\"{tuple([1] * nb_spatial_dimensions)}\")\n\n if preferred_element_type is not None and preferred_element_type != lhs.dtype:\n raise error(\"Unimplemented support for preferred_element_type\")\n\n def convert_padding() -> str:\n # TODO(bchetioui): in this instance, we can not use padtype_to_pads as\n # string padding is not implemented for transposed convolution.\n if list(lhs_dilation) != [1] * nb_spatial_dimensions:\n raise error(\"Padding conversion is not supported for transposed \"\n \"convolution.\")\n lhs_perm, rhs_perm, _ = dimension_numbers\n effective_rhs_shape = [\n (k - 1) * r + 1\n for k, r in zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)\n ]\n lhs_shape = np.take(lhs.shape, lhs_perm)[2:]\n # TF only allows 'VALID' and 'SAME' padding\n for pad_str in [\"VALID\", \"SAME\"]:\n gen_padding = lax.padtype_to_pads(\n lhs_shape, effective_rhs_shape, window_strides, pad_str)\n if list(gen_padding) == list(padding):\n return pad_str\n raise error(\"Input padding not supported in TensorFlow.\")\n\n def convert_dim_nums() -> str:\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n # TF only allows filters with shape:\n # spatial_filter_shape + [in_channels, out_channels]. In JAX however,\n # rhs_spec is represented as a tuple containing the following:\n # [out_channels, in_channels] + spatial_filter_shape.\n supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +\n list(range(nb_spatial_dimensions)))\n if list(rhs_spec) != supported_rhs_shape:\n raise error(\"Input filter (RHS) shape format not supported in \"\n \"TensorFlow.\")\n # TF only supports same LHS and output data format\n if lhs_spec != out_spec:\n raise error(\"TensorFlow requires the same data format for LHS and \"\n \"output.\")\n # Alphabet extracted from the documentation of tf.conv{1,2,3}d\n spatial_dim_alphabet = \"DHW\"[-nb_spatial_dimensions:]\n # TF only supports the following data formats:\n # - [batch_size, in_channels] + input_spatial_shape\n\n # TODO(bchetioui): TF currently does not support the above on CPU. 
To avoid\n # failing on this platform, this path is commented out for now.\n # if list(lhs_spec) == list(range(len(lhs_spec))):\n # return \"NC\" + spatial_dim_alphabet\n\n # - [batch_size] + input_spatial_shape + [in_channels]\n if list(lhs_spec) == ([0, len(lhs_spec) - 1] +\n list(range(1,\n len(lhs_spec) - 1))):\n return \"N\" + spatial_dim_alphabet + \"C\"\n raise error(\"Data format is unsupported by TensorFlow.\")\n\n def convert_dilation_and_compute_result(tf_padding: str,\n tf_dim_nums: str) -> TfVal:\n no_dilation = [1] * nb_spatial_dimensions\n # TODO(bchetioui): is there a generic way to do a transposed atrous\n # convolution in TensorFlow?\n if not (list(lhs_dilation) == no_dilation or\n list(rhs_dilation) == no_dilation):\n raise error(\"Both LHS and RHS dilations are set.\")\n # This is a non-dilated or atrous convolution\n if list(lhs_dilation) == no_dilation:\n return tf.nn.convolution(\n lhs,\n rhs,\n strides=window_strides,\n padding=tf_padding,\n data_format=tf_dim_nums,\n dilations=rhs_dilation)\n # TODO(bchetioui): the below path is unreachable for now, as passing a lhs\n # dilation to this function will result in convert_padding returning None\n # systematically. This must be investigated further.\n # Dilation of the LHS is transposed convolution\n return tf.nn.conv_transpose(\n lhs,\n rhs,\n out_shape,\n window_strides,\n padding=tf_padding,\n data_format=tf_dim_nums,\n dilations=lhs_dilation)\n\n tf_padding = convert_padding()\n tf_dim_nums = convert_dim_nums()\n return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)\n\n\ndef _conv_general_dilated(lhs, rhs, *,\n window_strides, padding, lhs_dilation,\n rhs_dilation,\n dimension_numbers: lax.ConvDimensionNumbers,\n feature_group_count: int,\n batch_group_count: int,\n lhs_shape: Sequence[int],\n rhs_shape: Sequence[int],\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n \"\"\"Implementation of lax.conv_general_dilated_p using XlaConv.\"\"\"\n out_tf_shape = _aval_to_tf_shape(_out_aval)\n if not _enable_xla:\n return _try_tf_conv(\n lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count, batch_group_count,\n preferred_element_type, out_tf_shape)\n\n dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)\n precision_config_proto = _precision_config_proto(precision)\n assert batch_group_count == 1 # TODO(necula): implement batch_group_count\n\n def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):\n out = tfxla.conv(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation,\n rhs_dilation,\n dnums_proto,\n feature_group_count=feature_group_count,\n precision_config=precision_config_proto,\n preferred_element_type=preferred_element_type)\n # TODO: implement shape inference for XlaConv\n out.set_shape(out_tf_shape)\n return out\n\n # Follow the lowering for complex convolutions from\n # lax._conv_general_dilated_translation. 
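(This is the classic\n # 3-multiplication trick: for (a+bi) * (c+di) it computes k1 = (a+b)c,\n # k2 = a(d-c) and k3 = b(c+d), so that real = k1 - k3 and imag = k1 + k2.) 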
We can use the same conversion on all\n # platforms because on XLA:TPU the compiler does the same as a rewrite.\n if np.issubdtype(_in_avals[0].dtype, np.complexfloating):\n if preferred_element_type is not None:\n # Convert complex dtype to types used for real and imaginary parts\n assert np.issubdtype(preferred_element_type, np.complexfloating)\n preferred_float_et = (\n np.float64 if preferred_element_type == np.complex128 else np.float32)\n else:\n preferred_float_et = None\n lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)\n rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)\n k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)\n k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),\n preferred_float_et)\n k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)\n return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))\n else:\n return gen_conv(lhs, rhs, preferred_element_type)\n\n\ntf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated\n\n\ndef _dot_general(lhs, rhs, *, dimension_numbers,\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n \"\"\"Implementation of lax.dot_general_p in terms of tf.linalg.einsum.\"\"\"\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n lhs_ndim, rhs_ndim = len(lhs.shape), len(rhs.shape)\n if _enable_xla:\n dnums_proto = xla_data_pb2.DotDimensionNumbers()\n dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)\n dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)\n dnums_proto.lhs_batch_dimensions.extend(lhs_batch)\n dnums_proto.rhs_batch_dimensions.extend(rhs_batch)\n precision_config_proto = _precision_config_proto(precision)\n res = tfxla.dot_general(\n lhs,\n rhs,\n dnums_proto,\n precision_config_proto,\n preferred_element_type=preferred_element_type)\n # TODO: in presence of None dimensions, XlaDot shape inference returns\n # unknown shape.\n res.set_shape(_aval_to_tf_shape(_out_aval))\n return res\n\n # This condition ensures that:\n # 1) the batch dimensions are ordered in the same way in lhs and rhs (this is\n # not strictly necessary, but we would have to reshape the array if that\n # were not the case;\n # 2) lhs and rhs have the same number of dimensions +/- 1\n # 3) the number of non-batch dimensions in both tensors is either 1 or 2\n # 4) the contracting dimensions are consistent with those of a classic\n # matrix/matrix, vector/matrix or matrix/vector multiplication.\n if (lhs_batch == rhs_batch == tuple(range(len(lhs_batch))) and\n lhs_ndim - rhs_ndim in [-1, 0, 1] and\n 1 <= lhs_ndim - len(lhs_batch) <= 2 and\n 1 <= rhs_ndim - len(rhs_batch) <= 2 and\n lhs_contracting == (len(lhs.shape) - 1,) and\n rhs_contracting == (len(lhs_batch),)):\n # All the inputs to tf.linalg.matmul must have 2 inner dimensions,\n # after their batch dimensions, so we need to expand the dimensions\n # appropriately. We can get to this branch with three combinations of\n # inner shapes:\n # - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]\n # - in this case, the resulting inner shape is [a, c];\n # - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]\n # - in this case, we need to expand lhs to [1, b], and the resulting\n # shape is [c]. 
We need to squeeze the result of tf.linalg.matmul\n # as it will have shape [1, c];\n # - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]\n # - in this case, we need to expand rhs to [b, 1], and the resulting\n # shape is [a]. We need to squeeze the result of tf.linalg.matmul\n # as it will have shape [a, 1];\n # - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]\n # - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],\n # and the resulting shape is (). We need to squeeze the result of\n # tf.linalg.matmul as it will have shape [1, 1].\n squeeze_idxs = []\n if lhs_ndim - len(lhs_batch) == 1:\n lhs = tf.expand_dims(lhs, lhs_ndim - 1)\n squeeze_idxs.append(len(lhs.shape) - 2)\n if rhs_ndim - len(rhs_batch) == 1:\n rhs = tf.expand_dims(rhs, rhs_ndim)\n squeeze_idxs.append(len(rhs.shape) - 1)\n result = tf.linalg.matmul(lhs, rhs)\n if len(squeeze_idxs) != 0:\n assert all([result.shape[i] == 1 for i in squeeze_idxs])\n result = tf.squeeze(result, squeeze_idxs)\n return result\n\n new_id = iter(string.ascii_letters)\n lhs_axis_ids = [next(new_id) for _ in lhs.shape]\n rhs_axis_ids = [next(new_id) for _ in rhs.shape]\n lhs_out_axis_ids = lhs_axis_ids[:]\n rhs_out_axis_ids = rhs_axis_ids[:]\n\n for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]\n rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]\n\n batch_ids = []\n for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]\n rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]\n batch_ids.append(shared_id)\n\n not_none = lambda x: x is not None\n out_axis_ids = list(\n filter(not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))\n assert lhs.dtype == rhs.dtype\n spec = \"{},{}->{}\".format(\"\".join(lhs_axis_ids), \"\".join(rhs_axis_ids),\n \"\".join(out_axis_ids))\n return tf.linalg.einsum(spec, lhs, rhs)\n\n\ntf_impl_with_avals[lax.dot_general_p] = _dot_general\n\n\ndef _broadcast(operand, *, sizes):\n result_shape = tf.TensorShape(sizes).concatenate(operand.shape)\n return tf.broadcast_to(operand, result_shape)\n\n\ntf_impl[lax.broadcast_p] = _broadcast\n\n\ndef _broadcast_in_dim(operand, *, shape, broadcast_dimensions):\n inshape = [1] * len(shape)\n for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):\n if orig_shape_i != 1:\n inshape[broadcast_dim_i] = shape[broadcast_dim_i]\n inshape_tf = _eval_shape(inshape)\n shape_tf = _eval_shape(shape)\n return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)\n\n\ntf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim\n\n\ndef _reshape(operand, *, new_sizes, dimensions):\n if dimensions is None:\n dimensions = tf.range(tf.rank(operand))\n new_sizes_tf = _eval_shape(new_sizes)\n return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)\n\n\ntf_impl[lax.reshape_p] = _reshape\n\n\ndef _squeeze(operand, *, dimensions, _in_avals, _out_aval):\n op_shape = _in_avals[0].shape\n new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)\n new_shape_tf = _eval_shape(new_shape)\n return tf.reshape(operand, new_shape_tf)\n\n\ntf_impl_with_avals[lax.squeeze_p] = _squeeze\n\n\ndef _pad(operand, padding_value, *, padding_config,\n _in_avals: 
Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n del _in_avals\n low, high, interior = util.unzip3(padding_config)\n if _enable_xla:\n out = tfxla.pad(operand, padding_value, low, high, interior)\n return out\n\n if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):\n return tf.pad(\n operand,\n util.safe_zip(low, high),\n mode=\"CONSTANT\",\n constant_values=padding_value)\n raise _xla_disabled_error(\"pad\", \"Only use cases without interior or negative padding can be converted without XLA.\")\n\ntf_impl_with_avals[lax.pad_p] = _pad\n\n\ndef _rev(operand, *, dimensions):\n return tf.reverse(operand, dimensions)\n\n\ntf_impl[lax.rev_p] = _rev\n\ntf_impl[lax.select_p] = tf.where\n\n\ndef _transpose(operand, *, permutation):\n return tf.transpose(operand, perm=permutation)\n\n\ntf_impl[lax.transpose_p] = _transpose\n\naxes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)\n\ntf_impl[lax.reduce_sum_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))\ntf_impl[lax.reduce_prod_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))\ntf_impl[lax.reduce_max_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))\ntf_impl[lax.reduce_min_p] = (\n bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))\ntf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)\ntf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)\n\n\ndef _argminmax(fn, operand, axes, index_dtype):\n axis, = axes\n output_type = tf.int32\n if dtypes.iinfo(index_dtype).bits > 32:\n output_type = tf.int64\n # TODO(phawkins): handle axes larger than 2^31.\n result = fn(operand, axis=axis, output_type=output_type)\n return tf.cast(result, to_tf_dtype(index_dtype))\n\n\ntf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)\ntf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)\n\n_add_fn = tf.function(_add, autograph=False)\n_ge_fn = tf.function(tf.math.greater_equal, autograph=False)\n\n\ndef _select_and_gather_add(\n tangents: TfVal, operand: TfVal, select_prim: core.Primitive,\n window_dimensions: Sequence[int], window_strides: Sequence[int],\n base_dilation: Sequence[int], window_dilation: Sequence[int],\n padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n # Note: this function follows the pattern in\n # jax.lax._select_and_gather_add_translation.\n dtype = operand.dtype\n nbits = dtypes.finfo(dtype.as_numpy_dtype).bits\n\n # Specializing the function for 64 bits. 
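(The idea: pack each (operand, tangent)\n # pair into one double-width integer word, reduce with max/min keyed on the\n # operand half, then unpack the tangent half.) 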
Only up to 32 bits are supported on TPU,\n # so we let the code throw a different exception on that platform.\n max_bits = 64\n\n assert nbits <= max_bits\n double_word_reduction = nbits * 2 <= max_bits\n\n const = lambda dtype, x: tf.constant(np.array(x), dtype)\n\n if double_word_reduction:\n word_dtype = lax._UINT_DTYPES[nbits]\n double_word_dtype = lax._UINT_DTYPES[nbits * 2]\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = _bitcast_convert_type(a, word_dtype)\n b = _bitcast_convert_type(b, word_dtype)\n a = _convert_element_type(a, new_dtype=double_word_dtype)\n b = _convert_element_type(b, new_dtype=double_word_dtype)\n a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))\n return tf.bitwise.bitwise_or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(t):\n assert t.dtype == double_word_dtype\n st = _shift_right_logical(t, const(double_word_dtype, nbits))\n return _bitcast_convert_type(\n _convert_element_type(st, new_dtype=word_dtype), dtype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return _bitcast_convert_type(\n _convert_element_type(t, new_dtype=word_dtype), dtype)\n\n else:\n raise NotImplementedError(\n f\"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.\"\n )\n\n assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim\n\n def reducer(x, y):\n which = tf_impl[select_prim]\n return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)\n\n init = -np.inf if select_prim is lax.ge_p else np.inf\n init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))\n\n out = _specialized_reduce_window(\n reducer,\n init_identity,\n pack(operand, tangents),\n window_dimensions=window_dimensions,\n window_strides=window_strides,\n padding=padding,\n base_dilation=base_dilation,\n window_dilation=window_dilation,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n return snd(out)\n\n\ntf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add\n\n\ndef _get_shape_from_tensor_or_array(x):\n if isinstance(x.shape, tf.TensorShape):\n return tuple(x.shape.as_list())\n return tuple(x.shape)\n\n\ndef _common_reduce_window(operand, init_val, reducer, window_dimensions,\n window_strides, padding, base_dilation,\n window_dilation, _in_avals, _out_aval):\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n reducer_fn = tf.function(\n reducer, autograph=False).get_concrete_function(o_spec, o_spec)\n\n if not isinstance(init_val, tf.Tensor):\n assert not config.jax_enable_checks or _is_tfval(\n init_val), f\"Non TfVal: {init_val}\"\n init_val = tf.constant(init_val, operand.dtype)\n out = tfxla.reduce_window(\n operand,\n init_val,\n reducer_fn,\n window_dimensions,\n window_strides,\n base_dilations=base_dilation,\n window_dilations=window_dilation,\n padding=padding)\n # TODO: implement shape inference for XlaReduceWindow\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ndef _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,\n window_strides, padding, base_dilation, window_dilation,\n _in_avals, _out_aval):\n \"\"\"TensorFlow implementation of reduce_window.\n\n Args:\n operand: N dimensional array containing elements of type T\n init_value: starting value of the reduction\n jaxpr: the jaxpr corresponding to the reduction function\n consts: the constants associated with jaxpr.\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs of 
integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n\n Returns:\n The reduced operand.\n \"\"\"\n assert len(consts) == 0, \"Reduction computation cannot have constants\"\n\n if not _enable_xla:\n raise _xla_disabled_error(\"reduce_window\")\n\n def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)\n return res\n\n return _common_reduce_window(operand, init_value, reducer, window_dimensions,\n window_strides, padding, base_dilation,\n window_dilation, _in_avals, _out_aval)\n\n\n# _try_tf_pool currently only supports reduce_window_max and reduce_window_sum.\n# TODO(bchetioui): this function is not exhaustive wrt which\n# reduce_window_max or reduce_window_sum cases can be translated into a call to\n# max_pool or avg_pool. Further investigation is needed to fully flesh it out.\ndef _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,\n base_dilation, window_dilation) -> TfVal:\n\n def error(msg):\n suffix = (\"See source code for the precise conditions under which \"\n \"reduce_window can be converted without XLA.\")\n return _xla_disabled_error(\"reduce_window\", f\"{msg} - {suffix}\")\n\n dtype = operand.dtype\n # Contrarily to the main path, tf.int8 is actually a valid type for\n # tf.nn.max_pool.\n if op_name == \"reduce_window_max\" and dtype in [\n tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128\n ]:\n raise error(f\"tf.nn.max_pool does not support operands of type {dtype}\")\n if op_name == \"reduce_window_sum\" and operand.dtype not in [\n tf.float16, tf.float32, tf.float64\n ]:\n raise error(f\"tf.nn.avg_pool does not support operands of type {dtype}\")\n has_batch_dim = window_dimensions[0] == 1\n has_channel_dim = window_dimensions[-1] == 1\n nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim\n if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:\n raise error(\"TensorFlow can only handle pooling for arrays with 1, 2, or \"\n \"3 spatial dimensions\")\n # TODO(bchetioui): does a simple conversion with another base dilation exist?\n if list(base_dilation) != [1] * len(operand.shape):\n raise error(\"Unimplemented support for base dilation\")\n # TODO(bchetioui): does a simple conversion with another window_dilation\n # exist? 
The whole story seems similar to convolution.\n if list(window_dilation) != [1] * len(operand.shape):\n raise error(\"Unimplemented support for window dilation\")\n if list(padding) != [(0, 0)] * len(operand.shape):\n raise error(\"Unimplemented support for padding\")\n # ReduceWindow in XLA takes an array of rank N as a parameter, but\n # tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default\n # shape of the form [batch_size] + input_spatial_shape + [num_channels]\n tf_operand = operand\n tf_window_dimensions = list(window_dimensions)\n tf_window_strides = list(window_strides)\n if not has_batch_dim:\n tf_operand = tf.expand_dims(tf_operand, 0)\n tf_window_dimensions = [1] + tf_window_dimensions\n tf_window_strides = [1] + tf_window_strides\n if not has_channel_dim:\n tf_operand = tf.expand_dims(tf_operand, -1)\n tf_window_dimensions.append(1)\n tf_window_strides.append(1)\n tf_data_format = \"N\" + \"DHW\"[-nb_spatial_dimensions:] + \"C\"\n tf_padding = \"VALID\"\n if op_name == \"reduce_window_max\":\n result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,\n tf_padding, tf_data_format)\n elif op_name == \"reduce_window_sum\":\n avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,\n tf_padding, tf_data_format)\n result = avg * np.prod(tf_window_dimensions)\n else:\n raise error(f\"Unimplemented support for {op_name}\")\n\n if not has_batch_dim:\n result = tf.squeeze(result, 0)\n if not has_channel_dim:\n result = tf.squeeze(result, -1)\n return result\n\n\ndef _specialized_reduce_window(reducer,\n identity,\n operand,\n *,\n window_dimensions,\n window_strides,\n padding,\n base_dilation,\n window_dilation,\n _in_avals,\n _out_aval,\n name=None):\n \"\"\"Wraps the TensorFlow reduce window operation based on a reducer and an\n\n identity function defining the initial value of the reduction depending on\n the dtype of the operand.\n\n Args:\n reducer: reduction function of type TfVal -> TfVal -> TfVal\n identity: function that takes a TensorFlow dtype as a parameter and returns\n the starting value of the reduction.\n operand: N dimensional array containing elements of type T\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n name: the name of the specialized reduce window primitive for which this\n conversion function is called. 
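(Currently it is used to route\n reduce_window_max and reduce_window_sum to _try_tf_pool when XLA is\n disabled.) 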
This information may help to choose a\n different conversion path (optional)\n\n Returns:\n The reduced operand.\n \"\"\"\n if not _enable_xla and name in [\"reduce_window_max\", \"reduce_window_sum\"]:\n return _try_tf_pool(name, operand, window_dimensions, window_strides,\n padding, base_dilation, window_dilation)\n\n return _common_reduce_window(operand, identity(operand.dtype), reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval)\n\n\n\ndef _get_max_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(-np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).min\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined max identity\")\n return False\n\n\ndef _get_min_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).max\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined min identity\")\n return True\n\n\n# pylint: disable=protected-access\ntf_impl_with_avals[lax.reduce_window_sum_p] = (\n functools.partial(\n _specialized_reduce_window, _add, lambda x: 0,\n name=\"reduce_window_sum\"))\ntf_impl_with_avals[lax.reduce_window_min_p] = (\n functools.partial(\n _specialized_reduce_window,\n tf.math.minimum,\n _get_min_identity,\n name=\"reduce_window_min\"))\ntf_impl_with_avals[lax.reduce_window_max_p] = (\n functools.partial(\n _specialized_reduce_window,\n tf.math.maximum,\n _get_max_identity,\n name=\"reduce_window_max\"))\ntf_impl_with_avals[lax.reduce_window_p] = _reduce_window\n# pylint: enable=protected-access\n\n# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,\n# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is\n# O(n^2) on other backends. This may be implemented using associative_scan\n# instead to favor different backends.\ntf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(\n functools.partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_min),\n multiple_results=False)\ntf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(\n functools.partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_max),\n multiple_results=False)\n# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for\n# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes\n# will fail when running in compiled mode, but are otherwise compatible with\n# the operation. 
A non-XLA path can thus be defined for all dtypes, though the\n# tests will crash.\ntf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(\n functools.partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_sum),\n multiple_results=False)\ntf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(\n functools.partial(lax_control_flow._cumred_tpu_translation_rule,\n lax._reduce_window_prod),\n multiple_results=False)\n\n\ndef _select_and_scatter(operand, source, init_value, select_jaxpr,\n select_consts, scatter_jaxpr, scatter_consts,\n window_dimensions, window_strides, padding):\n raise NotImplementedError(\"TODO: jax2tf can not convert _select_and_scatter\")\n\n\ntf_impl[lax.select_and_scatter_p] = _select_and_scatter\n\n\n@functools.partial(bool_to_int8, argnums=(0, 1))\ndef _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,\n window_strides, padding, _in_avals, _out_aval):\n if not _enable_xla:\n raise _xla_disabled_error(\"select_and_scatter_add\")\n init_value = tf.zeros((), operand.dtype)\n select_fn = (\n tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(\n init_value, init_value))\n scatter_fn = _add_fn.get_concrete_function(init_value, init_value)\n out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,\n padding, source, init_value, select_fn,\n scatter_fn)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add\n\n\ndef _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):\n res = _convert_jax_impl(\n functools.partial(\n jax._src.random._threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True)(\n *args, _in_avals=_in_avals, _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl\n\n# Use the vmap implementation, otherwise on TPU the performance is really bad\n# With use_vmap=True on, we get about the same performance for JAX and jax2tf.\ntf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(\n functools.partial(jax._src.random._gamma_impl, use_vmap=True),\n multiple_results=False)\n\n\ndef _gather_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.GatherDimensionNumbers()\n proto.offset_dims.extend(dimension_numbers.offset_dims)\n proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)\n proto.start_index_map.extend(dimension_numbers.start_index_map)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\n@functools.partial(bool_to_int8, argnums=0)\ndef _gather(operand, start_indices, *, dimension_numbers, slice_sizes,\n _in_avals, _out_aval):\n \"\"\"Tensorflow implementation of gather.\"\"\"\n del _in_avals\n if not _enable_xla:\n raise _xla_disabled_error(\"gather\")\n proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)\n slice_sizes_tf = _eval_shape(slice_sizes)\n out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.gather_p] = _gather\n\n\ndef _slice(operand, start_indices, limit_indices, strides, _in_avals,\n _out_aval):\n if strides is None:\n strides = [1] * len(start_indices)\n slices = tuple(\n map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),\n _eval_shape(strides)))\n out = operand[slices]\n # TODO(b/184503314): improve shape inference for __getitem__\n 
out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.slice_p] = _slice\n\n\ndef _dynamic_slice(operand, *start_indices, slice_sizes,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Here we could use tf.slice. Similarly, for lax.gather we can sometimes use\n # tf.gather. But those have different semantics for index-out-of-bounds than\n # JAX (and XLA). We have tried to force compilation, by wrapping into\n # tf.xla.experimental.compile, or tf.function(jit_compile=True), but\n # those solutions are brittle because they do not work when nested into an\n # outer compilation (see b/162814494 and b/163006262). They also do not\n # survive well being put in a SavedModel. Hence, we now use TFXLA slicing\n # and gather ops.\n if not _enable_xla:\n raise _xla_disabled_error(\"dynamic_slice\")\n res = tfxla.dynamic_slice(\n operand, tf.stack(start_indices), size_indices=_eval_shape(slice_sizes))\n # TODO: implement shape inference for XlaDynamicSlice\n res.set_shape(_aval_to_tf_shape(_out_aval))\n return res\n\n\ntf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice\n\n\ndef _scatter_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.ScatterDimensionNumbers()\n proto.update_window_dims.extend(dimension_numbers.update_window_dims)\n proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)\n proto.scatter_dims_to_operand_dims.extend(\n dimension_numbers.scatter_dims_to_operand_dims)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\ndef _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,\n dimension_numbers, indices_are_sorted, unique_indices,\n _in_avals: Sequence[core.AbstractValue],\n _out_aval: core.AbstractValue):\n del unique_indices, _in_avals\n assert len(update_consts) == 0, \"Update computation cannot have constants\"\n\n if not _enable_xla:\n raise _xla_disabled_error(\"scatter\")\n\n proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)\n\n def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)\n return res\n\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n xla_update_computation = (\n tf.function(update_computation,\n autograph=False).get_concrete_function(o_spec, o_spec))\n out = tfxla.scatter(\n operand,\n scatter_indices,\n updates,\n xla_update_computation,\n proto,\n indices_are_sorted=indices_are_sorted)\n # TODO: implement shape analysis for XlaScatter\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.scatter_p] = _scatter\ntf_impl_with_avals[lax.scatter_min_p] = _scatter\ntf_impl_with_avals[lax.scatter_max_p] = _scatter\ntf_impl_with_avals[lax.scatter_mul_p] = _scatter\ntf_impl_with_avals[lax.scatter_add_p] = _scatter\n\n\ndef _dynamic_update_slice(operand, update, *start_indices):\n if not _enable_xla:\n raise _xla_disabled_error(\"dynamic_update_slice\")\n return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))\n\n\ntf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice\n\n\ndef _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],\n linear: Sequence[bool]) -> Sequence[TfVal]:\n del linear\n # tf.cond needs lambdas with no arguments.\n branches_tf = [\n functools.partial(_interpret_jaxpr, jaxpr, *operands)\n for jaxpr in branches\n ]\n return tf.switch_case(index, 
branches_tf)\n\n\ntf_impl[lax_control_flow.cond_p] = _cond\n\n\ndef _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,\n body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]\n # The conditional is not a scalar, this must be a batched while\n return _batched_cond_while(\n *args,\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr,\n body_nconsts=body_nconsts,\n body_jaxpr=body_jaxpr)\n\n # The conditional must return a single value to TF\n def cond_tf_func(*args: TfVal) -> TfVal:\n pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)\n return pred\n\n body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)\n return tf.while_loop(cond_tf_func, body_tf_func, init_carry)\n\n\ndef _batched_cond_while(*args: TfVal, cond_nconsts: int,\n cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,\n body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n \"\"\"Interprets a while_loop with a batched condition.\n\n A batched while has a conditional that returns a tensor of booleans, and\n a body that returns a list of tensors whose leading dimensions match those\n of the conditional tensor.\n\n We need to turn it into a while with scalar boolean conditional. We will\n expand the loop carry to include a prefix with the current tensor boolean\n condition. We prepend to the loop the first calculation of the tensor boolean\n condition. The loop condition will use a \"reduce_any\" to calculate a scalar\n boolean from the tensor boolean condition. The end of the loop body will\n compute the new carry using a \"tf.where\", and we compute the new tensor\n boolean condition.\n \"\"\"\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n # Initial computation of batched condition\n init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)\n assert init_pred_b is not core.unit\n\n def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:\n pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))\n return pred\n\n def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:\n new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,\n *carry)\n\n def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:\n pred_b_bcast = _broadcast_in_dim(\n pred_b,\n shape=new_c.shape,\n broadcast_dimensions=list(range(len(pred_b.shape))))\n return tf.where(pred_b_bcast, new_c, c)\n\n selected_carry: Sequence[TfVal] = list(\n util.safe_map(select_one_carry, new_carry, carry))\n next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)\n return (next_pred_b, *selected_carry)\n\n _, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,\n (init_pred_b, *init_carry))\n return res_carry\n\n\ntf_impl[lax_control_flow.while_p] = _while\n\n# We use the scan impl rule to rewrite in terms of while.\ntf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(\n lax_control_flow._scan_impl)\n\n\ndef _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:\n # Some types originally incompatible with tf.math.top_k can be promoted\n # to a compatible type without loss of precision.\n def promote_tf_dtype(tf_dtype):\n if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:\n return tf.uint32\n if tf_dtype in [tf.int8, tf.int16]:\n return tf.int32\n if tf_dtype is tf.float16:\n return tf.float32\n return None\n\n conversion_dtype = 
promote_tf_dtype(operand.dtype)\n if conversion_dtype:\n values, indices = tf.math.top_k(\n tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)\n return tf.dtypes.cast(values, operand.dtype), indices\n else:\n return tf.math.top_k(operand, k=k, sorted=True)\n\n\ntf_impl[lax.top_k_p] = _top_k\n\n\ndef _sort(*operands: TfVal, dimension: int, is_stable: bool,\n num_keys: int) -> Tuple[TfVal, ...]:\n if not _enable_xla:\n raise _xla_disabled_error(\"sort\")\n assert 1 <= num_keys <= len(operands)\n assert 0 <= dimension < len(\n operands[0].shape\n ), f\"Invalid {dimension} for ndim {len(operands[0].shape)}\"\n\n # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]\n # corresponding to two scalars from operand[k].\n def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:\n assert len(tf_args) == 2 * len(operands)\n # We build a comparison:\n # arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))\n # all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]\n inside_comparison = None\n for key_idx in range(num_keys - 1, -1, -1):\n a = tf_args[2 * key_idx]\n b = tf_args[2 * key_idx + 1]\n a_lt_b = tf.math.less(a, b)\n if inside_comparison is None:\n inside_comparison = a_lt_b\n else:\n inside_comparison = tf.math.logical_or(\n a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))\n return inside_comparison\n\n comparator_spec: List[tf.TensorSpec] = []\n comparator_jax_in_avals: List[core.AbstractValue] = []\n for op in operands:\n o_spec = tf.TensorSpec((), dtype=op.dtype)\n comparator_spec.extend([o_spec, o_spec])\n o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))\n comparator_jax_in_avals.extend([o_aval, o_aval])\n\n # Use the same comparator that JAX uses when compiling to XLA, to get the\n # proper NaN/Inf total order, and the lexicographic ordering.\n # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]\n # corresponding to two scalars from operand[k].\n def lexicographic_comparator(*tf_args: TfVal) -> TfVal:\n return _convert_jax_impl(\n lax._sort_lt_comparator, multiple_results=False)(\n *tf_args,\n _in_avals=comparator_jax_in_avals,\n _out_aval=core.ShapedArray((), np.bool_),\n num_keys=num_keys)\n\n xla_comparator_computation = (\n tf.function(lexicographic_comparator,\n autograph=False).get_concrete_function(*comparator_spec))\n results = tfxla.variadic_sort(\n operands,\n dimension=dimension,\n is_stable=is_stable,\n comparator=xla_comparator_computation)\n return results\n\n\ntf_impl[lax.sort_p] = _sort\n\n\ndef _fft(x, fft_type, fft_lengths):\n FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))\n if fft_type == IRFFT:\n expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)\n else:\n expected_lengths = x.shape[-len(fft_lengths):]\n if expected_lengths != fft_lengths:\n raise NotImplementedError(\n f\"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of \"\n f\"array with shape={x.shape}.\")\n tf_funcs = {\n FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],\n IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],\n RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],\n IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]\n }\n return tf_funcs[fft_type][len(fft_lengths) - 1](x)\n\n\ntf_impl[lax_fft.fft_p] = _fft\n\n\ndef _qr(operand, full_matrices):\n return tf.linalg.qr(operand, full_matrices=full_matrices)\n\n\ntf_impl[lax_linalg.qr_p] = _qr\n\n\ndef _svd(operand, full_matrices, compute_uv):\n result = 
tf.linalg.svd(operand, full_matrices, compute_uv)\n if not compute_uv:\n return result,\n s, u, v = result\n return s, u, tf.linalg.adjoint(v)\n\n\ntf_impl[lax_linalg.svd_p] = _svd\n\n\ndef _eig(operand: TfVal, compute_left_eigenvectors: bool,\n compute_right_eigenvectors: bool):\n if compute_left_eigenvectors and compute_right_eigenvectors:\n # TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to\n # sort the left eigenvectors in the right order. The jax.numpy.linalg API\n # suggests to me that left eigenvectors are anyway seldom used, so I\n # think it is acceptable to leave as unimplemented for now.\n msg = (\"Conversion of eig is not implemented when both \"\n \"compute_left_eigenvectors and compute_right_eigenvectors are set \"\n \"to True.\")\n raise NotImplementedError(msg)\n elif not (compute_left_eigenvectors or compute_right_eigenvectors):\n return tuple([tf.linalg.eigvals(operand)])\n elif compute_right_eigenvectors:\n return tuple(tf.linalg.eig(operand))\n else: # compute_left_eigenvectors == True\n wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))\n wHH = tf.math.conj(wH)\n return tuple([wHH, vl])\n\n\ntf_impl[lax_linalg.eig_p] = _eig\n\n\ndef _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):\n if operand.shape[-1] == 0:\n v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))\n else:\n if not lower:\n operand = tf.linalg.adjoint(operand)\n w, v = tf.linalg.eigh(operand)\n cast_type = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(operand.dtype)\n if cast_type is not None:\n w = tf.cast(w, cast_type)\n return v, w\n\n\ntf_impl_with_avals[lax_linalg.eigh_p] = _eigh\n\n\ndef _lu(operand: TfVal, _in_avals, _out_aval):\n return _convert_jax_impl(lax_linalg._lu_python)(\n operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax_linalg.lu_p] = _lu\n\n\ndef _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,\n transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if unit_diagonal:\n a_aval, _ = _in_avals\n a_shape = _eval_shape(a_aval.shape)\n a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))\n if not left_side:\n rank = len(a.shape)\n transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]\n a = tf.transpose(a, transpose_dimensions)\n b = tf.transpose(b, transpose_dimensions)\n lower = not lower\n # adjoint == transpose for real dtypes, so special care need only be taken\n # for complex types.\n if a.dtype in [tf.complex64, tf.complex128]:\n if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):\n a = tf.math.conj(a)\n result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)\n if not left_side:\n result = tf.transpose(result, transpose_dimensions)\n return result\n\n\ntf_impl_with_avals[lax_linalg.triangular_solve_p] = _triangular_solve\n\n\ndef _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):\n return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(\n *args,\n const_lengths=const_lengths,\n jaxprs=jaxprs,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve\n\n\ndef _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n jvp_jaxpr_thunk: Callable,\n num_consts: int) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, 
*args)\n\n\ntf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr\n\n\ndef _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n **_) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args)\n\n\ntf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr\n\n\ndef _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:\n raise TypeError(\"can't apply forward-mode autodiff (jvp) to a custom_vjp \"\n \"function.\")\n\n\ntf_impl[ad.custom_lin_p] = _custom_lin\n\n\ndef split_to_logical_devices(tensor: TfVal,\n partition_dimensions: pxla.PartitionsOrReplicated):\n \"\"\"Like TPUMPStrategy.experimental_split_to_logical_devices.\n\n For jax2tf purposes we want to avoid needing to thread the `strategy` object\n through the generated computation. It seems that the original function needs\n the strategy object only for error checking, which we assume is done upstream\n by JAX.\n\n Args:\n tensor: Input tensor to annotate.\n partition_dimensions: A list of integers, with one integer per tensor\n dimension, specifying in how many parts the dimension should be split. The\n product of integers must equal the number of devices per replica.\n use_sharding_op: whether to use a sharding op, or not.\n\n Returns:\n an annotated tensor.\n \"\"\"\n # This corresponds to the sharding annotations in\n # xla_bridge._sharding_to_proto.\n if partition_dimensions is None:\n return xla_sharding.replicate(tensor, use_sharding_op=True)\n num_partition_splits = np.prod(partition_dimensions)\n tile_assignment = np.arange(num_partition_splits).reshape(\n partition_dimensions)\n return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)\n\n\ndef _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],\n in_parts: Sequence[pxla.PartitionsOrReplicated],\n out_parts_thunk,\n **_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:\n sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)\n vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel\n out_parts_flat = out_parts_thunk()\n assert len(out_parts_flat) == len(\n vals_out), f\"expected {len(out_parts_flat)} == {len(vals_out)}\"\n sharded_vals_out = [\n (split_to_logical_devices(val, val_part), val_aval)\n for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)\n ]\n return sharded_vals_out\n\n\ndef _sharding_constraint(arg: TfVal, *,\n partitions: pxla.PartitionsOrReplicated):\n return split_to_logical_devices(arg, partitions)\n\n\ntf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint\n\n\ndef _register_checkpoint_pytrees():\n \"\"\"Registers TF custom container types as pytrees.\"\"\"\n m = tf.Module()\n # The types here are automagically changed by TensorFlow's checkpointing\n # infrastructure.\n m.a = (tf.Module(), tf.Module())\n m.b = [tf.Module(), tf.Module()]\n m.c = {\"a\": tf.Module()}\n tuple_wrapper = type(m.a)\n list_wrapper = type(m.b)\n dict_wrapper = type(m.c)\n\n # TF AutoTrackable swaps container types out for wrappers.\n assert tuple_wrapper is not tuple\n assert list_wrapper is not list\n assert dict_wrapper is not dict\n\n jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:\n (tuple(xs), None), lambda _, xs: tuple(xs))\n\n jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),\n lambda _, xs: list(xs))\n\n jax.tree_util.register_pytree_node(\n dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())),\n lambda k, xs: dict(zip(k, 
xs)))\n\n\n_register_checkpoint_pytrees()\n" ]
[ [ "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.tile", "tensorflow.ones", "tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.replicate", "tensorflow.compiler.tf2xla.python.xla.dot_general", "tensorflow.zeros_like", "tensorflow.clip_by_value", "tensorflow.stack", "tensorflow.nn.avg_pool", "tensorflow.compiler.tf2xla.python.xla.reduce_window", "tensorflow.TensorShape", "tensorflow.constant", "numpy.prod", "tensorflow.math.real", "tensorflow.compiler.xla.xla_data_pb2.ScatterDimensionNumbers", "tensorflow.where", "tensorflow.expand_dims", "tensorflow.raw_ops.AddV2", "tensorflow.nn.convolution", "tensorflow.math.floormod", "tensorflow.logical_and", "tensorflow.math.imag", "tensorflow.math.multiply", "tensorflow.math.truediv", "tensorflow.linalg.eig", "tensorflow.compiler.tf2xla.python.xla.gather", "tensorflow.logical_not", "tensorflow.reverse", "tensorflow.broadcast_to", "tensorflow.math.top_k", "numpy.issubdtype", "tensorflow.raw_ops.PopulationCount", "tensorflow.identity", "tensorflow.shape", "tensorflow.transpose", "tensorflow.squeeze", "numpy.arange", "tensorflow.compiler.xla.xla_data_pb2.DotDimensionNumbers", "numpy.array", "tensorflow.zeros", "tensorflow.math.reciprocal", "tensorflow.linalg.matmul", "tensorflow.name_scope", "tensorflow.compiler.tf2xla.python.xla.scatter", "tensorflow.bitwise.bitwise_or", "tensorflow.compiler.tf2xla.python.xla.pad", "tensorflow.raw_ops.PreventGradient", "tensorflow.math.subtract", "tensorflow.linalg.svd", "tensorflow.math.sign", "tensorflow.reshape", "tensorflow.cast", "tensorflow.linalg.einsum", "tensorflow.rank", "tensorflow.bitwise.invert", "tensorflow.concat", "tensorflow.while_loop", "tensorflow.nest.flatten", "numpy.take", "tensorflow.compiler.xla.xla_data_pb2.GatherDimensionNumbers", "tensorflow.linalg.adjoint", "tensorflow.bitwise.right_shift", "tensorflow.nest.map_structure", "tensorflow.dtypes.as_dtype", "tensorflow.linalg.eigvals", "tensorflow.bitwise.left_shift", "tensorflow.math.round", "tensorflow.compiler.tf2xla.python.xla.conv", "tensorflow.compiler.tf2xla.python.xla.variadic_sort", "tensorflow.function", "tensorflow.switch_case", "tensorflow.linalg.qr", "tensorflow.compiler.xla.xla_data_pb2.ConvolutionDimensionNumbers", "tensorflow.math.equal", "numpy.dtype", "tensorflow.Module", "tensorflow.math.abs", "tensorflow.nn.max_pool", "tensorflow.compiler.xla.xla_data_pb2.PrecisionConfig", "tensorflow.nn.conv_transpose", "tensorflow.math.greater_equal", "tensorflow.math.less", "tensorflow.math.floordiv", "numpy.shape", "tensorflow.math.conj", "tensorflow.dtypes.cast", "tensorflow.TensorSpec", "tensorflow.convert_to_tensor", "tensorflow.compiler.tf2xla.python.xla.select_and_scatter", "tensorflow.linalg.eigh", "tensorflow.linalg.triangular_solve", "tensorflow.math.floor" ] ]
sh-divya/flare
[ "93219ff03df10528abb8f7a5309f15f7899a3f12" ]
[ "tests/test_flare_io.py" ]
[ "import pytest\n\npmgout = pytest.importorskip(\"pymatgen.io.vasp.outputs\")\nVasprun = pmgout.Vasprun\nimport os\nimport numpy as np\nfrom flare.struc import Structure, get_unique_species\nfrom flare.dft_interface.vasp_util import md_trajectory_from_vasprun\nfrom flare.utils.flare_io import md_trajectory_to_file, md_trajectory_from_file\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore::UserWarning\", \"ignore::pymatgen.io.vasp.outputs.UnconvergedVASPWarning\"\n)\n\n\ndef test_read_write_trajectory():\n structures = md_trajectory_from_vasprun(\"test_files/test_vasprun.xml\")\n fname = \"tst_traj.json\"\n md_trajectory_to_file(fname, structures)\n fstructures = md_trajectory_from_file(fname)\n for s, f in zip(structures, fstructures):\n assert np.isclose(s.forces, f.forces).all()\n assert np.isclose(s.positions, f.positions).all()\n os.system(\"rm tst_traj.json\")\n" ]
[ [ "numpy.isclose" ] ]
neilchristanto/ValDashboard
[ "d62d04020081c114c67d80e52726ad827a180ba0" ]
[ "src/webparse.py" ]
[ "import re\r\nimport pandas as pd\r\n\r\nimport requests\r\nfrom lxml import html as lhtml\r\nfrom fake_useragent import UserAgent\r\n\r\nimport logging\r\n\r\nWS_TO_STR = 0\r\nWS_SRC = 1\r\nWS_PATH = 2\r\nWS_CACHE = 3\r\n\r\nclass WebParse:\r\n\r\n websource = {\r\n # Readable Source unique path caching\r\n \"mkt_cap\" : ['Mkt Cap' , \"ycharts\" , \"market_cap\", 0],\r\n \"inc_qtr\" : ['Inc Qtr' , \"ycharts\" , \"net_income\", 1],\r\n \"inc_ttm\" : ['Inc TTM' , \"ycharts\" , \"net_income_ttm\", 1],\r\n \"rev_qtr\" : ['Rev Qtr' , \"ycharts\" , \"revenues\", 1],\r\n \"rev_ttm\" : ['Rev TTM' , \"ycharts\" , \"revenues_ttm\", 1],\r\n \"p_rev_ttm\" : ['Prv Rev TTM', \"ycharts\" , \"revenues_ttm\", 1],\r\n \r\n \"rev_fy\" : ['Rev FY' , \"cml\" , \"analysts\", 1],\r\n \"ref_1fy\" : ['Rev 1FY' , \"cml\" , \"analysts\", 1],\r\n \"ref_2fy\" : ['Rev 2FY' , \"cml\" , \"analysts\", 1],\r\n \r\n # All PS depends on MktCap and Rev\r\n \"ps_fy\" : ['PS FY' , \"NA\"],\r\n \"ps_1fy\" : ['PS 1FY' , \"NA\"],\r\n \"ps_2fy\" : ['PS 2FY' , \"NA\"],\r\n \"ps_ttm\" : ['PS TTM' , \"NA\"],\r\n \"ps_nxt\" : ['PS Nxt' , \"NA\"],\r\n\r\n # upside and growth are just ratios between 2 numbers in different times\r\n \"upside\" : ['Upside' , \"NA\"],\r\n \"rev_grow\" : ['Rev Grow' , \"NA\"],\r\n \"inc_grow\" : ['Inc Grow' , \"NA\"],\r\n 'revgw_fy' : ['RevGw FY' , 'NA'],\r\n 'revgw_1fy' : ['RevGw 1FY' , 'NA'],\r\n 'revgw_2fy' : ['RevGw_2FY' , 'NA'],\r\n \r\n }\r\n\r\n \r\n # cache the entire http response\r\n cached_web = {}\r\n\r\n # handle to portfolio extracted data\r\n pdata = {}\r\n \r\n # state to specify whether the latest date is the same\r\n # if so, skip the parses\r\n skip_metric_parse = 0\r\n \r\n # fy_idx is for indexing the fiscal year calculation for revenue\r\n fy_idx = 0\r\n \r\n # logger\r\n def __init__(self):\r\n self.logger = logging.getLogger('root.' 
+ __name__)\r\n \r\n def clear_webcache(self):\r\n self.cached_web = {}\r\n\r\n def val_toB(self, istr):\r\n # return value in billion\r\n if istr == 'NA':\r\n val = -1\r\n elif istr[-1] == 'B':\r\n val = float(istr[0:-1].replace(',', ''))\r\n elif istr[-1] == 'T':\r\n val = float(istr[0:-1].replace(',', ''))*1000.0\r\n else: # observed value is in Mill\r\n val = float(istr[0:-1].replace(',', ''))/1000.0\r\n return val\r\n\r\n def val_toM(self, istr):\r\n if istr == 'NA':\r\n val = -1\r\n elif istr[-1] == 'B':\r\n val = float(istr[0:-1].replace(',', ''))*1000.0\r\n else:\r\n val = float(istr[0:-1].replace(',', ''))\r\n return val\r\n\r\n # Return the full xml, considering caching enabled or not\r\n # if caching is enabled and is present, no need to query the website again\r\n def get_xml(self, **kwargs):\r\n s = kwargs['stock']\r\n m = kwargs['metric']\r\n u = kwargs['url']\r\n \r\n key = (s,self.websource[m][WS_PATH])\r\n # check for caching enable\r\n if self.websource[m][WS_CACHE]:\r\n if key in self.cached_web.keys():\r\n self.logger.debug('get cached url = %s' % u)\r\n return self.cached_web[key]\r\n\r\n # here, either caching is not enabled, or cache entry is not present\r\n self.logger.debug('get url = %s' % u)\r\n ua = UserAgent()\r\n hdr = {\"User-Agent\": ua.random}\r\n req = requests.get(u, headers=hdr) \r\n root = lhtml.fromstring(req.content)\r\n \r\n # cache if enabled\r\n if self.websource[m][WS_CACHE]:\r\n self.cached_web[key] = root\r\n \r\n return root\r\n \r\n def check_skip_metric(self, **kwargs):\r\n s = kwargs['stock']\r\n m = kwargs['metric']\r\n \r\n if self.skip_metric_parse:\r\n self.logger.debug('{0} - {1} - skipped'.format(s, m))\r\n return 1, self.pdata[s][self.websource[m][WS_TO_STR]]\r\n else:\r\n return 0, 0\r\n\r\n def check_gph_skip_metric(self, **kwargs):\r\n s = kwargs['stock']\r\n m = kwargs['metric']\r\n \r\n if self.skip_metric_parse:\r\n self.logger.debug('{0} - {1} - skipped'.format(s, m))\r\n return 1, self.pdata[s][self.websource[m][WS_TO_STR] + ' date'], \\\r\n self.pdata[s][self.websource[m][WS_TO_STR]]\r\n else:\r\n return 0, 0, 0\r\n \r\n \r\n def parse_ycharts_pgNameVal(self, **kwargs):\r\n root = self.get_xml(**kwargs)\r\n res = root.xpath(\"//span[@class='page-name-date']\")\r\n stk = kwargs['stock']\r\n metric = kwargs['metric']\r\n \r\n if len(res) != 1:\r\n self.logger.error(\"ERROR: stock %s, %s list not unique, or not available\" %\r\n (kwargs['stock'], kwargs['metric']))\r\n return -1\r\n \r\n res = res[0].text\r\n [val, date] = res.split(\" for \")\r\n val = self.val_toB(val)\r\n \r\n try:\r\n if date == self.pdata[stk]['latest']:\r\n self.skip_metric_parse = 1\r\n self.logger.debug('%s latest data matches (%s).. skipping ycharts metric parse' % (stk, date))\r\n \r\n # if date is not the same and this is not market cap, that means this is new data.. 
\r\n # empty out the stocks data\r\n elif metric != 'mkt_cap':\r\n self.pdata[stk] = {'Mkt Cap' : self.pdata[stk]['Mkt Cap'], 'latest' : ''}\r\n \r\n except KeyError:\r\n pass\r\n \r\n return val\r\n\r\n\r\n def parse_mkt_cap(self, **kwargs):\r\n self.skip_metric_parse = 0\r\n self.fy_idx = 0\r\n retval = self.parse_ycharts_pgNameVal(**kwargs) \r\n return float(\"{0:.3f}\".format(retval))\r\n \r\n \r\n def parse_rev_ttm(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n retval = self.parse_ycharts_pgNameVal(**kwargs) \r\n return float(\"{0:.3f}\".format(retval))\r\n \r\n '''\r\n def parse_inc_qtr(self, **kwargs):\r\n if self.skip_metric_parse:\r\n return self.pdata[kwargs['stock']][kwargs['metric']]\r\n\r\n retval = self.parse_ycharts_pgNameVal(**kwargs) \r\n return float(\"{0:.3f}\".format(retval))\r\n\r\n\r\n def parse_inc_ttm(self, **kwargs):\r\n if self.skip_metric_parse:\r\n return self.pdata[kwargs['stock']][kwargs['metric']]\r\n \r\n retval = self.parse_ycharts_pgNameVal(**kwargs) \r\n return float(\"{0:.3f}\".format(retval))\r\n '''\r\n \r\n def parse_p_rev_ttm(self, **kwargs):\r\n root = self.get_xml(**kwargs)\r\n td = root.xpath(\"//td\")\r\n # prev ttm is located at TD[8] and TD[9]\r\n # [0][1] is for current quarter\r\n # [2][3] is for prev quarter\r\n # [8][9] is for a year ago\r\n try:\r\n retval = td[9].text.strip()\r\n # return value in billion\r\n retval = self.val_toB(retval)\r\n except IndexError:\r\n retval = -1\r\n\r\n return float(\"{0:.4f}\".format(retval))\r\n \r\n \r\n def parse_rev_nxt_zacks(self, root):\r\n tb = root.xpath(\"//section[@id='detailed_earnings_estimates']\")[0]\r\n hdr = [th.text_content().split('(')[0].strip() for th in tb.xpath('.//th')]\r\n row = [[td.text_content() for td in tr.xpath('.//td')] for tr in tb.xpath('.//tbody/tr')]\r\n \r\n # create indexes and proper row\r\n hdr = hdr[1:]\r\n idx = [r[0] for r in row]\r\n row = [r[1:] for r in row]\r\n \r\n df = pd.DataFrame(data = row, columns = hdr, index = idx)\r\n val = df['Next Year']['Zacks Consensus Estimate']\r\n retval = self.val_toB(val)\r\n \r\n return float(\"{0:.3f}\".format(retval))\r\n \r\n \r\n def parse_rev_nxt(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n root = self.get_xml(**kwargs)\r\n \r\n if self.websource[kwargs['metric']][WS_SRC] == 'yahoo':\r\n retval = self.parse_rev_nxt_yahoo(root)\r\n elif self.websource[kwargs['metric']][WS_SRC] == 'zacks':\r\n retval =self.parse_rev_nxt_zacks(root)\r\n \r\n return float(\"{0:.3f}\".format(retval))\r\n \r\n '''\r\n parsing from CML\r\n '''\r\n def parse_rev_fy(self, **kwargs):\r\n root = self.get_xml(**kwargs)\r\n \r\n # current FY = 7, next = 8, onward\r\n xpath = \"//table[@class='responsive']/tbody/tr[{}]/td[@class='mean']\".format(self.fy_idx + 7)\r\n res = root.xpath(xpath)[0].text\r\n # returned value is in millions\r\n return self.val_toB(res)\r\n\r\n\r\n '''\r\n # parsing that requires ratio\r\n # ps = market_cap / rev_ttm\r\n # ps_nxt = market_cap / rev_nxt\r\n # rev_growth = rev_ttm / p_rev_ttm\r\n # upside = rev_nxt / rev_ttm\r\n '''\r\n \r\n # helper function to get ratio\r\n def get_two_metrics(self, stk, a, b):\r\n \r\n if stk not in self.pdata.keys():\r\n aval = self.parse(stk, a)\r\n bval = self.parse(stk, b)\r\n else:\r\n try:\r\n aval = self.pdata[stk][self.websource[a][WS_TO_STR]]\r\n except KeyError:\r\n aval = self.parse(stk, a)\r\n try:\r\n bval = 
self.pdata[stk][self.websource[b][WS_TO_STR]]\r\n except KeyError:\r\n bval = self.parse(stk, b)\r\n return aval, bval\r\n \r\n \r\n # PS TTM is basically mkt_cap/rev_ttm\r\n # if the required data is not present, parse them first\r\n def parse_ps_ttm(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n mkt_cap, rev_ttm = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_ttm')\r\n retval = mkt_cap / rev_ttm\r\n return float(\"{0:.3f}\".format(retval)) \r\n\r\n # this is basically market_cap/rev_nxt\r\n def parse_ps_nxt(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n mkt_cap, rev_nxt = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_nxt') \r\n retval = mkt_cap / rev_nxt\r\n return float(\"{0:.3f}\".format(retval))\r\n\r\n # rev growth need the rev_ttm and prev year's rev_ttm\r\n def parse_rev_grow(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n crev_ttm, prev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_ttm', 'p_rev_ttm') \r\n retval = crev_ttm * 100.0 / prev_ttm - 100\r\n return \"{0:.0f}%\".format(retval)\r\n \r\n # upside = rev_nxt / rev_ttm\r\n def parse_upside(self, **kwargs):\r\n skip, retval = self.check_skip_metric(**kwargs)\r\n if skip:\r\n return retval\r\n \r\n rev_nxt, rev_ttm = self.get_two_metrics(kwargs['stock'], 'rev_nxt', 'rev_ttm')\r\n retval = rev_nxt * 100.0 / rev_ttm - 100\r\n return \"{0:.0f}%\".format(retval)\r\n \r\n \r\n '''\r\n Parse PS that depends on CML website\r\n '''\r\n # ps_fy = market_cap / rev_fy\r\n # rev_fy is not part of the JSON valuation, so we'll always parse it again (from cached web)\r\n def parse_ps_fy(self, **kwargs):\r\n mkt_cap, rev_fy = self.get_two_metrics(kwargs['stock'], 'mkt_cap', 'rev_fy')\r\n retval = mkt_cap / rev_fy\r\n return float(\"{0:.2f}\".format(retval))\r\n \r\n def parse_ps_1fy(self, **kwargs):\r\n self.fy_idx = 1\r\n return self.parse_ps_fy(**kwargs)\r\n \r\n def parse_ps_2fy(self, **kwargs):\r\n self.fy_idx = 2\r\n return self.parse_ps_fy(**kwargs)\r\n \r\n def parse_revgw_fy(self, **kwargs):\r\n curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_ttm', 'ps_fy')\r\n return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)\r\n \r\n def parse_revgw_1fy(self, **kwargs):\r\n curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_fy', 'ps_1fy')\r\n return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)\r\n \r\n def parse_revgw_2fy(self, **kwargs):\r\n curr, nxt = self.get_two_metrics(kwargs['stock'], 'ps_1fy', 'ps_2fy')\r\n return '{0:.0f}%'.format((curr-nxt)*100.0 / nxt)\r\n \r\n def parse_ycharts_td(self, **kwargs):\r\n \"\"\"\r\n Parse ycharts.com, indexing into the 'dataTableBox' id.\r\n Each <tr> will have a pair of <td>: date and value.\r\n Data from ycharts.com is most recent first, so new entry is prepended to the list\r\n to create chronological order.\r\n\r\n list[0] = oldest data\r\n list[-1] = newest data\r\n\r\n :param kwargs: Passed on to get_xml (contains stock, metric, url)\r\n :return: date: list of dates (string)\r\n :return: val: list of values converted to million\r\n \"\"\"\r\n root = self.get_xml(**kwargs)\r\n td = root.xpath(\"//table[@class='table']\")[0].xpath('.//td')\r\n tdlen = len(td)\r\n date, val = [], []\r\n for i in range(0, tdlen, 2):\r\n # if content is 0, skip\r\n if td[i].text_content() == '': continue\r\n if td[i+1].text_content().strip() == '': continue\r\n date = [td[i].text_content()] + date\r\n val = 
[self.val_toM(td[i+1].text_content().strip())] + val\r\n \r\n return date, val\r\n \r\n def parse_gph_inc_qtr(self, **kwargs):\r\n skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)\r\n if skip:\r\n return date_ls, val_ls\r\n date, val = self.parse_ycharts_td(**kwargs)\r\n return date, val\r\n \r\n def parse_gph_inc_ttm(self, **kwargs):\r\n skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)\r\n if skip:\r\n return date_ls, val_ls\r\n date, val = self.parse_ycharts_td(**kwargs)\r\n return date, val\r\n \r\n def parse_gph_rev_qtr(self, **kwargs):\r\n skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)\r\n if skip:\r\n return date_ls, val_ls\r\n date, val = self.parse_ycharts_td(**kwargs)\r\n return date, val\r\n \r\n def parse_gph_rev_ttm(self, **kwargs):\r\n skip, date_ls, val_ls = self.check_gph_skip_metric(**kwargs)\r\n if skip:\r\n return date_ls, val_ls\r\n date, val = self.parse_ycharts_td(**kwargs)\r\n return date, val\r\n\r\n def parse_gph_metric(self, stk, m):\r\n \"\"\"\r\n Parse graph metric\r\n :param stk:\r\n :param m:\r\n :return:\r\n \"\"\"\r\n if stk not in self.pdata.keys():\r\n date, val = self.parse(stk, m, fn_type=\"graph\")\r\n else:\r\n try:\r\n date = self.pdata[stk][self.websource[m][WS_TO_STR] + ' date']\r\n val = self.pdata[stk][self.websource[m][WS_TO_STR]]\r\n except KeyError:\r\n date, val = self.parse(stk, m, fn_type='graph')\r\n return date, val\r\n\r\n def parse_gph_grow(self, **kwargs):\r\n metric = re.sub(\"grow\", \"ttm\", kwargs['metric']).lower()\r\n date, val = self.parse_gph_metric(kwargs['stock'], metric)\r\n \r\n # can't compute YoY growth if only 4 quarters or less\r\n if len(val) <= 4:\r\n return [], []\r\n \r\n retval = [float(\"{0:.2f}\".format(val[i] * 100.0 / val[i-4] - 100)) for i in range(4, len(val))]\r\n retdate = date[4:]\r\n return retdate, retval\r\n \r\n\r\n def parse_gph_inc_grow(self, **kwargs):\r\n return [], []\r\n\r\n def parse_gph_rev_grow(self, **kwargs):\r\n return self.parse_gph_grow(**kwargs)\r\n\r\n \r\n \r\n '''\r\n parser main entry point and helper functions\r\n '''\r\n \r\n # pre_parse takes in the metric and give the correct URL to go to\r\n # input : stock, metric\r\n # output : stock, modified metric, proper URL\r\n def pre_parse(self, stock, metric):\r\n wp_metric = re.sub(\" \", \"_\", metric).lower()\r\n \r\n try: \r\n mainurl = self.websource[wp_metric][WS_SRC]\r\n if mainurl == 'ycharts':\r\n url = \"https://ycharts.com/companies/{}/{}\".format(\r\n stock, self.websource[wp_metric][WS_PATH])\r\n elif mainurl == \"yahoo\":\r\n url = \"https://www.finance.yahoo.com/quote/{}/{}\".format(\r\n stock, self.websource[wp_metric][WS_PATH])\r\n elif mainurl == \"zacks\":\r\n url = \"https://zacks.com/stock/quote/{}/{}\".format(\r\n stock, self.websource[wp_metric][WS_PATH])\r\n elif mainurl == 'cml':\r\n url = 'https://www.cmlviz.com/inc/{1}.php?ticker={0}'.format(\r\n stock, self.websource[wp_metric][WS_PATH])\r\n elif mainurl == 'NA':\r\n url = \"NA\"\r\n else:\r\n url = None\r\n except KeyError:\r\n url = None\r\n \r\n return stock, wp_metric, url\r\n \r\n \r\n def parse(self, stock, metric, **kwargs):\r\n stock, metric, url = self.pre_parse(stock, metric)\r\n \r\n if url == None:\r\n msg = \"\"\"\r\n ERROR: url returned None from pre_parse\r\n stock: %s; metric: %s\r\n \"\"\" % (stock, metric)\r\n print(msg)\r\n return -1\r\n \r\n try:\r\n if kwargs['fn_type'] == 'graph':\r\n fn_prefix = \"parse_gph_\"\r\n else:\r\n raise KeyError\r\n except KeyError:\r\n fn_prefix = \"parse_\"\r\n \r\n 
try:\r\n func = getattr(self, fn_prefix + metric)\r\n except AttributeError:\r\n print(\"ERROR: no function: %s\" % (fn_prefix + metric))\r\n return -1\r\n \r\n return func(stock=stock, metric=metric, url=url)\r\n \r\n" ]
[ [ "pandas.DataFrame" ] ]
CkiChen/pymindaffectBCI
[ "0119145a8b280c776f4c4e6cd776fed0f0156404" ]
[ "mindaffectBCI/decoder/UtopiaDataInterface.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) 2019 MindAffect B.V. \n# Author: Jason Farquhar <jason@mindaffect.nl>\n# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.\n#\n# pymindaffectBCI is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pymindaffectBCI is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>\n\nfrom mindaffectBCI.utopiaclient import UtopiaClient, Subscribe, StimulusEvent, NewTarget, Selection, DataPacket, UtopiaMessage, SignalQuality\nfrom collections import deque\nfrom mindaffectBCI.decoder.utils import RingBuffer, extract_ringbuffer_segment\nfrom mindaffectBCI.decoder.lower_bound_tracker import lower_bound_tracker\nfrom mindaffectBCI.decoder.linear_trend_tracker import linear_trend_tracker\nfrom time import sleep\nimport numpy as np\n\nclass UtopiaDataInterface:\n \"\"\"Adaptor class for interfacing between the decoder logic and the data source\n\n This class provides functionality to wrap a real time data and stimulus stream to make\n it easier to implement standard machine learning pipelines. In particular it provides streamed\n pre-processing for both EEG and stimulus streams, and ring-buffers for the same with time-stamp based indexing. \n \"\"\"\n\n\n # TODO [X] : infer valid data time-stamps\n # TODO [X] : smooth and de-jitter the data time-stamps\n # TODO [] : expose a (potentially blocking) message generator interface\n # TODO [X] : ring-buffer for the stimulus-state also, so fast random access\n # TODO [X] : rate limit waiting to reduce computational load\n VERBOSITY = 1\n\n def __init__(self, datawindow_ms=60000, msgwindow_ms=60000,\n data_preprocessor=None, stimulus_preprocessor=None, send_signalquality=True, \n timeout_ms=100, mintime_ms=50, fs=None, U=None, sample2timestamp='lower_bound_tracker',\n clientid=None):\n # rate control\n self.timeout_ms = timeout_ms\n self.mintime_ms = mintime_ms # minimum time to spend in update => max processing rate\n # amout of data in the ring-buffer\n self.datawindow_ms = datawindow_ms\n self.msgwindow_ms = msgwindow_ms\n # connect to the mindaffectDecoder\n self.host = None\n self.port = -1\n self.U = UtopiaClient(clientid) if U is None else U\n self.t0 = self.getTimeStamp()\n # init the buffers\n\n # Messages\n self.msg_ringbuffer = deque()\n self.msg_timestamp = None # ts of most recent processed message\n\n # DataPackets\n self.data_ringbuffer = None # init later...\n self.data_timestamp = None # ts of last data packet seen\n self.sample2timestamp = sample2timestamp # sample tracker to de-jitter time-stamp information\n self.data_preprocessor = data_preprocessor # function to pre-process the incomming data\n\n # StimulusEvents\n self.stimulus_ringbuffer = None # init later...\n self.stimulus_timestamp = None # ts of most recent processed data\n self.stimulus_preprocessor = stimulus_preprocessor # function to pre-process the incomming data\n\n # Info about the data sample rate -- estimated from packet rates..\n self.raw_fs = fs\n self.fs = None\n self.newmsgs = [] # list new 
unprocessed messages since last update call\n\n # BODGE: running statistics for sig2noise estimation\n # TODO []: move into its own Sig2Noise computation class\n self.send_signalquality = send_signalquality\n self.last_sigquality_ts = None\n self.last_log_ts = None\n self.send_sigquality_interval = 1000 # send signal qualities every 1000ms = 1Hz\n # noise2sig estimate halflife_ms, running-offset, de-trended power\n self.noise2sig_halflife_ms = (5000, 500) # 10s for offset, .5s for power\n # TODO [x]: move into an exp-move-ave power est class\n self.raw_power = None\n self.preproc_power = None\n\n def connect(self, host=None, port=-1, queryifhostnotfound=True):\n \"\"\"[make a connection to the utopia host]\n\n Args:\n host ([type], optional): [description]. Defaults to None.\n port (int, optional): [description]. Defaults to -1.\n queryifhostnotfound (bool, optional): [description]. Defaults to True.\n\n Returns:\n [type]: [description]\n \"\"\" \n \n if host:\n self.host = host\n if port > 0:\n self.port = port\n self.U.autoconnect(self.host, self.port, timeout_ms=5000, queryifhostnotfound=queryifhostnotfound)\n if self.U.isConnected:\n # subscribe to messages: data, stim, mode, selection\n self.U.sendMessage(Subscribe(None, \"DEMSN\"))\n return self.U.isConnected\n \n def isConnected(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\" \n\n return self.U.isConnected if self.U is not None else False\n\n def getTimeStamp(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\" \n\n return self.U.getTimeStamp()\n\n def sendMessage(self, msg: UtopiaMessage):\n \"\"\"[send a UtopiaMessage to the utopia hub]\n\n Args:\n msg (UtopiaMessage): [description]\n \"\"\" \n\n self.U.sendMessage(msg)\n\n def getNewMessages(self, timeout_ms=0):\n \"\"\"[get new messages from the UtopiaHub]\n\n Args:\n timeout_ms (int, optional): [description]. Defaults to 0.\n\n Returns:\n [type]: [description]\n \"\"\" \n \n return self.U.getNewMessages(timeout_ms)\n\n def initDataRingBuffer(self):\n \"\"\"[initialize the data ring buffer, by getting some seed messages and datapackets to get the data sizes etc.]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n print(\"getting some initial data to set up the ring buffer\")\n # get some initial data to get data shape and sample rate\n databuf = []\n nmsg = 0\n iter = 0\n data_start_ts = None\n data_ts = 0\n while data_start_ts is None or data_ts - data_start_ts < 3000:\n msgs = self.getNewMessages(100)\n for m in msgs:\n m = self.preprocess_message(m)\n if m.msgID == DataPacket.msgID: # data-packets are special\n if len(m.samples) > 0:\n databuf.append(m) # append raw data\n if data_start_ts is None:\n data_start_ts = m.timestamp\n data_ts = m.timestamp\n else:\n print(\"Huh? 
got empty data packet: {}\".format(m))\n else:\n self.msg_ringbuffer.append(m)\n self.msg_timestamp = m.timestamp\n nmsg = nmsg+1\n nsamp = [len(m.samples) for m in databuf]\n data_ts = [ m.timestamp for m in databuf]\n if self.raw_fs is None:\n self.raw_fs = np.median( np.array(nsamp[1:]) / np.diff(data_ts) * 1000.0)\n print('Estimated sample rate {} samp in {} s ={}'.format(sum(nsamp),(data_ts[-1]-data_ts[0])/1000.0,self.raw_fs))\n\n # init the pre-processor (if one)\n if self.data_preprocessor:\n self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1,:], fs=self.raw_fs) # tell it the sample rate\n\n # apply the data packet pre-processing -- to get the info\n # on the data state after pre-processing\n tmpdatabuf = [self.processDataPacket(m) for m in databuf]\n # strip empty packets\n tmpdatabuf = [d for d in tmpdatabuf if d.shape[0]>0]\n # estimate the sample rate of the pre-processed data\n pp_nsamp = [m.shape[0] for m in tmpdatabuf]\n pp_ts = [ m[-1,-1] for m in tmpdatabuf]\n self.fs = np.median( np.array(pp_nsamp[1:]) / np.diff(pp_ts) * 1000.0)# fs = nSamp/time\n print('Estimated pre-processed sample rate={}'.format(self.fs))\n\n # create the ring buffer, big enough to store the pre-processed data\n if self.data_ringbuffer:\n print(\"Warning: re-init data ring buffer\")\n # TODO []: why does the datatype of the ring buffer matter so much? Is it because of uss?\n # Answer[]: it's the time-stamps, float32 rounds time-stamps to 24bits\n self.data_ringbuffer = RingBuffer(maxsize=self.fs*self.datawindow_ms/1000, shape=tmpdatabuf[0].shape[1:], dtype=np.float32)\n\n # insert the warmup data into the ring buffer\n self.data_timestamp=None # reset last seen data\n nsamp=0\n # re-init the preprocessor for consistency with off-line\n if self.data_preprocessor:\n self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1,:], fs=self.raw_fs)\n # use linear trend tracker to de-jitter the sample timestamps\n if self.sample2timestamp is None or isinstance(self.sample2timestamp,str):\n self.sample2timestamp = timestamp_interpolation(fs=self.fs,\n sample2timestamp=self.sample2timestamp)\n for m in databuf:\n # apply the pre-processing again (this time with fs estimated)\n d = self.processDataPacket(m)\n self.data_ringbuffer.extend(d)\n nsamp = nsamp + d.shape[0]\n\n return (nsamp, nmsg)\n\n def initStimulusRingBuffer(self):\n '''initialize the stimulus ring buffer, with a fixed-size stimulus-state vector plus a time-stamp channel'''\n # TODO []: more efficient memory use, with different dtype for 'real' data and the time-stamps?\n self.stimulus_ringbuffer = RingBuffer(maxsize=self.fs*self.datawindow_ms/1000, shape=(257,), dtype=np.float32)\n\n def preprocess_message(self, m:UtopiaMessage):\n \"\"\"[apply pre-processing to Utopia message before any more work]\n\n Args:\n m (UtopiaMessage): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n # WARNING BODGE: fit time-stamp in 24bits for float32 ring buffer\n # Note: this leads to wrap-around in (1<<24)/1000/3600 = 4.6 hours\n # but that shouldn't matter.....\n m.timestamp = m.timestamp % (1<<24)\n return m\n \n def processDataPacket(self, m: DataPacket):\n \"\"\"[pre-process a datapacket message ready to be inserted into the ringbuffer]\n\n Args:\n m (DataPacket): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n #print(\"DP: {}\".format(m))\n # extract the raw data\n d = np.array(m.samples, dtype=np.float32) # process as singles\n # apply the pre-processor, if one was given\n\n if self.data_preprocessor:\n d_raw = 
d.copy()\n # warning-- with aggressive downsample this may not produce any data!\n d = self.data_preprocessor.transform(d)\n\n # BODGE: running estimate of the electrode-quality, ONLY after initialization!\n if self.send_signalquality and self.data_ringbuffer is not None:\n self.update_and_send_ElectrodeQualities(d_raw, d, m.timestamp)\n\n #if self.VERBOSITY > 0 and self.data_ringbuffer is not None:\n # self.plot_raw_preproc_data(d_raw,d,m.timestamp)\n\n if d.size > 0 :\n # If have data to add to the ring-buffer, guarding for time-stamp wrap-around\n # TODO [ ]: de-jitter and better timestamp interpolation\n # guard for wrap-around!\n if self.data_timestamp is not None and m.timestamp < self.data_timestamp:\n print(\"Warning: Time-stamp wrap-around detected!!\")\n\n d = self.add_sample_timestamps(d,m.timestamp,self.fs)\n\n # update the last time-stamp tracking\n self.data_timestamp = m.timestamp\n return d\n\n def add_sample_timestamps(self,d:np.ndarray,timestamp:float,fs:float):\n \"\"\"add per-sample timestamp information to the data matrix\n\n Args:\n d (np.ndarray): (t,d) the data matrix to attach time stamps to\n timestamp (float): the timestamp of the last sample of d\n fs (float): the nominal sample rate of d\n\n Returns:\n np.ndarray: (t,d+1) data matrix with attached time-stamp channel\n \"\"\"\n if self.sample2timestamp is not None and not isinstance(self.sample2timestamp,str):\n sample_ts = self.sample2timestamp.transform(timestamp, len(d))\n else: # all the same ts\n sample_ts = np.ones((len(d),),dtype=int)*timestamp\n # combine data with timestamps, ensuring type is preserved\n d = np.append(np.array(d), sample_ts[:, np.newaxis], -1).astype(d.dtype)\n return d\n\n def plot_raw_preproc_data(self, d_raw, d_preproc, ts):\n \"\"\"[debugging function to check the diff between the raw and pre-processed data]\n\n Args:\n d_raw ([type]): [description]\n d_preproc ([type]): [description]\n ts ([type]): [description]\n \"\"\" \n \n if not hasattr(self,'rawringbuffer'):\n self.preprocringbuffer=RingBuffer(maxsize=self.fs*3,shape=(d_preproc.shape[-1]+1,))\n self.rawringbuffer=RingBuffer(maxsize=self.raw_fs*3,shape=(d_raw.shape[-1]+1,))\n d_preproc = self.add_sample_timestamps(d_preproc,ts,self.fs)\n self.preprocringbuffer.extend(d_preproc)\n d_raw = self.add_sample_timestamps(d_raw,ts,self.raw_fs)\n self.rawringbuffer.extend(d_raw)\n if self.last_sigquality_ts is None or ts > self.last_sigquality_ts + self.send_sigquality_interval:\n import matplotlib.pyplot as plt\n plt.figure(10);plt.clf();\n idx = np.flatnonzero(self.rawringbuffer[:,-1])[0]\n plt.subplot(211); plt.cla(); plt.plot(self.rawringbuffer[idx:,-1],self.rawringbuffer[idx:,:-1])\n idx = np.flatnonzero(self.preprocringbuffer[:,-1])[0]\n plt.subplot(212); plt.cla(); plt.plot(self.preprocringbuffer[idx:,-1],self.preprocringbuffer[idx:,:-1])\n plt.show(block=False)\n\n\n def processStimulusEvent(self, m: StimulusEvent):\n \"\"\"[pre-process a StimulusEvent message ready to be inserted into the stimulus ringbuffer]\n\n Args:\n m (StimulusEvent): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n # get the vector to hold the stimulus info\n d = np.zeros((257,),dtype=np.float32)\n\n if self.stimulus_ringbuffer is not None and self.stimulus_timestamp is not None:\n # hold value of used objIDs from previous time stamp\n d[:] = self.stimulus_ringbuffer[-1,:]\n\n # insert the updated state\n d[m.objIDs] = m.objState\n d[-1] = m.timestamp\n # apply the pre-processor, if one was given\n if self.stimulus_preprocessor:\n d = 
self.stimulus_preprocessor.transform(d)\n\n # update the last time-stamp tracking\n self.stimulus_timestamp= m.timestamp\n return d\n\n def update_and_send_ElectrodeQualities(self, d_raw: np.ndarray, d_preproc: np.ndarray, ts: int):\n \"\"\"[compute running estimate of electrode qality and stream it]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n ts (int): [description]\n \"\"\" \n \n raw_power, preproc_power = self.update_electrode_powers(d_raw, d_preproc)\n\n # convert to average amplitude\n raw_amp = np.sqrt(raw_power)\n preproc_amp = np.sqrt(preproc_power)\n\n # noise2signal estimated as removed raw amplitude (assumed=noise) to preprocessed amplitude (assumed=signal)\n noise2sig = np.maximum(float(1e-6), np.abs(raw_amp - preproc_amp)) / np.maximum(float(1e-8),preproc_amp)\n\n # hack - detect disconnected channels\n noise2sig[ raw_power < 1e-6 ] = 100\n\n # hack - detect filter artifacts = preproc power is too big..\n noise2sig[ preproc_amp > raw_amp*10 ] = 100\n\n # hack - cap to 100\n noise2sig = np.minimum(noise2sig,100)\n\n # rate limit sending of signal-quality messages\n if self.last_sigquality_ts is None or ts > self.last_sigquality_ts + self.send_sigquality_interval:\n print(\"SigQ:\\nraw_power=({}/{})\\npp_power=({}/{})\\nnoise2sig={}\".format(\n raw_amp,d_raw.shape[0],\n preproc_amp,d_preproc.shape[0],\n noise2sig))\n print(\"Q\",end='')\n # N.B. use *our* time-stamp for outgoing messages!\n self.sendMessage(SignalQuality(None, noise2sig))\n self.last_sigquality_ts = ts\n\n if self.VERBOSITY>2:\n # plot the sample time-stamp jitter...\n import matplotlib.pyplot as plt\n plt.figure(10)\n ts = self.data_ringbuffer[:,-1]\n idx = np.flatnonzero(ts)\n if len(idx)>0:\n ts = ts[idx[0]:]\n plt.subplot(211); plt.cla(); plt.plot(np.diff(ts)); plt.title('diff time-sample')\n plt.subplot(212); plt.cla(); plt.plot((ts-ts[0])-np.arange(len(ts))*1000.0/self.fs); plt.title('regression against sample-number')\n plt.show(block=False)\n\n def update_electrode_powers(self, d_raw: np.ndarray, d_preproc:np.ndarray):\n \"\"\"[track exp-weighted-moving average centered power for 2 input streams]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n if self.raw_power is None:\n mu_hl, pow_hl = self.noise2sig_halflife_ms\n self.raw_power = power_tracker(mu_hl, pow_hl, self.raw_fs)\n self.preproc_power = power_tracker(mu_hl, pow_hl, self.fs)\n self.raw_power.transform(d_raw)\n self.preproc_power.transform(d_preproc)\n return (self.raw_power.power(), self.preproc_power.power())\n\n\n def update(self, timeout_ms=None, mintime_ms=None):\n '''Update the tracking state w.r.t. the data source\n\n By adding data to the data_ringbuffer, stimulus info to the stimulus_ringbuffer, \n and other messages to the messages ring buffer.\n\n Args\n timeout_ms : int\n max block waiting for messages before returning\n mintime_ms : int\n min time to accumulate messages before returning\n Returns\n newmsgs : [newMsgs :UtopiaMessage]\n list of the *new* utopia messages from the server\n nsamp: int\n number of new data samples in this call\n Note: use data_ringbuffer[-nsamp:,...] to get the new data\n nstimulus : int\n number of new stimulus events in this call\n Note: use stimulus_ringbuffer[-nstimulus:,...] 
to get the new data\n '''\n if timeout_ms is None:\n timeout_ms = self.timeout_ms\n if mintime_ms is None:\n mintime_ms = self.mintime_ms\n if not self.isConnected():\n self.connect()\n if not self.isConnected():\n return [],0,0\n\n t0 = self.getTimeStamp()\n nsamp = 0\n nmsg = 0\n nstimulus = 0\n\n if self.data_ringbuffer is None: # do special init stuff if not done\n nsamp, nmsg = self.initDataRingBuffer()\n if self.stimulus_ringbuffer is None: # do special init stuff if not done\n self.initStimulusRingBuffer()\n if self.last_log_ts is None:\n self.last_log_ts = self.getTimeStamp()\n if t0 is None:\n t0 = self.getTimeStamp()\n\n # record the list of new messages from this call\n newmsgs = self.newmsgs # start with any left-overs from old calls \n self.newmsgs=[] # clear the left-over messages stack\n \n ttg = timeout_ms - (self.getTimeStamp() - t0) # time-to-go in the update loop\n while ttg > 0:\n\n # rate limit\n if ttg >= mintime_ms:\n sleep(mintime_ms/1000.0)\n ttg = timeout_ms - (self.getTimeStamp() - t0) # udate time-to-go\n \n # get the new messages\n msgs = self.getNewMessages(ttg)\n\n # process the messages - basically to split datapackets from the rest\n print(\".\",end='')\n #print(\"{} in {}\".format(len(msgs),self.getTimeStamp()-t0),end='',flush=True)\n for m in msgs:\n m = self.preprocess_message(m)\n \n print(\"{:c}\".format(m.msgID), end='', flush=True)\n \n if m.msgID == DataPacket.msgID: # data-packets are special\n d = self.processDataPacket(m) # (samp x ...)\n self.data_ringbuffer.extend(d)\n nsamp = nsamp + d.shape[0]\n \n elif m.msgID == StimulusEvent.msgID: # as are stmiuluse events\n d = self.processStimulusEvent(m) # (nY x ...)\n self.stimulus_ringbuffer.append(d)\n nstimulus = nstimulus + 1\n \n else:\n # NewTarget/Selection are also special in that they clear stimulus state...\n if m.msgID == NewTarget.msgID or m.msgID == Selection.msgID :\n # Make a dummy stim-event to reset all objIDs to off\n d = self.processStimulusEvent(StimulusEvent(m.timestamp,\n np.arange(255,dtype=np.int32),\n np.zeros(255,dtype=np.int8)))\n self.stimulus_ringbuffer.append(d)\n self.stimulus_timestamp= m.timestamp\n \n if len(self.msg_ringbuffer)>0 and m.timestamp > self.msg_ringbuffer[0].timestamp + self.msgwindow_ms: # slide msg buffer\n self.msg_ringbuffer.popleft()\n self.msg_ringbuffer.append(m)\n newmsgs.append(m)\n nmsg = nmsg+1\n self.msg_timestamp = m.timestamp\n \n # update time-to-go\n ttg = timeout_ms - (self.getTimeStamp() - t0)\n\n # new line\n if self.getTimeStamp() > self.last_log_ts + 2000:\n print(\"\",flush=True)\n self.last_log_ts = self.getTimeStamp()\n \n # return new messages, and count new samples/stimulus \n return (newmsgs, nsamp, nstimulus)\n\n\n\n def push_back_newmsgs(self,oldmsgs):\n \"\"\"[put unprocessed messages back onto the newmessages queue]\n\n Args:\n oldmsgs ([type]): [description]\n \"\"\" \n \n # TODO []: ensure this preserves message time-stamp order?\n self.newmsgs.extend(oldmsgs)\n\n\n\n\n def extract_data_segment(self, bgn_ts, end_ts=None):\n \"\"\"extract a segment of data based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. 
Defaults to None.\n\n Returns:\n (np.ndarray): the data between these time-stamps, or None if timestamps invalid\n \"\"\" \n return extract_ringbuffer_segment(self.data_ringbuffer,bgn_ts,end_ts)\n \n def extract_stimulus_segment(self, bgn_ts, end_ts=None):\n \"\"\"extract a segment of the stimulus stream based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. Defaults to None.\n\n Returns:\n (np.ndarray): the stimulus events between these time-stamps, or None if timestamps invalid\n \"\"\" \n return extract_ringbuffer_segment(self.stimulus_ringbuffer,bgn_ts,end_ts)\n \n def extract_msgs_segment(self, bgn_ts, end_ts=None):\n \"\"\"[extract the messages between start/end time stamps]\n\n Args:\n bgn_ts ([type]): [description]\n end_ts ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\" \n \n msgs = [] # store the trial stimEvents\n for m in reversed(self.msg_ringbuffer):\n if m.timestamp <= bgn_ts:\n # stop as soon as earlier than bgn_ts\n break\n if end_ts is None or m.timestamp < end_ts:\n msgs.append(m)\n # reverse back to input order\n msgs.reverse()\n return msgs\n\n def run(self, timeout_ms=30000):\n \"\"\"[test run the interface forever, just getting and storing data]\n\n Args:\n timeout_ms (int, optional): [description]. Defaults to 30000.\n \"\"\" \n \n t0 = self.getTimeStamp()\n # test getting 5s data\n tstart = self.data_timestamp\n trlen_ms = 5000\n while self.getTimeStamp() < t0+timeout_ms:\n self.update()\n # test getting a data segment\n if tstart is None :\n tstart = self.data_timestamp\n if tstart and self.data_timestamp > tstart + trlen_ms:\n X = self.extract_data_segment(tstart, tstart+trlen_ms)\n print(\"Got data: {}->{}\\n{}\".format(tstart, tstart+trlen_ms, X[:, -1]))\n Y = self.extract_stimulus_segment(tstart, tstart+trlen_ms)\n print(\"Got stimulus: {}->{}\\n{}\".format(tstart, tstart+trlen_ms, Y[:, -1]))\n tstart = self.data_timestamp + 5000\n print('.', flush=True)\n\n\ntry:\n from sklearn.base import TransformerMixin\nexcept:\n # fake the class if sklearn is not available, e.g. Android/iOS\n class TransformerMixin:\n def __init__():\n pass\n def fit(self,X):\n pass\n def transform(self,X):\n pass\n\n\n\n\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nfrom mindaffectBCI.decoder.utils import sosfilt, butter_sosfilt, sosfilt_zi_warmup\nclass butterfilt_and_downsample(TransformerMixin):\n \"\"\"Incremental streaming transformer to provide filtering and downsampling data transformations\n\n Args:\n TransformerMixin ([type]): sklearn compatible transformer\n \"\"\" \n def __init__(self, stopband=((0,5),(5,-1)), order:int=6, fs:float =250, fs_out:float =60, ftype='butter'):\n self.stopband = stopband\n self.fs = fs\n self.fs_out = fs_out if fs_out is not None and fs_out < fs else fs\n self.order = order\n self.axis = -2\n if not self.axis == -2:\n raise ValueError(\"axis != -2 is not yet supported!\")\n self.nsamp = 0\n self.ftype = ftype\n\n def fit(self, X, fs:float =None, zi=None):\n \"\"\"[summary]\n\n Args:\n X ([type]): [description]\n fs (float, optional): [description]. Defaults to None.\n zi ([type], optional): [description]. 
Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\" \n if fs is not None: # parameter overrides stored fs\n self.fs = fs\n\n # preprocess -> spectral filter\n if isinstance(self.stopband, str):\n import pickle\n import os\n # load coefficients from file -- when scipy isn't available\n if os.path.isfile(self.stopband):\n fn = self.stopband \n else: # try relative to our py file\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)),self.stopband)\n with open(fn,'rb') as f:\n self.sos_ = pickle.load(f)\n self.zi_ = pickle.load(f)\n f.close()\n # tweak the shape/scale of zi to the actual data shape\n self.zi_ = sosfilt_zi_warmup(self.zi_, X, self.axis)\n print(\"X={} zi={}\".format(X.shape,self.zi_.shape))\n\n else:\n # estimate them from the given information\n X, self.sos_, self.zi_ = butter_sosfilt(X, self.stopband, self.fs, order=self.order, axis=self.axis, zi=zi, ftype=self.ftype)\n \n # preprocess -> downsample\n self.nsamp = 0\n self.resamprate_ = int(round(self.fs*2.0/self.fs_out))/2.0 if self.fs_out is not None else 1\n self.out_fs_ = self.fs/self.resamprate_\n print(\"resample: {}->{}hz rsrate={}\".format(self.fs, self.out_fs_, self.resamprate_))\n\n return self\n\n def transform(self, X, Y=None):\n \"\"\"[summary]\n\n Args:\n X ([type]): [description]\n Y ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\" \n # propogate the filter coefficients between calls\n if not hasattr(self,'sos_'):\n self.fit(X[0:1,:])\n\n if self.sos_ is not None:\n X, self.zi_ = sosfilt(self.sos_, X, axis=self.axis, zi=self.zi_)\n\n nsamp = self.nsamp\n self.nsamp = self.nsamp + X.shape[self.axis] # track *raw* sample counter\n\n # preprocess -> downsample @60hz\n if self.resamprate_ > 1:\n # number samples through this cycle due to remainder of last block\n resamp_start = nsamp%self.resamprate_\n # convert to number samples needed to complete this cycle\n # this is then the sample to take for the next cycle\n if resamp_start > 0:\n resamp_start = self.resamprate_ - resamp_start\n \n # allow non-integer resample rates\n idx = np.arange(resamp_start,X.shape[self.axis],self.resamprate_)\n\n if self.resamprate_%1 > 0 and idx.size>0 : # non-integer re-sample, interpolate\n idx_l = np.floor(idx).astype(int) # sample above\n idx_u = np.ceil(idx).astype(int) # sample below\n # BODGE: guard for packet ending at sample boundary.\n idx_u[-1] = idx_u[-1] if idx_u[-1]<X.shape[self.axis] else X.shape[self.axis]-1\n w_u = idx - idx_l # linear weight of the upper sample\n X = X[...,idx_u,:] * w_u[:,np.newaxis] + X[...,idx_l,:] * (1-w_u[:,np.newaxis]) # linear interpolation\n if Y is not None:\n Y = Y[...,idx_u,:] * w_u[:,np.newaxis] + Y[...,idx_l,:] * (1-w_u[:,np.newaxis])\n\n else:\n idx = idx.astype(int)\n X = X[..., idx, :] # decimate X (trl, samp, d)\n if Y is not None:\n Y = Y[..., idx, :] # decimate Y (trl, samp, y)\n \n return X if Y is None else (X, Y)\n\n @staticmethod\n def testcase():\n ''' test the filt+downsample transformation filter by incremental calling '''\n #X=np.cumsum(np.random.randn(100,1),axis=0)\n X=np.sin(np.arange(100)[:,np.newaxis]*2*np.pi/30)\n xs = np.arange(X.shape[0])[:,np.newaxis]\n # high-pass and decimate\n bands = ((0,20,'bandpass'))\n fs = 200\n fs_out = 130\n fds = butterfilt_and_downsample(stopband=bands,fs=fs,fs_out=fs_out)\n\n \n print(\"single step\")\n fds.fit(X[0:1,:])\n m0,xs0 = fds.transform(X,xs) # (samp,ny,ne)\n print(\"M0 -> {}\".format(m0[:20]))\n\n step=6\n print(\"Step size = {}\".format(step))\n 
fds.fit(X[0:1,:])\n m1=np.zeros(m0.shape,m0.dtype)\n xs1 = np.zeros(xs0.shape,xs0.dtype)\n t=0\n for i in range(0,len(X),step):\n idx=np.arange(i,min(i+step,len(X)))\n mm, idx1=fds.transform(X[idx,:],idx[:,np.newaxis])\n m1[t:t+mm.shape[0],:]=mm\n xs1[t:t+mm.shape[0]]=idx1\n t = t +mm.shape[0]\n print(\"M1 -> {}\".format(m1[:20]))\n print(\"diff: {}\".format(np.max(np.abs(m0-m1))))\n\n import matplotlib.pyplot as plt \n plt.plot(xs,X,'*-',label='X')\n plt.plot(xs0,m0,'*-',label='{} {}->{}Hz single'.format(bands,fs,fs_out))\n plt.plot(xs1,m1,'*-',label='{} {}->{}Hz incremental'.format(bands,fs,fs_out))\n plt.legend()\n plt.show()\n\n\n\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nfrom mindaffectBCI.decoder.stim2event import stim2event\nclass stim2eventfilt(TransformerMixin):\n ''' Incremental streaming transformer to transform a sequence of stimulus states to a brain event sequence\n \n For example by transforming a sequence of stimulus intensities, to rising and falling edge events.\n '''\n def __init__(self, evtlabs=None, histlen=20):\n self.evtlabs = evtlabs\n self.histlen = histlen\n self.prevX = None\n\n def fit(self, X):\n \"\"\"[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n return self\n\n def transform(self, X):\n \"\"\"[transform Stimulus-encoded to brain-encoded]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n if X is None:\n return None\n \n # keep old fitler state for the later transformation call\n prevX = self.prevX\n\n # grab the new filter state (if wanted)\n if self.histlen>0:\n #print('prevX={}'.format(prevX))\n #print(\"X={}\".format(X))\n if X.shape[0] >= self.histlen or prevX is None:\n self.prevX = X\n else:\n self.prevX = np.append(prevX, X, 0)\n # only keep the last bit -- copy in case gets changed in-place\n self.prevX = self.prevX[-self.histlen:,:].copy()\n #print('new_prevX={}'.format(self.prevX))\n\n # convert from stimulus coding to brain response coding, with old state\n X = stim2event(X, self.evtlabs, axis=-2, oM=prevX)\n return X\n\n def testcase():\n ''' test the stimulus transformation filter by incremental calling '''\n M=np.array([0,0,0,1,0,0,1,1,0,1])[:,np.newaxis] # samp,nY\n s2ef = stim2eventfilt(evtlabs=('re','fe'),histlen=3)\n\n print(\"single step\")\n m0=s2ef.transform(M) # (samp,ny,ne)\n print(\"{} -> {}\".format(M,m0))\n\n print(\"Step size = 1\")\n m1=np.zeros(m0.shape,m0.dtype)\n for i in range(len(M)):\n idx=slice(i,i+1)\n mm=s2ef.transform(M[idx,:])\n m1[idx,...]=mm\n print(\"{} {} -> {}\".format(i,M[idx,...],mm))\n\n print(\"Step size=4\")\n m4=np.zeros(m0.shape,m0.dtype)\n for i in range(0,len(M),4):\n idx=slice(i,i+4)\n mm=s2ef.transform(M[idx,:])\n m4[idx,...]=mm\n print(\"{} {} -> {}\".format(i,M[idx,...],mm))\n\n print(\"m0={}\\nm1={}\\n,m4={}\\n\".format(m0,m1,m4))\n \n\n\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nclass power_tracker(TransformerMixin):\n \"\"\"Incremental streaming transformer from raw n-channel data, 
to exponientially smoothed channel powers\n\n Args:\n TransformerMixin ([type]): sklearn compatiable transformer\n \"\"\"\n\n def __init__(self,halflife_mu_ms, halflife_power_ms, fs, car=True):\n # convert to per-sample decay factor\n self.alpha_mu = self.hl2alpha(fs * halflife_mu_ms / 1000.0 ) \n self.alpha_power= self.hl2alpha(fs * halflife_power_ms / 1000.0 )\n self.car = car\n self.sX_N = None\n self.sX = None\n self.sXX_N = None\n self.sXX = None\n\n def hl2alpha(self,hl):\n \"\"\"[summary]\n\n Args:\n hl ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n return np.exp(np.log(.5)/hl)\n\n def fit(self,X):\n \"\"\"[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n self.sX_N = X.shape[0]\n if self.car and X.shape[-1]>4:\n X = X.copy() - np.mean(X,-1,keepdims=True)\n self.sX = np.sum(X,axis=0)\n self.sXX_N = X.shape[0]\n self.sXX = np.sum((X-(self.sX/self.sX_N))**2,axis=0)\n return self.power()\n\n def transform(self, X: np.ndarray):\n \"\"\"[compute the exponientially weighted centered power of X]\n\n Args:\n X (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n \"\"\" \n \n if self.sX is None: # not fitted yet!\n return self.fit(X)\n if self.car and X.shape[-1]>4:\n ch_power = self.power()\n # identify the active channels, i.e. are attached and have some signal\n act_ch = ch_power > np.max(ch_power)*1e-3\n X = X.copy() - np.mean(X[...,act_ch], -1, keepdims=True)\n # compute updated mean\n alpha_mu = self.alpha_mu ** X.shape[0]\n self.sX_N = self.sX_N*alpha_mu + X.shape[0]\n self.sX = self.sX*alpha_mu + np.sum(X, axis=0)\n # center and compute updated power\n alpha_pow = self.alpha_power ** X.shape[0]\n self.sXX_N = self.sXX_N*alpha_pow + X.shape[0]\n self.sXX = self.sXX*alpha_pow + np.sum((X-(self.sX/self.sX_N))**2, axis=0) \n return self.power()\n \n def mean(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\" \n return self.sX / self.sX_N\n def power(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\" \n return self.sXX / self.sXX_N\n \n def testcase(self):\n \"\"\"[summary]\n \"\"\" \n import matplotlib.pyplot as plt\n X = np.random.randn(10000,2)\n #X = np.cumsum(X,axis=0)\n pt = power_tracker(100,100,100)\n print(\"All at once: power={}\".format(pt.transform(X))) # all at once\n pt = power_tracker(100,1000,1000)\n print(\"alpha_mu={} alpha_pow={}\".format(pt.alpha_mu,pt.alpha_power) )\n step = 30\n idxs = list(range(step,X.shape[0],step))\n powers = np.zeros((len(idxs),X.shape[-1]))\n mus = np.zeros((len(idxs),X.shape[-1]))\n for i,j in enumerate(idxs):\n powers[i,:] = np.sqrt(pt.transform(X[j-step:j,:]))\n mus[i,:]=pt.mean()\n for d in range(X.shape[-1]):\n plt.subplot(X.shape[-1],1,d+1)\n plt.plot(X[:,d])\n plt.plot(idxs,mus[:,d])\n plt.plot(idxs,powers[:,d])\n\n\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nclass timestamp_interpolation(TransformerMixin):\n \"\"\"Incremental streaming tranformer to transform from per-packet time-stamps to per-sample timestamps \n with time-stamp smoothing, de-jittering, and dropped-sample detection.\n \"\"\"\n\n def __init__(self,fs=None,sample2timestamp=None, max_delta=200):\n \"\"\"tranform from per-packet (i.e. 
multiple-samples) to per-sample timestamps\n\n Args:\n fs (float): default sample rate, used when no other timing info is available\n sample2timestamp (transformer, optional): class to de-jitter timestamps based on sample-count. Defaults to None.\n \"\"\" \n self.fs=fs\n a0 = 1000/self.fs if self.fs is not None else 1\n # BODGE: special cases for particular mapping functions so can include the prior slope\n if sample2timestamp=='lower_bound_tracker':\n self.sample2timestamp = lower_bound_tracker(a0=a0)\n elif sample2timestamp=='linear_trend_tracker':\n self.sample2timestamp = linear_trend_tracker(a0=a0)\n else:\n self.sample2timestamp = sample2timestamp\n self.max_delta = max_delta\n\n def fit(self,ts,nsamp=1):\n \"\"\"[summary]\n\n Args:\n ts ([type]): [description]\n nsamp (int, optional): [description]. Defaults to 1.\n \"\"\" \n self.last_sample_timestamp_ = ts\n self.n_ = 0\n\n def transform(self,timestamp:float,nsamp:int=1):\n \"\"\"add per-sample timestamp information to the data matrix\n\n Args:\n timestamp (float): the timestamp of the last sample of d\n nsamp(int): number of samples to interpolate\n\n Returns:\n np.ndarray: (nsamp) the interpolated time-stamps\n \"\"\"\n if not hasattr(self,'last_sample_timestamp_'):\n self.fit(timestamp,nsamp)\n\n # update tracking number samples processed\n self.n_ = self.n_ + nsamp\n\n if self.last_sample_timestamp_ < timestamp or self.sample2timestamp is not None:\n # update the tracker for the sample-number to sample timestamp mapping\n if self.sample2timestamp is not None:\n #print(\"n={} ts={}\".format(n,timestamp))\n newtimestamp = self.sample2timestamp.transform(self.n_, timestamp)\n #print(\"ts={} newts={} diff={}\".format(timestamp,newtimestamp,timestamp-newtimestamp))\n # use the corrected de-jittered time-stamp -- if it's not tooo different\n if abs(timestamp-newtimestamp) < self.max_delta:\n timestamp = int(newtimestamp)\n\n # simple linear interpolation for the sample time-stamps\n samples_ts = np.linspace(self.last_sample_timestamp_, timestamp, nsamp+1, endpoint=True, dtype=int)\n samples_ts = samples_ts[1:]\n else:\n if self.fs :\n # interpolate with the estimated sample rate \n samples_ts = np.arange(-nsamp+1,1,dtype=int)*(1000/self.fs) + timestamp\n else:\n # give all same timestamp\n samples_ts = np.ones(nsamp,dtype=int)*timestamp\n\n # update the tracking info\n self.last_sample_timestamp_ = timestamp\n\n return samples_ts\n\n def testcase(self, npkt=1000, fs=100):\n \"\"\"[summary]\n\n Args:\n npkt (int, optional): [description]. Defaults to 1000.\n fs (int, optional): [description]. 
Defaults to 100.\n        \"\"\" \n        # generate random packet sizes (0-10 samples inclusive)\n        # NB: np.random.random_integers is deprecated/removed in modern numpy; randint's upper bound is exclusive\n        nsamp = np.random.randint(0,11,size=(npkt,))\n        # generate true sample timestamps\n        ts_true = np.arange(np.sum(nsamp))*1000/fs\n        # packet end indices\n        idx = np.cumsum(nsamp)-1\n        # packet end time-stamps\n        pkt_ts = ts_true[idx]\n        # add some time-stamp jitter, always positive..\n        pkt_ts = pkt_ts + np.random.uniform(0,.5*1000/fs,size=pkt_ts.shape)\n        # apply the time-stamp interpolation\n        sts=[]\n        tsfn = timestamp_interpolation(fs=fs,sample2timestamp = 'lower_bound_tracker')\n        for i,(n,t) in enumerate(zip(nsamp,pkt_ts)):\n            samp_ts = tsfn.transform(t,n)\n            sts.extend(samp_ts)\n        # plot the result.\n        import matplotlib.pyplot as plt\n        plt.plot(ts_true - sts)\n        plt.show()\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nfrom mindaffectBCI.decoder.preprocess import temporally_decorrelate\nclass temporal_decorrelator(TransformerMixin):\n    \"\"\"Incremental streaming transformer to temporally decorrelate the channels of an input stream\n    \"\"\"\n\n    def __init__(self, order=10, reg=1e-4, eta=1e-5, axis=-2):\n        self.order=order # NB: was not stored in the original, but fit() needs it\n        self.reg=reg\n        self.eta=eta\n        self.axis=axis\n\n    def fit(self,X):\n        \"\"\"initialize the decorrelation filter from a warmup data block\n\n        Args:\n            X (np.ndarray): (t,d) warmup data\n        \"\"\" \n        self.W_ = np.zeros((self.order,X.shape[-1]),dtype=X.dtype)\n        self.W_[-1,:]=1\n        # transform() returns only the data and updates self.W_ internally,\n        # so its result must not be tuple-unpacked here\n        self.transform(X[1:,:])\n\n    def transform(self,X):\n        \"\"\"temporally decorrelate the channels of X, updating the filter state\n\n        Args:\n            X (np.ndarray): the data to decorrelate\n\n        Returns:\n            np.ndarray: the decorrelated data\n        \"\"\"\n        if not hasattr(self,'W_'):\n            self.fit(X)\n\n        X, self.W_ = temporally_decorrelate(X, W=self.W_, reg=self.reg, eta=self.eta, axis=self.axis)\n\n        return X\n\n    def testcase(self, dur=3, fs=100, blksize=10):\n        \"\"\"incrementally decorrelate a random test signal and plot raw vs processed spectra\n\n        Args:\n            dur (int, optional): duration in seconds. Defaults to 3.\n            fs (int, optional): sample rate. Defaults to 100.\n            blksize (int, optional): samples per incremental block. Defaults to 10.\n        \"\"\" \n        import numpy as np\n        import matplotlib.pyplot as plt\n        from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum\n        fs=100\n        X = np.random.standard_normal((2,fs*dur,2)) # flat spectrum\n        #X = X + np.sin(np.arange(X.shape[-2])*2*np.pi/10)[:,np.newaxis]\n        X = X[:,:-1,:]+X[:,1:,:] # weak low-pass\n\n        #X = np.cumsum(X,-2) # 1/f spectrum\n        print(\"X={}\".format(X.shape))\n        plt.figure(1)\n        plot_grand_average_spectrum(X, fs)\n        plt.suptitle('Raw')\n        plt.show(block=False)\n\n        tdc = temporal_decorrelator()\n        wX = np.zeros(X.shape,X.dtype)\n        # NB: loop over the sample axis (-2); the original looped over X.shape[-1]\n        # (channels) and indexed the trial axis, which raises IndexError\n        for i in range(0,X.shape[-2],blksize):\n            idx = slice(i,i+blksize)\n            wX[:,idx,:] = tdc.transform(X[:,idx,:])\n        \n        # compare raw vs decorrelated spectra\n        plt.figure(2)\n        plot_grand_average_spectrum(wX,fs)\n        plt.suptitle('Decorrelated')\n        plt.show()\n\n\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\nfrom mindaffectBCI.decoder.preprocess import standardize_channel_power\nclass channel_power_standardizer(TransformerMixin):\n    \"\"\"Incremental streaming transformer for channel power normalization of an input stream\n    \"\"\"\n\n    def __init__(self, reg=1e-4, axis=-2):\n        self.reg=reg\n        self.axis=axis\n\n    def fit(self,X):\n        \"\"\"initialize the running power estimate from the first sample\n\n        Args:\n            X (np.ndarray): (t,d) warmup data\n        \"\"\" \n        self.sigma2_ = X[0,:]*X[0,:] # warmup with 1st sample power\n        self.transform(X[1:,:])\n\n    def transform(self,X):\n        \"\"\"standardize the per-channel power of X, updating the running power estimate\n\n        Args:\n            X (np.ndarray): the data to standardize\n\n        Returns:\n            np.ndarray: the power-standardized data\n        \"\"\"\n        if not hasattr(self,'sigma2_'):\n            self.fit(X)\n\n        # NB: store the updated power estimate back into sigma2_; the original\n        # assigned it to self.W_, so the running estimate was never updated\n        X, self.sigma2_ = standardize_channel_power(X, sigma2=self.sigma2_, reg=self.reg, axis=self.axis)\n\n        return X\n\n    def testcase(self, dur=3, fs=100, blksize=10):\n        \"\"\"incrementally power-standardize a random test signal and plot raw vs processed spectra\n\n        Args:\n            dur (int, optional): duration in seconds. Defaults to 3.\n            fs (int, optional): sample rate. Defaults to 100.\n            blksize (int, optional): samples per incremental block. 
Defaults to 10.\n        \"\"\" \n        import numpy as np\n        import matplotlib.pyplot as plt\n        from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum\n        fs=100\n        X = np.random.standard_normal((2,fs*dur,2)) # flat spectrum\n        #X = X + np.sin(np.arange(X.shape[-2])*2*np.pi/10)[:,np.newaxis]\n        X = X[:,:-1,:]+X[:,1:,:] # weak low-pass\n\n        #X = np.cumsum(X,-2) # 1/f spectrum\n        print(\"X={}\".format(X.shape))\n        plt.figure(1)\n        plot_grand_average_spectrum(X, fs)\n        plt.suptitle('Raw')\n        plt.show(block=False)\n\n        cps = channel_power_standardizer()\n        wX = np.zeros(X.shape,X.dtype)\n        # NB: loop over the sample axis (-2), as in temporal_decorrelator.testcase\n        for i in range(0,X.shape[-2],blksize):\n            idx = slice(i,i+blksize)\n            wX[:,idx,:] = cps.transform(X[:,idx,:])\n        \n        # compare raw vs power-standardized spectra\n        plt.figure(2)\n        plot_grand_average_spectrum(wX,fs)\n        plt.suptitle('Standardized')\n        plt.show()\n\n\ndef testRaw():\n    \"\"\"test the raw data interface by streaming into a signal viewer\n    \"\"\" \n    from sigViewer import sigViewer # local import, as in testPP (missing in the original)\n    # test with raw\n    ui = UtopiaDataInterface()\n    ui.connect()\n    sigViewer(ui,30000) # 30s sigviewer\n\ndef testPP():\n    \"\"\"test the pre-processed data interface by streaming into a signal viewer\n    \"\"\" \n    from sigViewer import sigViewer\n    # test with a filter + downsampler\n    ppfn= butterfilt_and_downsample(order=4, stopband=((0,1),(25,-1)), fs_out=100)\n    #ppfn= butterfilt_and_downsample(order=4, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=80) \n    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None)\n    ui.connect()\n    sigViewer(ui)\n\ndef testFileProxy(filename,fs_out=999):\n    \"\"\"test the data interface against a saved-file proxy hub\n\n    Args:\n        filename (str): the saved file to replay\n        fs_out (int, optional): output sample rate. Defaults to 999.\n    \"\"\" \n    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub\n    U = FileProxyHub(filename)\n    from sigViewer import sigViewer\n    # test with a filter + downsampler\n    #ppfn= butterfilt_and_downsample(order=4, stopband=((0,3),(25,-1)), fs_out=fs_out)\n    ppfn= butterfilt_and_downsample(order=4, stopband=(1,15,'bandpass'), fs_out=fs_out)\n    #ppfn = None\n    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U)\n    ui.connect()\n    sigViewer(ui)\n\ndef testFileProxy2(filename):\n    \"\"\"replay a saved file and dump the (pre-processed) data and stimulus streams as a pickle\n\n    Args:\n        filename (str): the saved file to replay\n    \"\"\" \n    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub\n    U = FileProxyHub(filename)\n    fs = 200\n    fs_out = 200\n    # test with a filter + downsampler\n    ppfn= butterfilt_and_downsample(order=4, stopband=((45,65),(0,3),(25,-1)), fs=fs, fs_out=fs_out)\n    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U, fs=fs)\n    ui.connect()\n    # run in bits..\n    data=[]\n    stim=[]\n    emptycount = 0 \n    while True:\n        newmsg, nsamp, nstim = ui.update()\n        if len(newmsg) == 0 and nsamp == 0 and nstim == 0: \n            emptycount = emptycount + 1\n            if emptycount > 10:\n                break\n        else:\n            emptycount=0\n        if nsamp > 0:\n            data.append(ui.data_ringbuffer[-nsamp:,:].copy())\n        if nstim > 0:\n            stim.append(ui.stimulus_ringbuffer[-nstim:,:].copy())\n    # convert to single data block\n    data = np.vstack(data)\n    stim = np.vstack(stim)\n    # dump as pickle\n    import pickle\n    if ppfn is None:\n        pickle.dump(dict(data=data,stim=stim),open('raw_udi.pk','wb'))\n    else:\n        pickle.dump(dict(data=data,stim=stim),open('pp_udi.pk','wb'))\n\ndef testERP():\n    \"\"\"test the data interface by streaming into an ERP viewer\n    \"\"\" \n    ui = UtopiaDataInterface()\n    ui.connect()\n    erpViewer(ui,evtlabs=None) # NB: erpViewer is not imported in this module; it must come from the caller's namespace\n\ndef testElectrodeQualities(X,fs=200,pktsize=20):\n    \"\"\"compute per-packet electrode noise-to-signal ratios for offline data\n\n    Args:\n        X (np.ndarray): raw data array\n        fs (int, optional): sample rate. Defaults to 200.\n        pktsize (int, optional): samples per simulated packet. Defaults to 20.\n\n    Returns:\n        np.ndarray: (npkt,d) noise-to-signal ratio per packet and channel\n    \"\"\" \n    # recurse if more dims than we want...\n    if X.ndim>2:\n        sigq=[]\n        for i in range(X.shape[0]):\n            sigqi = testElectrodeQualities(X[i,...],fs,pktsize)\n            sigq.append(sigqi)\n        sigq=np.concatenate(sigq,0)\n        return sigq\n    \n    ppfn= butterfilt_and_downsample(order=6, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=100)\n    ppfn.fit(X[:10,:],fs=200)\n    noise2sig = np.zeros((int(X.shape[0]/pktsize),X.shape[-1]),dtype=np.float32)\n    for pkti in range(noise2sig.shape[0]):\n        t = pkti*pktsize\n        Xi = X[t:t+pktsize,:]\n        Xip = ppfn.transform(Xi)\n        # NB: update_electrode_powers is an instance method; this class-level call\n        # binds Xi as self and will fail -- a configured UtopiaDataInterface\n        # instance is needed here\n        raw_power, preproc_power = UtopiaDataInterface.update_electrode_powers(Xi,Xip)\n        noise2sig[pkti,:] = np.maximum(float(1e-6), (raw_power - preproc_power)) / np.maximum(float(1e-8),preproc_power)\n    return noise2sig\n\n    \nif __name__ == \"__main__\":\n    #timestamp_interpolation().testcase()\n    #butterfilt_and_downsample.testcase()\n    #testRaw()\n    #testPP()\n    #testERP()\n    filename=\"~/Desktop/mark/mindaffectBCI_*.txt\"\n    testFileProxy(filename)\n    #testFileProxy2(filename)\n    # \"C:\\\\Users\\\\Developer\\\\Downloads\\\\mark\\\\mindaffectBCI_brainflow_200911_1229_90cal.txt\")\n    #\"..\\..\\Downloads\\khash\\mindaffectBCI_noisetag_bci_200907_1433.txt\"\n" ]
[ [ "numpy.minimum", "numpy.mean", "numpy.cumsum", "numpy.concatenate", "numpy.max", "numpy.random.random_integers", "numpy.log", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.vstack", "matplotlib.pyplot.subplot", "numpy.flatnonzero", "numpy.array", "numpy.zeros", "matplotlib.pyplot.title", "numpy.random.randn", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.pyplot.clf", "matplotlib.pyplot.show", "numpy.floor", "numpy.ceil", "numpy.random.standard_normal", "numpy.sum", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.cla", "numpy.ones", "numpy.random.uniform", "numpy.abs", "numpy.linspace" ] ]
thejasvibr/batracker
[ "def2ae9a0f18df0b9b95d67a203d2afd8be0f2ce" ]
[ "batracker/signal_detection/detection.py" ]
[ "'''\nDeals with the actual detection of signals in multichannel audio files. \nThere are two problems that need to solved while detecting a signal of interest.\n #. within-channel signal detection\n #. across-channel correspondence matching\n\nWithin-channel signal detection\n-------------------------------\nThis task involves `locally` checking if there are any signals of interest in one channel at a time. The exact methods used for \nthe within-channel can be set by the user, though the simplest is of course a basic threshold-type detector. Whenever the \nsignal goes beyond a particular threshold, a signal is considered to be in that region.\n\n\nBuilt-in detection routines\n---------------------------\nThe detection module has a few simple detection routines. More advanced routines\nare unlikely to form a core part of the package, and need to be written by the \nuser. \n\n#. dBrms_detector : Calculates the moving dB rms profile of an audio clip. The\nUser needs to define the size of the moving window and the threshold in dB rms. \n\n#. envelope_detector : Generates the Hilbert envelop of the audio clip. Regions above\nthe set threshold in dB peak amplitude are defined as detections. This method is faster\nthan the dBrms_detector.\n'''\n\nimport matplotlib.pyplot as plt\nplt.rcParams['agg.path.chunksize']=10000\nimport numpy as np\nimport scipy.signal as signal\nimport scipy.io.wavfile as wav\nimport scipy.ndimage as ndimage\nimport tqdm\n\nfrom batracker.common_dsp.sigproc import *\n\n\ndef cross_channel_threshold_detector(multichannel, fs, **kwargs):\n '''\n Parameters\n ----------\n multichannel : np.array\n Msamples x Nchannels audio data\n fs : float >0\n detector_function : function, optional \n The function used to detect the start and end of a signal. \n Any custom detector function can be given, the compulsory inputs\n are audio np.array, sample rate and the function should accept keyword\n arguments (even if it doesn't use them.)\n Defaults to dBrms_detector. \n \n \n Returns\n -------\n all_detections : list\n A list with sublists containing start-stop times of the detections \n in each channel. Each sublist contains the detections in one channel.\n \n Notes\n -----\n For further keyword arguments see the `threshold_detector` function\n \n See Also\n --------\n dBrms_detector\n \n '''\n samples, channels = multichannel.shape\n detector_function = kwargs.get('detector_function', dBrms_detector)\n print(channels, samples)\n all_detections = []\n for each in tqdm.tqdm(range(channels)):\n all_detections.append(detector_function(multichannel[:,each], fs, **kwargs))\n return all_detections\n \n\n\n\ndef dBrms_detector(one_channel, fs, **kwargs):\n '''\n Calculates the dB rms profile of the input audio and \n selects regions which arae above the profile. \n \n Parameters\n ----------\n one_channel\n fs\n dbrms_threshold: float, optional\n Defaults to -50 dB rms\n dbrms_window: float, optional\n The window which is used to calculate the dB rms profile\n in seconds. Defaults to 0.001 seconds.\n \n Returns\n -------\n detections : list with tuples\n Each tuple corresponds to a candidate signal region\n '''\n if one_channel.ndim > 1:\n raise IndexError(f'Input audio must be flattened, and have only 1 dimension. 
\\\n Current audio has {one_channel.ndim} dimensions')\n dbrms_window = kwargs.get('dbrms_window',0.001) # seconds\n dbrms_threshold = kwargs.get('dbrms_threshold', -50)\n \n window_samples = int(fs*dbrms_window)\n dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))\n \n labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)\n if num_regions==0:\n print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')\n regions_above = ndimage.find_objects(labelled.flatten())\n regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]\n \n return regions_above_timestamps\n\n\ndef envelope_detector(audio, fs, **kwargs):\n '''\n Generates the Hilbert envelope of the audio. Signals are detected\n wherever the envelope goes beyond a user-defined threshold value.\n \n Two main options are to segment loud signals with reference to dB peak or \n with reference dB above floor level. \n \n Parameters\n ----------\n audio\n fs\n \n \n Keyword Arguments\n -----------------\n threshold_db_floor: float, optional\n The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as\n the floor level. If not specified, then threshold_dbpeak is used to segment signals.\n threshold_dbpeak : float, optional\n The value beyond which a signal is considered to start.\n Used only if relative_to_baseline is True.\n lowpass_durn: float, optional\n The highest time-resolution of envelope fluctuation to keep. \n This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope\n signal. \n \n\n Returns\n -------\n regions_above_timestamps \n \n \n \n '''\n envelope = np.abs(signal.hilbert(audio))\n \n \n if not kwargs.get('lowpass_durn') is None:\n lowpass_durn = kwargs['lowpass_durn'] # seconds\n freq = 1.0/lowpass_durn\n b,a = signal.butter(1, freq/(fs*0.5),'lowpass')\n envelope = signal.filtfilt(b,a,envelope)\n \n if not kwargs.get('threshold_db_floor', None) is None:\n floor_level = np.percentile(20*np.log10(envelope),5)\n threshold_db = floor_level + kwargs['threshold_db_floor']\n else:\n # get regions above the threshold\n threshold_db = kwargs['threshold_dbpeak']\n linear_threshold = 10**(threshold_db/20)\n labelled, num_detections = ndimage.label(envelope>=linear_threshold)\n regions_above = ndimage.find_objects(labelled.flatten())\n regions_above_timestamps = [get_start_stop_times(each, fs ) for each in regions_above]\n return regions_above_timestamps\n\n\n \ndef get_start_stop_times(findobjects_tuple, fs):\n '''\n \n '''\n only_tuple = findobjects_tuple[0]\n start, stop = only_tuple.start/fs, only_tuple.stop/fs\n return start, stop\n\n\ndef moving_rms(X, **kwargs):\n '''Calculates moving rms of a signal with given window size. \n Outputs np.array of *same* size as X. The rms of the \n last few samples <= window_size away from the end are assigned\n to last full-window rms calculated\n Parameters\n ----------\n X : np.array\n Signal of interest. \n window_size : int, optional\n Defaults to 125 samples. \n Returns\n -------\n all_rms : np.array\n Moving rms of the signal. 
\n '''\n window_size = kwargs.get('window_size', 125)\n starts = np.arange(0, X.size)\n stops = starts+window_size\n valid = stops<X.size\n valid_starts = np.int32(starts[valid])\n valid_stops = np.int32(stops[valid])\n all_rms = np.ones(X.size).reshape(-1,1)*999\n\n for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):\n rms_value = rms(X[start:stop])\n all_rms[i] = rms_value\n \n # replace all un-assigned samples with the last rms value\n all_rms[all_rms==999] = np.nan\n\n return all_rms\n# \n#if __name__ == '__main__':\n# import scipy.signal as signal \n# # trying out the hilbert envelope method:\n# fs = 250000\n# background = -60 # dB rms\n# audio = np.random.normal(0, 10**(background/20), fs)\n# duration = 0.005\n# sound_start = 0.05\n# t = np.linspace(0, duration, int(fs*duration))\n# bat_call = signal.chirp(t,90000, 25000, t[-1])\n# bat_call *= 0.5\n# sound_stop = sound_start+duration\n# \n# start, end = np.int32(np.array([sound_start,\n# sound_stop])*fs)\n# audio[start:end] += bat_call\n# \n# envelope = np.abs(signal.hilbert(audio))\n# \n# dets = envelope_detector(audio, fs, threshold_dbpeak=-20)\n# print(dets)\n## " ]
[ [ "scipy.ndimage.label", "numpy.ones", "scipy.signal.butter", "scipy.signal.filtfilt", "numpy.arange", "scipy.signal.hilbert", "numpy.int32", "numpy.log10" ] ]
FlorianPfisterer/2D-LSTM-Seq2Seq
[ "1b07273fc73237259ae99eabfc509f54ad233ccf" ]
[ "test/test_lstm2d_cell.py" ]
[ "from unittest import TestCase\nimport torch\nfrom model.lstm2d_cell import LSTM2dCell\n\n\nclass LSTM2dCellTest(TestCase):\n \"\"\"\n Unit tests for the 2D-LSTM cell.\n \"\"\"\n embed_dim = 50\n encoder_state_dim = 20\n input_dim = 2 * encoder_state_dim + embed_dim\n cell_state_dim = 25\n batch_size = 42\n\n def setUp(self):\n torch.manual_seed(42)\n\n self.x_j = torch.randn(self.batch_size, self.input_dim)\n self.s_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)\n self.s_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)\n self.c_prev_hor = torch.randn(self.batch_size, self.cell_state_dim)\n self.c_prev_ver = torch.randn(self.batch_size, self.cell_state_dim)\n\n self.device = torch.device('cpu')\n\n def test_dimensions(self):\n \"\"\"\n Tests if the input and output dimensions of the cell are as expected.\n \"\"\"\n cell = LSTM2dCell(self.input_dim, self.cell_state_dim, self.device)\n c_ji, s_ji = cell.forward(x=self.x_j, s_prev_hor=self.s_prev_hor, s_prev_ver=self.s_prev_ver,\n c_prev_hor=self.c_prev_hor, c_prev_ver=self.c_prev_ver)\n\n c_shape = list(c_ji.shape)\n s_shape = list(s_ji.shape)\n\n self.assertEqual(c_shape, [self.batch_size, self.cell_state_dim], 'Next cell state has unexpected shape')\n self.assertEqual(s_shape, [self.batch_size, self.cell_state_dim], 'Next hidden state has unexpected shape')\n\n def test_same_over_batch(self):\n \"\"\"\n Tests if the outputs of the cell are the same over the batch if the same input is fed in multiple times.\n \"\"\"\n toy_input_dim = 4\n toy_batch_size = 7\n toy_state_dim = 3\n\n # create toy values and repeat them over the batch\n toy_x = torch.Tensor([1.5, 4.2, 3.1415, 2.71]).expand(toy_batch_size, toy_input_dim)\n\n toy_s_prev_hor = torch.Tensor([-.4, 1.2, 42.195]).expand(toy_batch_size, toy_state_dim)\n toy_s_prev_ver = torch.Tensor([2.3, 7.12, -3.14]).expand(toy_batch_size, toy_state_dim)\n\n toy_c_prev_hor = torch.Tensor([-10.1, 4.5, -0.1]).expand(toy_batch_size, toy_state_dim)\n toy_c_prev_ver = torch.Tensor([17, 1.001, -2.23]).expand(toy_batch_size, toy_state_dim)\n\n cell = LSTM2dCell(toy_input_dim, toy_state_dim, self.device)\n c, s = cell.forward(x=toy_x, s_prev_hor=toy_s_prev_hor, s_prev_ver=toy_s_prev_ver,\n c_prev_hor=toy_c_prev_hor, c_prev_ver=toy_c_prev_ver)\n\n # check if the cell and hidden state are the same across the whole batch\n c_first = c[0, :]\n repeated_c_first = c_first.expand(toy_batch_size, c_first.shape[-1])\n self.assertTrue(repeated_c_first.allclose(c), 'Next cell state varies across same-input batch')\n\n s_first = s[0, :]\n repeated_s_first = s_first.expand(toy_batch_size, s_first.shape[-1])\n self.assertTrue(repeated_s_first.allclose(s), 'Next hidden state varies across same-input batch')\n\n" ]
[ [ "torch.manual_seed", "torch.device", "torch.Tensor", "torch.randn" ] ]
colon3ltocard/pythonalgorithms
[ "60e2a46d4e53430570142f79e9930b02c3f89ed0" ]
[ "bidir_dijkstra.py" ]
[ "\"\"\"\nVisualizing bidirectionnal Dijkstra\nusing matplotlib\n\"\"\"\nimport sys\nfrom dataclasses import dataclass\nfrom heapq import heappush, heappop\nfrom itertools import permutations\nfrom collections import defaultdict\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\nfrom dijkstra import (\n Node,\n generate_random_graph,\n build_shortest_path,\n dijkstra,\n)\n\n\n@dataclass\nclass Context:\n distances: dict\n previous: dict\n node: None\n visited_nodes: set\n\n\ndef dijkstra_iterator(nodes: list[Node], src_id: int, hf=lambda x: 0.0):\n \"\"\"\n Internal loop of the Dijkstra algorithm\n as a step by step iterator\n hf is an optional heuristic\n \"\"\"\n visited_nodes = set()\n h: list[tuple[float, Node]] = []\n previous = dict()\n distances = defaultdict(lambda: sys.maxsize)\n distances[src_id] = hf(nodes[src_id])\n ctx: Context = Context(\n previous=previous,\n distances=distances,\n node=None,\n visited_nodes=visited_nodes,\n )\n\n heappush(h, (0.0, nodes[src_id]))\n\n while h:\n _, node = heappop(h)\n\n if node.id in visited_nodes:\n continue\n\n dist = distances[node.id]\n\n for n, d in (\n (nodes[k], v)\n for k, v in node.neighbours.items()\n if k not in visited_nodes\n ):\n new_dist = dist + d\n cost = new_dist + hf(n) - hf(node)\n if cost <= distances[n.id]:\n distances[n.id] = cost\n previous[n.id] = node.id\n\n heappush(h, (cost, n))\n\n visited_nodes.add(node.id)\n ctx.node = node\n yield ctx\n\n ctx.node = None\n yield ctx\n\n\ndef dijkstra_forward(\n nodes: list[Node], src_id: int, dst_id: int, hf=lambda x: 0.0\n) -> list[int]:\n \"\"\"\n 'classical' forward Dijkstra but based on our iterator.\n \"\"\"\n coro = dijkstra_iterator(nodes, src_id, hf=hf)\n for ctx in coro:\n if ctx.node is None:\n return [], []\n elif ctx.node.id == dst_id:\n return ctx.distances[dst_id], list(\n build_shortest_path(ctx.previous, dst_id, src_id)\n )\n\n\ndef bidir_dijkstra(\n nodes: list[Node],\n src_id: int,\n dst_id: int,\n hff=lambda _: 0.0,\n hfb=lambda _: 0.0,\n consistent: bool = True,\n) -> list[int]:\n \"\"\"\n bidirectionnal dijkstra, we search from both start => end\n and end => start using two iterators.\n hff and hfb are optional heuristics\n for respectively the forward and backward iterators\n (for later bidir A*)\n \"\"\"\n forward = dijkstra_iterator(nodes, src_id, hf=hff)\n backward = dijkstra_iterator(nodes, dst_id, hf=hfb)\n\n shortest = sys.maxsize\n forward_node = backward_node = None\n f = []\n b = []\n for idx, (ctx_forward, ctx_backward) in enumerate(zip(forward, backward)):\n\n if any(x.node is None for x in (ctx_forward, ctx_backward)):\n # no path between the two nodes\n return [], [], (f, b)\n\n f.append(ctx_forward.node)\n b.append(ctx_backward.node)\n\n if forward_node and (\n not consistent\n or sum(\n x.distances[x.node.id] - hf(x.node)\n for x, hf in ((ctx_forward, hff), (ctx_backward, hfb))\n )\n >= shortest\n ):\n\n forward_path = build_shortest_path(\n ctx_forward.previous, forward_node.id, src_id\n )\n backward_path = build_shortest_path(\n ctx_backward.previous, backward_node.id, dst_id\n )[::-1]\n path = forward_path + backward_path\n return (\n shortest,\n path,\n (f, b),\n )\n\n else:\n for (ctx, hf), (ctx2, hf2) in permutations(\n ((ctx_forward, hff), (ctx_backward, hfb)), 2\n ):\n for n, d in ctx.node.neighbours.items():\n if n in ctx2.visited_nodes:\n distance = (\n ctx.distances[ctx.node.id]\n + ctx2.distances[n]\n + d\n - hf(ctx.node)\n - hf2(nodes[n])\n )\n\n if distance < shortest:\n 
shortest = distance\n forward_node = (\n ctx.node if ctx is ctx_forward else nodes[n]\n )\n backward_node = (\n ctx.node if ctx is ctx_backward else nodes[n]\n )\n print(\n f'Iter_{idx}: contact between {forward_node}->{backward_node} with d={shortest}'\n )\n\n\nclass Animator:\n \"\"\"\n Builds an animation from\n a bidir shortest path finder.\n \"\"\"\n\n def __init__(self, nodes: list[Node], title='', draw_edges=True) -> None:\n self.fig, self.ax = plt.subplots()\n plt.title(title)\n plt.tight_layout()\n self.ax.set_aspect('equal')\n self.i = True\n if draw_edges:\n edges = {\n tuple(sorted((n.id, x))) for n in nodes for x in n.neighbours\n }\n for edge in edges:\n from_node, to_node = [nodes[x] for x in edge]\n x = [n.x for n in (from_node, to_node)]\n y = [n.y for n in (from_node, to_node)]\n plt.plot(x, y, color='gray', linewidth=0.5)\n\n x, y = [n.x for n in nodes], [n.y for n in nodes]\n self.ax.scatter = plt.scatter(\n x,\n y,\n c=[0 for _ in range(len(x))],\n s=[30] + [10] * (len(nodes) - 2) + [30],\n vmin=0,\n vmax=3,\n cmap=matplotlib.colors.ListedColormap(\n ['grey', 'springgreen', 'red', 'white']\n ),\n )\n self._colors = self.ax.scatter.get_array()\n\n for n in nodes:\n if not n.neighbours:\n self._colors[n.id] = 3\n\n def update(self, nodes: tuple[Node, Node, list[Node]]):\n \"\"\"\n Updates the plot with a tuple of nodes (forward, backward, shortest_path)\n \"\"\"\n f, b, s = nodes\n\n if not s:\n self._colors[f.id] = 1\n self._colors[b.id] = 2\n self.ax.scatter.set_array(self._colors)\n return (self.ax.scatter,)\n else:\n x = [n.x for n in s]\n y = [n.y for n in s]\n\n if self.i:\n c = 'green'\n else:\n c = 'orange'\n\n ap = self.ax.plot(x, y, color=c, linewidth=2)\n self.i = not (self.i)\n return ap\n\n\ndef make_animated_gif(\n title: str,\n g: list[Node],\n dst_file: str,\n fs: list[Node],\n bs: list[Node],\n shortest: list[Node],\n draw_edges: bool = True,\n writer: str = 'ffmpeg',\n interval: int = 250,\n blinking_ratio=0.5,\n):\n \"\"\"\n Makes an animated gif out of two sequences of forward (fs) and backward (bs)\n path-finding algorithm. The final shortest path will be blinked.\n \"\"\"\n anim = Animator(g, title=title, draw_edges=draw_edges)\n\n def node_gen():\n for fn, bn in zip(fs, bs):\n yield fn, bn, []\n\n res = [g[i] for i in shortest]\n for _ in range(int(len(fs) * blinking_ratio)):\n yield _, _, res\n\n ani = animation.FuncAnimation(\n anim.fig,\n anim.update,\n node_gen(),\n interval=interval,\n blit=True,\n repeat_delay=500,\n save_count=len(fs) * 2,\n )\n ani.save(f'imgs/{dst_file}', writer=writer)\n\n\nif __name__ == '__main__':\n # sanity check on the iterator versus 'simple' implementation\n g = generate_random_graph(100, connect_probability=0.1)\n cost, sp = dijkstra_forward(g, 0, len(g) - 1)\n cost2, sp2 = dijkstra(g, 0, len(g) - 1)\n # we also compare our bidir version agaisnt the other two ^^\n cost3, sp3, (f, b) = bidir_dijkstra(g, 0, len(g) - 1)\n\n # and against a backward run only\n cost4, sp4 = dijkstra_forward(g, len(g) - 1, 0)\n sp4 = sp4[::-1]\n\n print(cost, cost2, cost3, cost4)\n\n for p in (sp, sp2, sp4, sp3):\n print(' -> '.join(str(p) for p in p))\n\n assert sp == sp2 == sp3 == sp4\n\n make_animated_gif(\n f'Bidir Dijkstra n={len(f)}', g, 'bidir_100.gif', f, b, sp3\n )\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.tight_layout", "matplotlib.colors.ListedColormap" ] ]
lu-w/criticality-recognition
[ "5ad2e12699ad4bf2d7f60ce9e30f26110adce436" ]
[ "auto/auto_visualizer/auto_visualizer.py" ]
[ "# Visualizer is for debugging purposes only\nimport logging\nimport math\nimport random\nimport threading\nimport http.server\nimport socketserver\nimport os\nimport re\n\nfrom shapely import wkt\nimport matplotlib.pyplot as plt\nimport mpld3\nimport screeninfo\nimport tempfile\nimport webbrowser\nimport owlready2\nfrom shapely import geometry\nimport numpy as np\nfrom tqdm import tqdm\nimport time as pytime\n\nimport auto.auto\n\nfrom criticality_recognition import phenomena_extraction\n\n# TODO\n# - visualize scenario level CPs\n# - show has distance to in table for each individual - as ternary relations - instead of omitting it\n\n####################\n# Config constants #\n####################\n\n# Classes to not show in visualization\n_NO_PRINTING_CLASSES = {\"physics.Has_Distance_To\", \"perception.Is_Full_Occlusion\", \"perception.Is_Occlusion\"}\n# Data/object properties to hide from the individual tables shown when hovering\n_NO_PRINTING_PROPERTIES = {\"perceptional_property\", \"traffic_related_concept_property\",\n \"descriptive_traffic_entity_property\", \"traffic_entity_property\", \"activity_property\",\n \"physical_property\", \"traffic_modeling_property\", \"traffic_entity_property\",\n \"automotive_urban_traffic_property\", \"L1_property\", \"L2_property\", \"L3_property\",\n \"L4_property\", \"L5_property\", \"L6_property\", \"traffic_model_element_property\",\n \"criticality_phenomenon_as_object_property\", \"has_positional_relation\",\n \"has_spatial_relation\", \"has_dynamical_relation\", \"SF_spatial_relation\",\n \"performance_spatial_relation\", \"EH_spatial_relation\", \"RCC8_spatial_relation\", \"rcc8dc\",\n \"ehDisjoint\"}\n# If one hides long property lists, this is the number after which the list is cut off\n_MAX_PROPS_DISPLAY = 4\n_AVOID_LABEL_COLLISIONS = False\n\n# Logging\nlogger = logging.getLogger(__name__)\n\n\n# Helper function for sorting CPs & individuals\ndef natural_sort_key(s, _nsre=re.compile(\"([0-9]+)\")):\n return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(str(s))]\n\n\n#######\n# CSS #\n#######\n\n# Scene CSS (added is iframes to scenario HTML)\nscene_css = \"\"\"\n <style>\n svg * {\n font-size: 4pt;\n }\n table {\n border: solid 1px #DDEEEE;\n border-collapse: collapse;\n border-spacing: 0;\n font: normal 8px, sans-serif;\n }\n thead th {\n background-color: #DDEFEF;\n border: solid 1px #DDEEEE;\n color: #336B6B;\n padding: 3px;\n text-align: left;\n text-shadow: 1px 1px 1px #fff;\n font-size: 10pt;\n }\n tbody td {\n background-color: #FFFFFF;\n border: solid 1px #DDEEEE;\n color: #333;\n padding: 3px;\n text-shadow: 1px 1px 1px #fff;\n font-size: 8pt;\n }\n .cp-tooltip {}\n </style>\n \"\"\"\n\n# Scenario CSS (main CSS)\nscenario_css = \"\"\"\n <style>\n .slider {\n -webkit-appearance: none; /* Override default CSS styles */\n appearance: none;\n width: 100%; /* Full-width */\n height: 25px; /* Specified height */\n background: #d3d3d3; /* Grey background */\n outline: none; /* Remove outline */\n opacity: 0.7; /* Set transparency (for mouse-over effects on hover) */\n -webkit-transition: .2s; /* 0.2 seconds transition on hover */\n transition: opacity .2s;\n }\n .slider:hover {\n opacity: 1; /* Fully shown on mouse-over */\n }\n .slider::-webkit-slider-thumb {\n -webkit-appearance: none; /* Override default look */\n appearance: none;\n width: 25px; /* Set a specific slider handle width */\n height: 25px; /* Slider handle height */\n background: #04AA6D; /* Green background */\n cursor: pointer; /* Cursor on 
hover */\n }\n .slider::-moz-range-thumb {\n width: 25px; /* Set a specific slider handle width */\n height: 25px; /* Slider handle height */\n background: #04AA6D; /* Green background */\n cursor: pointer; /* Cursor on hover */\n }\n </style>\"\"\"\n\n\ndef visualize_scenario(scenario, cps=None):\n \"\"\"\n Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).\n :param scenario: Either a list of worlds, each world representing a single scene or a single world representing a\n whole scenario\n :param cps: A list of criticality phenomena which optionally to visualize as well.\n :return: The path to the directory in which to find the created HTML visualization.\n \"\"\"\n pl_html = []\n scenario_inst = None\n if cps is None:\n cps = []\n\n # Fetch scene list\n if type(scenario) == list:\n scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)\n [0] for scene_world in scenario]\n elif type(scenario) == owlready2.namespace.World or type(scenario) == owlready2.World:\n tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario)\n scenario_inst = scenario.search(type=tm.Scenario)[0]\n scenes = list(filter(lambda x: tm.Scene in x.is_a, scenario_inst.has_traffic_model))\n else:\n raise ValueError\n scenes = sorted(scenes, key=lambda x: x.inTimePosition[0].numericPosition[0])\n\n # Assemble scenario title\n title = \"Scenario\"\n if scenario_inst and hasattr(scenario_inst, \"identifier\") and len(scenario_inst.identifier) > 0:\n title += \" \" + str(scenario_inst.identifier[0])\n scenario_info = \"(\" + str(len(scenes)) + \" Scenes)\"\n # Main HTML code for index.html\n html_body = \"\"\"<!DOCTYPE html>\n<html>\n <head>\n <link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3\" crossorigin=\"anonymous\">\n <script src=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js\" integrity=\"sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p\" crossorigin=\"anonymous\"></script>\n <script src=\"https://code.jquery.com/jquery-3.6.0.min.js\"></script>\n <meta charset=\"utf-8\">\"\"\" + scenario_css + \"\"\"\n <title>\"\"\" + title + \"\"\"</title>\n </head>\n <body>\n <div class=\\\"d-flex flex-row justify-content-center\\\"><div class=\\\"mt-3 py-1 px-6 alert alert-info\\\" style=\\\"display: inline-block\\\" role=\\\"alert\\\"><center><h5>\"\"\" + title + \"\"\" \"\"\" + scenario_info + \"\"\"</h5></center></div></div>\n <div class=\"slidecontainer m-2\">\n <input type=\"range\" min=\"1\" max=\\\"\"\"\" + str(len(scenes)) + \"\"\"\\\" value=\"1\" class=\"slider\" id=\"myRange\">\n </div>\n <script>\n var slider = document.getElementById(\"myRange\");\n var last_set = 1\n var show_all_cps = true\n slider.oninput = function() {\n var output = document.getElementById(\"plt\" + this.value);\n var last_output = document.getElementById(\"plt\" + last_set);\n last_output.style.display = 'none';\n output.style.display = 'block';\n last_set = this.value\n }\n function toggle_cps_all_iframes() {\n show_all_cps = !show_all_cps\n $(\".cp-all-button\").each(function(i) {\n if (show_all_cps) {\n this.parentElement.classList.add(\"active\")\n this.checked = true\n } else {\n this.parentElement.classList.remove(\"active\")\n this.checked = false\n }\n })\n $(\".cp-button\").each(function(i) {\n if (show_all_cps) {\n 
this.parentElement.classList.add(\"active\")\n this.checked = true\n } else {\n this.parentElement.classList.remove(\"active\")\n this.checked = false\n }\n })\n $(\".scene-plot\").each(function(i) {\n this.contentWindow.toggle_cps(show_all_cps)\n })\n }\n function toggle_cp_class(ele, cp_cls_id) {\n // 0. disable automatically checked checkbox (will be added again at step 3)\n ele.checked = !ele.checked\n // 1. find active scene plot\n active_scene = $(\".scene-plot-container\").filter(function(i) {\n return this.style.display !== \"none\"\n })[0]\n // 2. get CP pred. str for given cp_cls_id\n cp_pred = active_scene.getElementsByClassName(\"scene-plot\")[0].contentWindow.cp_predicates[cp_cls_id]\n // 3. Toggle all buttons for this CP pred\n $(\"label > span:contains(\" + cp_pred + \")\").each(function(i) {\n this.parentElement.classList.toggle(\"active\")\n this.parentElement.querySelector(\".cp-button\").checked = !this.parentElement.querySelector(\".cp-button\").checked\n })\n // 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index\n $(\".scene-plot\").each(function(k) {\n cp_cls_id_scene = -1\n for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {\n if (cp_pred === this.contentWindow.cp_predicates[i]) {\n cp_cls_id_scene = i\n }\n }\n if (cp_cls_id_scene >= 0) {\n this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)\n }\n })\n }\n </script>\n \"\"\"\n pl_html.append(html_body)\n iframes = []\n\n def get_color(p):\n # Fetches a different color each time, but ensures that it has a readable contrast.\n _LUMA_LIMIT = 170\n color = 0\n luma = _LUMA_LIMIT\n while luma >= _LUMA_LIMIT:\n color = random.randrange(0, 0xFFFFFF, 0xF)\n luma = 0.2126 * ((color >> 16) & 0xff) + 0.7152 * ((color >> 8) & 0xff) + 0.0722 * ((color >> 0) & 0xff)\n return \"#\" + \"%06x\" % color\n\n # Create HTML for each scene\n for i, scene in enumerate(scenes):\n logger.info(\"Plotting scene \" + str(i + 1) + \" / \" + str(len(scenes)))\n scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)]\n cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects]))))\n cp_color = 0\n no_geo_entities = []\n width = 24.5\n height = 10\n try:\n primary_screens = list(filter(lambda x: x.is_primary, screeninfo.get_monitors()))\n if len(primary_screens) > 0:\n width = (primary_screens[0].width_mm / 25.4) * 0.73\n height = (primary_screens[0].height_mm / 25.4) * 0.73\n except screeninfo.common.ScreenInfoError:\n logger.info(\"No screens found, using default plot size of \" + str(width) + \" in x \" + str(height) + \" in\")\n fig = plt.figure(figsize=(width, height))\n plt.axis(\"equal\")\n entity_labels = []\n entity_relations = []\n relations_per_cp_class = dict()\n cps_relations = []\n cps_for_tooltips = []\n centroids_x = []\n centroids_y = []\n plotted_labels = []\n entity_points = dict()\n traffic_entities = tqdm(scene.has_traffic_entity)\n for entity in traffic_entities:\n traffic_entities.set_description(str(entity))\n if len(entity.hasGeometry) > 0:\n for geo in entity.hasGeometry:\n shape = wkt.loads(geo.asWKT[0])\n entity_cp_relations = []\n points = None\n if hasattr(shape, \"exterior\"):\n points = shape.exterior.xy\n try:\n hasattr(shape, \"coords\")\n points = shape.coords.xy\n except NotImplementedError:\n pass\n if points:\n if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y))\\\n .any():\n x = shape.centroid.x + 0.0\n y = shape.centroid.y + 0.8\n plt.plot((shape.centroid.x, x), 
(shape.centroid.y, y), \"k-\")\n else:\n x = shape.centroid.x\n y = shape.centroid.y\n entity_points[entity] = (x, y)\n centroids_x.append(x)\n centroids_y.append(y)\n plt.plot(*points, alpha=.6)\n if auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in \\\n entity.INDIRECT_is_a:\n plt.fill(*points, alpha=.3)\n if entity.has_yaw is not None:\n x_dir = (0.9 * math.cos(math.radians(entity.has_yaw)))\n y_dir = (0.9 * math.sin(math.radians(entity.has_yaw)))\n plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape=\"full\",\n length_includes_head=True, color=\"gray\", alpha=0.6, head_width=1)\n entity_labels.append(_describe_entity(entity))\n # Plot CPs\n entity_scene_cps = list(filter(lambda scp: entity in scp.subjects, scene_cps))\n if len(entity_scene_cps) > 0:\n plt.plot(x, y, \"o\", color=\"r\", mec=\"k\", markersize=3, alpha=1)\n ent_color = \"red\"\n else:\n ent_color = \"black\"\n if entity.identifier and len(entity.identifier) > 0 and not entity.is_persistent and not \\\n (isinstance(entity.identifier[0], str) and entity.identifier[0].startswith(\"repr\")):\n plt.annotate(entity.identifier[0], (x+0.2, y+0.2), color=ent_color)\n already_drawn_cps = []\n # init dict\n for cp in entity_scene_cps:\n if cp.predicate not in relations_per_cp_class.keys():\n relations_per_cp_class[cp.predicate] = []\n for cp in entity_scene_cps:\n if cp not in already_drawn_cps:\n same_line_cps = [x for x in entity_scene_cps if\n [y for z in x.objects.values() for y in z] ==\n [y for z in cp.objects.values() for y in z]]\n labels = [(x.predicate.split(\"(\")[0],\n (x.predicate.split(\"(\")[1].replace(\")\", \"\"), str(x)))\n for x in same_line_cps]\n already_drawn_cps += same_line_cps\n subj_x = x\n subj_y = y\n for objs in cp.objects.values():\n for obj in objs:\n if len(obj.hasGeometry) > 0:\n if obj in entity_points.keys():\n obj_x = entity_points[obj][0]\n obj_y = entity_points[obj][1]\n else:\n geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0])\n obj_x = geom_o.centroid.x\n obj_y = geom_o.centroid.y\n m = (obj_y - subj_y) / (obj_x - subj_x)\n b = subj_y - m * subj_x\n head_width = 0.2\n head_length = 1.5 * head_width\n arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y),\n color=cp_colors[cp_color], shape=\"full\",\n length_includes_head=True, head_width=head_width,\n head_length=head_length)\n if len(labels[0]) > 1:\n label_row = \" \".join([label[0] for label in labels])\n else:\n label_row = labels[0]\n x_offset = (len(label_row) * 0.055) / 2 - 0.055\n if subj_x > obj_x:\n label_x = obj_x + abs(subj_x - obj_x) / 2 - x_offset\n else:\n label_x = obj_x - abs(subj_x - obj_x) / 2 - x_offset\n a = math.degrees(math.atan(m))\n for l_i, label in enumerate(labels):\n label_string = label[0].replace(\"CP_\", \"\")\n label_len = (len(label_string) * 0.09 + 0.1)\n label_x_offset = abs(math.cos(math.atan(m)) * label_len)\n while True:\n # Finds a free space to plot label\n label_y = m * label_x + b + 0.05\n label_x_1 = label_x - label_x_offset / 2 + 0.05\n label_y_1 = m * label_x_1 + b\n label_x_2 = label_x + label_x_offset / 2 + 0.05\n label_y_2 = m * label_x_2 + b\n label_line1 = geometry.LineString([(label_x_1, label_y_1),\n (label_x_2, label_y_2)])\n new_bb = label_line1.buffer(0.1, cap_style=2)\n new_bb_rect = list(zip(*new_bb.exterior.xy))[:-1]\n if not _AVOID_LABEL_COLLISIONS or not \\\n _has_collision_with_bbs(plotted_labels, new_bb_rect):\n break\n label_x += label_x_offset / 10\n annot = plt.annotate(label_string,\n (label_x, label_y), 
color=cp_colors[cp_color],\n rotation=a, fontsize=2, rotation_mode=\"anchor\")\n entity_cp_relations.append(annot)\n cps_relations.append(annot)\n relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow]\n cps_for_tooltips.append(same_line_cps[l_i])\n plotted_labels.append(new_bb_rect)\n label_x += label_x_offset\n subj_x = obj_x\n subj_y = obj_y\n entity_cp_relations += [arrow]\n cp_color = (cp_color + 1) % len(cp_colors)\n entity_relations.append(entity_cp_relations)\n elif len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0:\n no_geo_entities.append(_describe_entity(entity))\n logger.info(\"Done with layout, creating MPLD3 plot, JS plugins, and HTML string\")\n pl2 = plt.plot(centroids_x, centroids_y, \"o\", color=\"b\", mec=\"k\", markersize=2, mew=1, alpha=.4)\n tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations,\n targets_per_cp=relations_per_cp_class)\n fig.tight_layout()\n mpld3.plugins.connect(fig, tooltip_individuals)\n for h, cp_text in enumerate(cps_relations):\n tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h])\n mpld3.plugins.connect(fig, tooltip_cp)\n html = \"\\n\\t\\t<div class=\\\"container-fluid scene-plot-container\\\" id=\\\"plt\" + str(i + 1) + \"\\\" style =\\\"\"\n if i != 0:\n html += \"display: none;\"\n html += \"\\\">\"\n html += \"\"\"\n <div class=\"row\">\n <div class=\"col-md-1\">\n \"\"\"\n cp_count_total = len([x for x in cps if (isinstance(x.traffic_model, list) and scene in x.traffic_model) or\n x.traffic_model == scenario_inst])\n html += \"\"\"<div class=\"\">\n <label class=\"btn btn-primary active\" style=\"margin-bottom: 10px; width: %s\">\n <input type=\"checkbox\" class=\"cp-all-button\" id=\"cp-all-button-%s\" autocomplete=\"off\" onclick=\"toggle_cps_all_iframes();\" checked>\n <span>Show all criticality phenomena (%s)</span>\n </label>\"\"\" % (\"100%\", str(i), str(cp_count_total))\n for l, pred in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)):\n cp_count = len([x for x in cps if x.predicate == pred and ((isinstance(x.traffic_model, list) and\n scene in x.traffic_model) or x.traffic_model == scenario_inst)])\n html += \"\"\"\n <br />\n <label class=\"btn btn-secondary active\" style=\"margin-bottom: 5px; width: %s\">\n <input type=\"checkbox\" class=\"cp-button\" id=\"cp-button-%s-%s\" autocomplete=\"off\" onclick=\"toggle_cp_class(this, %s);\" checked>\n <span>%s (%s)</span>\n </label>\"\"\" % (\"100%\", str(i), str(l), str(l), pred, str(cp_count))\n html += \"\"\"\n </div>\n </div>\n <div class=\"col-md-11\">\n \"\"\"\n html += \"<div class=\\\"embed-responsive embed-responsive-16by9\\\">\\n\"\n html += \"\\t\\t\\t\\t\\t\\t<iframe class=\\\"scene-plot\\\" src=\\\"scene\" + str(i + 1) + \".html\\\" class=\\\"embed-responsive-item\\\" style=\\\"width: 100%; height: \" + str(height*1.27) + \"in\\\" allowfullscreen></iframe>\\n\\t\\t\\t\\t\\t</div>\\n\"\n iframe_html = \"\"\"<!DOCTYPE html>\n<html>\n <head>\n <meta charset=\"utf-8\">\n <meta HTTP-EQUIV=\"Access-Control-Allow-Origin\" CONTENT=\"localhost\">\n <link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3\" crossorigin=\"anonymous\">\n <script src=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js\" integrity=\"sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p\" 
crossorigin=\"anonymous\"></script>\n <script src=\"https://code.jquery.com/jquery-3.6.0.min.js\"></script>\n </head>\n <body>\"\"\"\n iframe_html += scene_css\n iframe_html += \"\"\"\n <div class=\"d-flex flex-row justify-content-center\">\n <div class=\"btn-group btn-group-toggle\" data-bs-toggle=\"buttons\">\n <label class=\"btn btn-secondary active\">\n <input type=\"checkbox\" id=\"tooltip_button\" checked autocomplete=\"off\" onclick=\"toggle_tooltips(this);\"> Show tooltip with information of individuals\n </label>\n <label class=\"btn btn-secondary active\">\n <input type=\"checkbox\" id=\"descr_button\" checked autocomplete=\"off\" onclick=\"toggle_all_ind_relations(this);\"> Show full individual relations in tooltip\n </label>\n </div>\n </div>\n <script>\n var show_tooltips = true\n var show_long_ind = true\n cps = []\n cp_targets = []\n cp_targets_per_class = []\n function toggle_tooltips(ele) {\n ele.parentElement.classList.toggle(\"active\")\n show_tooltips = !show_tooltips\n }\n function toggle_all_ind_relations(ele) {\n ele.parentElement.classList.toggle(\"active\")\n show_long_ind = !show_long_ind\n }\n function toggle_cp_targets(targets, state) {\n for (let j = 0; j < targets.length; j++) {\n var x = mpld3.get_element(targets[j])\n if (x) {\n if (\"path\" in x) {\n tog = x.path\n } else if (\"obj\" in x) {\n tog = x.obj\n }\n for (var k = 0; k < tog._groups.length; k++) {\n for (var l = 0; l < tog._groups[k].length; l++){\n if (state) {\n tog._groups[k][l].style.display = \"block\"\n } else {\n tog._groups[k][l].style.display = \"none\"\n }\n }\n }\n }\n }\n }\n function toggle_cps(state) {\n for (let i = 0; i < cp_targets.length; i++) {\n toggle_cp_targets(cp_targets[i], state)\n }\n }\n function toggle_cp_class(cp_class, state) {\n targets = cp_targets_per_class[cp_class]\n toggle_cp_targets(targets, state)\n }\n </script>\n <div class=\"card m-2\">\n <div class=\"card-title d-flex flex-row justify-content-center m-1\">\n <h5>\"\"\"\n if len(scene.inTimePosition) > 0 and len(scene.inTimePosition[0].numericPosition) > 0:\n time = \"%.2f s\" % scene.inTimePosition[0].numericPosition[0]\n if scenario_inst and len(scenario_inst.hasEnd) > 0 and len(scenario_inst.hasEnd[0].inTimePosition) > 0 and \\\n len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0:\n time += \" / %.2f s\" % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]\n else:\n time += \" / \" + str(len(scenes))\n else:\n time = str(i) + \" / \" + str(len(scenes))\n iframe_html += \"Scene \" + time + \"<br />\"\n iframe_html += \"\"\"\n </h5>\n </div>\n <div class=\"card-body m-0 p-0 d-flex justify-content-center\">\n \"\"\"\n scene_html = mpld3.fig_to_html(fig)\n iframe_html += ''.join(\"\\t\\t\"+line+\"\\n\" for line in scene_html.splitlines())\n iframe_html += \"\"\"\n </div>\n </div>\"\"\"\n if len(no_geo_entities) > 0:\n iframe_html += \"\"\"\n <div class=\"d-flex flex-row justify-content-center\">\n <a class=\"btn btn-primary\" data-bs-toggle=\"collapse\" href=\"#noGeoCollapse\" role=\"button\" aria-expanded=\"false\" aria-controls=\"noGeoCollapse\">\n Show scene individuals with no geometric representation (%s)\n </a>\n </div>\n <div class=\"container-fluid collapse\" id=\"noGeoCollapse\">\n <div class=\"card card-body m-2\">\"\"\" % str(len(no_geo_entities))\n iframe_html += \"\".join(no_geo_entities)\n iframe_html += \"\"\"\n </div>\n </div>\"\"\"\n iframe_html += \"\\t</body>\\n</html>\"\n iframes.append(iframe_html)\n html += 
\"\\t\\t\\t\\t</div>\\n\\t\\t\\t</div>\\n\\t\\t</div>\"\n pl_html.append(html)\n\n # Assemble main HTML\n pl_html.append(\"\\n\\t</body>\\n</html>\")\n # Write main HTML to index.html\n tmp_dir = tempfile.mkdtemp()\n index_path = tmp_dir + \"/index.html\"\n with open(index_path, \"w\") as file:\n for html in pl_html:\n file.write(html)\n\n # Write each scene HTML to a single file\n for i, iframe in enumerate(iframes):\n frame_path = tmp_dir + \"/scene\" + str(i + 1) + \".html\"\n with open(frame_path, \"w\") as file:\n for html in iframe:\n file.write(html)\n\n # Starts webserver\n os.chdir(tmp_dir)\n threading.Thread(target=socketserver.TCPServer((\"\", 8000),\n http.server.SimpleHTTPRequestHandler).serve_forever).start()\n logger.info(\"Visualization is available at: http://localhost:8000\")\n webbrowser.open(\"http://localhost:8000\")\n return tmp_dir\n\n\ndef _describe_entity(entity):\n \"\"\"\n Describes the given traffic entity as an HTML list.\n :param entity: An object of an owlready2 class.\n :return: The HTML-representation of entity.\n \"\"\"\n cls = phenomena_extraction.get_most_specific_classes([entity])\n label = \"<table class=\\\"m-2\\\"><thead><tr><th>Individual</th><th>\" + str(entity)\n label += \" (\" + \", \".join(cls[0][1]) + \")</th></tr></thead><tbody><tr><td>is_a</td><td>\"\n label += \", \".join([str(x) for x in entity.is_a])\n label += \"</td></tr>\"\n for prop in entity.get_properties():\n if str(prop.python_name) not in _NO_PRINTING_PROPERTIES:\n label += \"<tr>\"\n label += \"<td>\"\n label += str(prop.python_name)\n label += \"</td>\"\n label += \"<td>\"\n label += \", \".join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]])\n if len(prop[entity]) > _MAX_PROPS_DISPLAY:\n label += \"<text class=\\\"extended_ind_props\\\">\"\n label += \", \".join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + \"</text>\"\n label += \"<text class=\\\"extended_ind_props_dots\\\" style=\\\"display: none;\\\">...</text>\"\n label += \"</td>\"\n label += \"</tr>\"\n label += \"</tbody></table>\"\n return label\n\n\ndef _describe_cp(cp):\n label = \"<table class=\\\"m-2\\\"><thead><tr><th>Criticality Phenomenon</th><th>\" + \\\n str(cp.predicate).split(\"(\")[1].replace(\")\", \"\")\n label += \"</th></tr></thead><tbody><tr><td>Start time</td><td>\"\n time = cp.at_time()\n if isinstance(time, tuple):\n label += str(time[0])\n else:\n label += str(time)\n label += \"</td></tr><tr><td>End time</td><td>\"\n if isinstance(time, tuple):\n label += str(time[1])\n else:\n label += str(time)\n label += \"</td></tr><tr><td>Subject(s)</td><td>\"\n if len(cp.subjects) > 0:\n subj_and_classes = phenomena_extraction.get_most_specific_classes(cp.subjects)\n label += \"<br />\".join([str(x[0]) + \" (\" + \", \".join(x[1]) + \")\" for x in subj_and_classes])\n label += \"</td></tr><tr><td>Predicate</td><td>\"\n label += str(cp.predicate)\n label += \"</td></tr><tr><td>Object(s)</td><td>\"\n if len(cp.objects) > 0:\n for obj_predicate in cp.objects.keys():\n obj_and_classes = phenomena_extraction.get_most_specific_classes(cp.objects[obj_predicate])\n label += obj_predicate + \":<br/>\" + \"<br />\".join([str(x[0]) + \" (\" + \", \".join(x[1]) + \")\" for x in\n obj_and_classes])\n if len(cp.objects.keys()) > 1:\n label += \"<br/>\"\n label += \"</td></tr>\"\n label += \"</tbody></table>\"\n return label\n\n\n#################\n# MPLD3 Plugins #\n#################\n\nclass ToolTipAndClickInfo(mpld3.plugins.PointHTMLTooltip):\n # Handles:\n # 1. 
the criticality phenomena toggling when clicking on CP subjects (red circles)\n # 2. the mouse-overs when hovering over subjects\n # 3. the Ctrl+Click new window action when clicking on subjects\n\n JAVASCRIPT = \"\"\"\n var scene_css = `\"\"\" + scene_css + \"\"\"`\n mpld3.register_plugin(\"htmltooltip\", HtmlTooltipPlugin);\n HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);\n HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;\n HtmlTooltipPlugin.prototype.requiredProps = [\"id\"];\n HtmlTooltipPlugin.prototype.defaultProps = {labels:null,\n targets_per_cp:null,\n cps:null,\n hoffset:0,\n voffset:10,\n targets:null};\n function HtmlTooltipPlugin(fig, props){\n mpld3.Plugin.call(this, fig, props);\n };\n\n HtmlTooltipPlugin.prototype.draw = function(){\n var obj = mpld3.get_element(this.props.id)\n var labels = this.props.labels\n cps = obj.elements()\n cp_targets = this.props.targets\n cp_targets_per_class = this.props.targets_per_cp\n cp_predicates = this.props.cps\n var tooltip = d3.select(\"body\").append(\"div\")\n .attr(\"class\", \"mpld3-tooltip\")\n .style(\"position\", \"absolute\")\n .style(\"z-index\", \"10\")\n .style(\"visibility\", \"hidden\");\n \n function show_cp(d, i) {\n if (!window.event.ctrlKey) {\n for (let j = 0; j < cp_targets[i].length; j++) { \n var x = mpld3.get_element(cp_targets[i][j]);\n if (x) {\n if (\"path\" in x) {\n tog = x.path\n } else if (\"obj\" in x) {\n tog = x.obj\n }\n for (var k = 0; k < tog._groups.length; k++){\n for (var l = 0; l < tog._groups[k].length; l++){\n if (tog._groups[k][l].style.display === \"none\"){\n tog._groups[k][l].style.display = \"block\"\n } else {\n tog._groups[k][l].style.display = \"none\"\n }\n }\n }\n }\n }\n }\n }\n\n obj.elements()\n .on(\"mouseover\", function(d, i) {\n if (show_tooltips) {\n tooltip.html(labels[i]).style(\"visibility\", \"visible\");\n var long_descrs = document.getElementsByClassName(\"extended_ind_props\")\n var dots_descrs = document.getElementsByClassName(\"extended_ind_props_dots\")\n for (let i = 0; i < long_descrs.length; i++) {\n if(!show_long_ind) {\n long_descrs[i].style.display = \"none\";\n } else {\n long_descrs[i].style.display = \"inline\";\n }\n }\n for (let i = 0; i < dots_descrs.length; i++) {\n if(!show_long_ind) {\n dots_descrs[i].style.display = \"inline\";\n } else {\n dots_descrs[i].style.display = \"none\";\n }\n }\n }\n })\n .on(\"mousemove\", function(d, i) {\n tooltip\n .style(\"top\", d3.event.pageY + this.props.voffset + \"px\")\n .style(\"left\",d3.event.pageX + this.props.hoffset + \"px\");\n }.bind(this))\n .on(\"mousedown.callout\", show_cp)\n .on(\"mouseout\", function(d, i){\n tooltip.style(\"visibility\", \"hidden\");\n })\n .on(\"click\", function(d, i) {\n if (window.event.ctrlKey) {\n var newWindow = window.open();\n newWindow.document.write(\n `<link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3\" crossorigin=\"anonymous\">` + scene_css + tooltip.html(labels[i])._groups[0][0].innerHTML\n );\n }\n });\n };\n \"\"\"\n\n def __init__(self, points, labels=None, targets=None, targets_per_cp=None, hoffset=0, voffset=10, css=None):\n targets_ = []\n for x in targets or []:\n x_ = []\n for y in x:\n x_.append(mpld3.utils.get_id(y))\n targets_.append(x_)\n self.targets_per_cp = []\n self.cps = []\n if targets_per_cp:\n self.cps = sorted(targets_per_cp.keys(), key=natural_sort_key)\n for cp in 
self.cps:\n x_ = []\n for y in targets_per_cp[cp]:\n x_.append(mpld3.utils.get_id(y))\n self.targets_per_cp.append(x_)\n super().__init__(points, labels, targets_, hoffset, voffset, css)\n self.dict_[\"targets_per_cp\"] = self.targets_per_cp\n self.dict_[\"cps\"] = self.cps\n\n\nclass CPTooltip(mpld3.plugins.PluginBase):\n # Handles the Ctrl+Click action on criticality phenomena ID (opens a new tab).\n\n JAVASCRIPT = \"\"\"\n var scene_css = `\"\"\" + scene_css + \"\"\"`\n mpld3.register_plugin(\"cpstooltip\", CPTooltip);\n CPTooltip.prototype = Object.create(mpld3.Plugin.prototype);\n CPTooltip.prototype.constructor = CPTooltip;\n CPTooltip.prototype.requiredProps = [\"id\", \"tooltip_html\"];\n function CPTooltip(fig, props){\n mpld3.Plugin.call(this, fig, props);\n };\n\n CPTooltip.prototype.draw = function(){\n var obj = mpld3.get_element(this.props.id);\n var tooltip_html = this.props.tooltip_html;\n var tooltip = d3.select(\"body\").append(\"div\")\n .attr(\"class\", \"cp-tooltip\")\n .style(\"position\", \"absolute\")\n .style(\"z-index\", \"10\")\n .style(\"visibility\", \"hidden\");\n \n obj.obj._groups[0][0].onmouseover = function(d, i) {\n tooltip.html(tooltip_html).style(\"visibility\", \"visible\");\n };\n \n obj.obj._groups[0][0].onmousemove = function(d, i) {\n tooltip\n .style(\"top\", d.clientY + 10 + \"px\")\n .style(\"left\", d.clientX + 0 + \"px\");\n }.bind(this);\n \n obj.obj._groups[0][0].onclick = function(d, i) {\n if (window.event.ctrlKey) {\n var newWindow = window.open();\n newWindow.document.write(\n `<link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3\" crossorigin=\"anonymous\">` + scene_css + tooltip_html\n );\n }\n };\n \n obj.obj._groups[0][0].onmouseout = function(d, i) {\n tooltip.style(\"visibility\", \"hidden\");\n };\n }\n \"\"\"\n\n def __init__(self, text, cp):\n tooltip_html = _describe_cp(cp)\n self.dict_ = {\"type\": \"cpstooltip\",\n \"id\": mpld3.utils.get_id(text),\n \"tooltip_html\": tooltip_html}\n\n\ndef _has_collision_with_bbs(existing_bbs, new_bb):\n \"\"\"\n Checks if the new rectangle (new_bb) collides with some existing rectangles.\n \"\"\"\n a_left = min([x[0] for x in new_bb])\n a_right = max([x[0] for x in new_bb])\n a_bottom = min([x[1] for x in new_bb])\n a_top = max([x[1] for x in new_bb])\n for bb in existing_bbs:\n b_left = min([x[0] for x in bb])\n b_right = max([x[0] for x in bb])\n b_bottom = min([x[1] for x in bb])\n b_top = max([x[1] for x in bb])\n if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:\n return True\n return False\n" ]
[ [ "matplotlib.pyplot.annotate", "numpy.isclose", "matplotlib.pyplot.arrow", "matplotlib.pyplot.fill", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis" ] ]
bamdada/UdacityProj10FinaltfModels
[ "db39ef826193d0802f644ba30397242a7272676e" ]
[ "research/object_detection/metrics/coco_tools.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Wrappers for third party pycocotools to be used within object_detection.\n\nNote that nothing in this file is tensorflow related and thus cannot\nbe called directly as a slim metric, for example.\n\nTODO(jonathanhuang): wrap as a slim metric in metrics.py\n\n\nUsage example: given a set of images with ids in the list image_ids\nand corresponding lists of numpy arrays encoding groundtruth (boxes and classes)\nand detections (boxes, scores and classes), where elements of each list\ncorrespond to detections/annotations of a single image,\nthen evaluation (in multi-class mode) can be invoked as follows:\n\n groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(\n image_ids, groundtruth_boxes_list, groundtruth_classes_list,\n max_num_classes, output_path=None)\n detections_list = coco_tools.ExportDetectionsToCOCO(\n image_ids, detection_boxes_list, detection_scores_list,\n detection_classes_list, output_path=None)\n groundtruth = coco_tools.COCOWrapper(groundtruth_dict)\n detections = groundtruth.LoadAnnotations(detections_list)\n evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,\n agnostic_mode=False)\n metrics = evaluator.ComputeMetrics()\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport copy\nimport time\nimport numpy as np\n\nfrom pycocotools import coco\nfrom pycocotools import cocoeval\nfrom pycocotools import mask\n\nimport six\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.utils import json_utils\n\n\nclass COCOWrapper(coco.COCO):\n \"\"\"Wrapper for the pycocotools COCO class.\"\"\"\n\n def __init__(self, dataset, detection_type='bbox'):\n \"\"\"COCOWrapper constructor.\n\n See http://mscoco.org/dataset/#format for a description of the format.\n By default, the coco.COCO class constructor reads from a JSON file.\n This function duplicates the same behavior but loads from a dictionary,\n allowing us to perform evaluation without writing to external storage.\n\n Args:\n dataset: a dictionary holding bounding box annotations in the COCO format.\n detection_type: type of detections being wrapped. Can be one of ['bbox',\n 'segmentation']\n\n Raises:\n ValueError: if detection_type is unsupported.\n \"\"\"\n supported_detection_types = ['bbox', 'segmentation']\n if detection_type not in supported_detection_types:\n raise ValueError('Unsupported detection type: {}. 
'\n 'Supported values are: {}'.format(\n detection_type, supported_detection_types))\n self._detection_type = detection_type\n coco.COCO.__init__(self)\n self.dataset = dataset\n self.createIndex()\n\n def LoadAnnotations(self, annotations):\n \"\"\"Load annotations dictionary into COCO datastructure.\n\n See http://mscoco.org/dataset/#format for a description of the annotations\n format. As above, this function replicates the default behavior of the API\n but does not require writing to external storage.\n\n Args:\n annotations: python list holding object detection results where each\n detection is encoded as a dict with required keys ['image_id',\n 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on\n `detection_type`.\n\n Returns:\n a coco.COCO datastructure holding object detection annotations results\n\n Raises:\n ValueError: if annotations is not a list\n ValueError: if annotations do not correspond to the images contained\n in self.\n \"\"\"\n results = coco.COCO()\n results.dataset['images'] = [img for img in self.dataset['images']]\n\n tf.logging.info('Loading and preparing annotation results...')\n tic = time.time()\n\n if not isinstance(annotations, list):\n raise ValueError('annotations is not a list of objects')\n annotation_img_ids = [ann['image_id'] for ann in annotations]\n if (set(annotation_img_ids) != (set(annotation_img_ids)\n & set(self.getImgIds()))):\n raise ValueError('Results do not correspond to current coco set')\n results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n if self._detection_type == 'bbox':\n for idx, ann in enumerate(annotations):\n bb = ann['bbox']\n ann['area'] = bb[2] * bb[3]\n ann['id'] = idx + 1\n ann['iscrowd'] = 0\n elif self._detection_type == 'segmentation':\n for idx, ann in enumerate(annotations):\n ann['area'] = mask.area(ann['segmentation'])\n ann['bbox'] = mask.toBbox(ann['segmentation'])\n ann['id'] = idx + 1\n ann['iscrowd'] = 0\n tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))\n\n results.dataset['annotations'] = annotations\n results.createIndex()\n return results\n\n\nclass COCOEvalWrapper(cocoeval.COCOeval):\n \"\"\"Wrapper for the pycocotools COCOeval class.\n\n To evaluate, create two objects (groundtruth_dict and detections_list)\n using the conventions listed at http://mscoco.org/dataset/#format.\n Then call evaluation as follows:\n\n groundtruth = coco_tools.COCOWrapper(groundtruth_dict)\n detections = groundtruth.LoadAnnotations(detections_list)\n evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,\n agnostic_mode=False)\n\n metrics = evaluator.ComputeMetrics()\n \"\"\"\n\n def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,\n iou_type='bbox', oks_sigmas=None):\n \"\"\"COCOEvalWrapper constructor.\n\n Note that for the area-based metrics to be meaningful, detection and\n groundtruth boxes must be in image coordinates measured in pixels.\n\n Args:\n groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding\n groundtruth annotations\n detections: a coco.COCO (or coco_tools.COCOWrapper) object holding\n detections\n agnostic_mode: boolean (default: False). If True, evaluation ignores\n class labels, treating all detections as proposals.\n iou_type: IOU type to use for evaluation. 
Supports `bbox', `segm`,\n `keypoints`.\n oks_sigmas: Float numpy array holding the OKS variances for keypoints.\n \"\"\"\n cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)\n if oks_sigmas is not None:\n self.params.kpt_oks_sigmas = oks_sigmas\n if agnostic_mode:\n self.params.useCats = 0\n self._iou_type = iou_type\n\n def GetCategory(self, category_id):\n \"\"\"Fetches dictionary holding category information given category id.\n\n Args:\n category_id: integer id\n Returns:\n dictionary holding 'id', 'name'.\n \"\"\"\n return self.cocoGt.cats[category_id]\n\n def GetAgnosticMode(self):\n \"\"\"Returns true if COCO Eval is configured to evaluate in agnostic mode.\"\"\"\n return self.params.useCats == 0\n\n def GetCategoryIdList(self):\n \"\"\"Returns list of valid category ids.\"\"\"\n return self.params.catIds\n\n def ComputeMetrics(self,\n include_metrics_per_category=False,\n all_metrics_per_category=False):\n \"\"\"Computes detection/keypoint metrics.\n\n Args:\n include_metrics_per_category: If True, will include metrics per category.\n all_metrics_per_category: If true, include all the summery metrics for\n each category in per_category_ap. Be careful with setting it to true if\n you have more than handful of categories, because it will pollute\n your mldash.\n\n Returns:\n 1. summary_metrics: a dictionary holding:\n 'Precision/mAP': mean average precision over classes averaged over IOU\n thresholds ranging from .5 to .95 with .05 increments\n 'Precision/mAP@.50IOU': mean average precision at 50% IOU\n 'Precision/mAP@.75IOU': mean average precision at 75% IOU\n 'Precision/mAP (small)': mean average precision for small objects\n (area < 32^2 pixels). NOTE: not present for 'keypoints'\n 'Precision/mAP (medium)': mean average precision for medium sized\n objects (32^2 pixels < area < 96^2 pixels)\n 'Precision/mAP (large)': mean average precision for large objects\n (96^2 pixels < area < 10000^2 pixels)\n 'Recall/AR@1': average recall with 1 detection\n 'Recall/AR@10': average recall with 10 detections\n 'Recall/AR@100': average recall with 100 detections\n 'Recall/AR@100 (small)': average recall for small objects with 100\n detections. NOTE: not present for 'keypoints'\n 'Recall/AR@100 (medium)': average recall for medium objects with 100\n detections\n 'Recall/AR@100 (large)': average recall for large objects with 100\n detections\n 2. 
per_category_ap: a dictionary holding category specific results with\n keys of the form: 'Precision mAP ByCategory/category'\n (without the supercategory part if no supercategories exist).\n For backward compatibility 'PerformanceByCategory' is included in the\n output regardless of all_metrics_per_category.\n If evaluating class-agnostic mode, per_category_ap is an empty\n dictionary.\n\n Raises:\n ValueError: If category_stats does not exist.\n \"\"\"\n self.evaluate()\n self.accumulate()\n self.summarize()\n\n summary_metrics = {}\n if self._iou_type in ['bbox', 'segm']:\n summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]),\n ('Precision/mAP@.50IOU', self.stats[1]),\n ('Precision/mAP@.75IOU', self.stats[2]),\n ('Precision/mAP (small)', self.stats[3]),\n ('Precision/mAP (medium)', self.stats[4]),\n ('Precision/mAP (large)', self.stats[5]),\n ('Recall/AR@1', self.stats[6]),\n ('Recall/AR@10', self.stats[7]),\n ('Recall/AR@100', self.stats[8]),\n ('Recall/AR@100 (small)', self.stats[9]),\n ('Recall/AR@100 (medium)', self.stats[10]),\n ('Recall/AR@100 (large)', self.stats[11])])\n elif self._iou_type == 'keypoints':\n category_id = self.GetCategoryIdList()[0]\n category_name = self.GetCategory(category_id)['name']\n summary_metrics = OrderedDict([])\n summary_metrics['Precision/mAP ByCategory/{}'.format(\n category_name)] = self.stats[0]\n summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(\n category_name)] = self.stats[1]\n summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(\n category_name)] = self.stats[2]\n summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(\n category_name)] = self.stats[3]\n summary_metrics['Precision/mAP (large) ByCategory/{}'.format(\n category_name)] = self.stats[4]\n summary_metrics['Recall/AR@1 ByCategory/{}'.format(\n category_name)] = self.stats[5]\n summary_metrics['Recall/AR@10 ByCategory/{}'.format(\n category_name)] = self.stats[6]\n summary_metrics['Recall/AR@100 ByCategory/{}'.format(\n category_name)] = self.stats[7]\n summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(\n category_name)] = self.stats[8]\n summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(\n category_name)] = self.stats[9]\n if not include_metrics_per_category:\n return summary_metrics, {}\n if not hasattr(self, 'category_stats'):\n raise ValueError('Category stats do not exist')\n per_category_ap = OrderedDict([])\n if self.GetAgnosticMode():\n return summary_metrics, per_category_ap\n for category_index, category_id in enumerate(self.GetCategoryIdList()):\n category = self.GetCategory(category_id)['name']\n # Kept for backward compatilbility\n per_category_ap['PerformanceByCategory/mAP/{}'.format(\n category)] = self.category_stats[0][category_index]\n if all_metrics_per_category:\n per_category_ap['Precision mAP ByCategory/{}'.format(\n category)] = self.category_stats[0][category_index]\n per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(\n category)] = self.category_stats[1][category_index]\n per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(\n category)] = self.category_stats[2][category_index]\n per_category_ap['Precision mAP (small) ByCategory/{}'.format(\n category)] = self.category_stats[3][category_index]\n per_category_ap['Precision mAP (medium) ByCategory/{}'.format(\n category)] = self.category_stats[4][category_index]\n per_category_ap['Precision mAP (large) ByCategory/{}'.format(\n category)] = self.category_stats[5][category_index]\n per_category_ap['Recall AR@1 ByCategory/{}'.format(\n 
category)] = self.category_stats[6][category_index]\n per_category_ap['Recall AR@10 ByCategory/{}'.format(\n category)] = self.category_stats[7][category_index]\n per_category_ap['Recall AR@100 ByCategory/{}'.format(\n category)] = self.category_stats[8][category_index]\n per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(\n category)] = self.category_stats[9][category_index]\n per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(\n category)] = self.category_stats[10][category_index]\n per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(\n category)] = self.category_stats[11][category_index]\n\n return summary_metrics, per_category_ap\n\n\ndef _ConvertBoxToCOCOFormat(box):\n \"\"\"Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.\n\n This is a utility function for converting from our internal\n [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API\n i.e., [xmin, ymin, width, height].\n\n Args:\n box: a [ymin, xmin, ymax, xmax] numpy array\n\n Returns:\n a list of floats representing [xmin, ymin, width, height]\n \"\"\"\n return [float(box[1]), float(box[0]), float(box[3] - box[1]),\n float(box[2] - box[0])]\n\n\ndef _RleCompress(masks):\n \"\"\"Compresses mask using Run-length encoding provided by pycocotools.\n\n Args:\n masks: uint8 numpy array of shape [mask_height, mask_width] with values in\n {0, 1}.\n\n Returns:\n A pycocotools Run-length encoding of the mask.\n \"\"\"\n rle = mask.encode(np.asfortranarray(masks))\n rle['counts'] = six.ensure_str(rle['counts'])\n return rle\n\n\ndef ExportSingleImageGroundtruthToCoco(image_id,\n next_annotation_id,\n category_id_set,\n groundtruth_boxes,\n groundtruth_classes,\n groundtruth_keypoints=None,\n groundtruth_keypoint_visibilities=None,\n groundtruth_masks=None,\n groundtruth_is_crowd=None,\n groundtruth_area=None):\n \"\"\"Export groundtruth of a single image to COCO format.\n\n This function converts groundtruth detection annotations represented as numpy\n arrays to dictionaries that can be ingested by the COCO evaluation API. Note\n that the image_ids provided here must match the ones given to\n ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in\n correspondence - that is: groundtruth_boxes[i, :], and\n groundtruth_classes[i] are associated with the same groundtruth annotation.\n\n In the exported result, \"area\" fields are always set to the area of the\n groundtruth bounding box.\n\n Args:\n image_id: a unique image identifier either of type integer or string.\n next_annotation_id: integer specifying the first id to use for the\n groundtruth annotations. All annotations are assigned a continuous integer\n id starting from this value.\n category_id_set: A set of valid class ids. Groundtruth with classes not in\n category_id_set are dropped.\n groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]\n groundtruth_classes: numpy array (int) with shape [num_gt_boxes]\n groundtruth_keypoints: optional float numpy array of keypoints\n with shape [num_gt_boxes, num_keypoints, 2].\n groundtruth_keypoint_visibilities: optional integer numpy array of keypoint\n visibilities with shape [num_gt_boxes, num_keypoints]. 
Integer is treated\n as an enum with 0=not labels, 1=labeled but not visible and 2=labeled and\n visible.\n groundtruth_masks: optional uint8 numpy array of shape [num_detections,\n image_height, image_width] containing detection_masks.\n groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]\n indicating whether groundtruth boxes are crowd.\n groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If\n provided, then the area values (in the original absolute coordinates) will\n be populated instead of calculated from bounding box coordinates.\n\n Returns:\n a list of groundtruth annotations for a single image in the COCO format.\n\n Raises:\n ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the\n right lengths or (2) if each of the elements inside these lists do not\n have the correct shapes or (3) if image_ids are not integers\n \"\"\"\n\n if len(groundtruth_classes.shape) != 1:\n raise ValueError('groundtruth_classes is '\n 'expected to be of rank 1.')\n if len(groundtruth_boxes.shape) != 2:\n raise ValueError('groundtruth_boxes is expected to be of '\n 'rank 2.')\n if groundtruth_boxes.shape[1] != 4:\n raise ValueError('groundtruth_boxes should have '\n 'shape[1] == 4.')\n num_boxes = groundtruth_classes.shape[0]\n if num_boxes != groundtruth_boxes.shape[0]:\n raise ValueError('Corresponding entries in groundtruth_classes, '\n 'and groundtruth_boxes should have '\n 'compatible shapes (i.e., agree on the 0th dimension).'\n 'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (\n groundtruth_classes.shape[0],\n groundtruth_boxes.shape[0], image_id))\n has_is_crowd = groundtruth_is_crowd is not None\n if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:\n raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')\n has_keypoints = groundtruth_keypoints is not None\n has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None\n if has_keypoints and not has_keypoint_visibilities:\n groundtruth_keypoint_visibilities = np.full(\n (num_boxes, groundtruth_keypoints.shape[1]), 2)\n groundtruth_list = []\n for i in range(num_boxes):\n if groundtruth_classes[i] in category_id_set:\n iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0\n if groundtruth_area is not None and groundtruth_area[i] > 0:\n area = float(groundtruth_area[i])\n else:\n area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *\n (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))\n export_dict = {\n 'id':\n next_annotation_id + i,\n 'image_id':\n image_id,\n 'category_id':\n int(groundtruth_classes[i]),\n 'bbox':\n list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),\n 'area': area,\n 'iscrowd':\n iscrowd\n }\n if groundtruth_masks is not None:\n export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])\n if has_keypoints:\n keypoints = groundtruth_keypoints[i]\n visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])\n coco_keypoints = []\n num_valid_keypoints = 0\n for keypoint, visibility in zip(keypoints, visibilities):\n # Convert from [y, x] to [x, y] as mandated by COCO.\n coco_keypoints.append(float(keypoint[1]))\n coco_keypoints.append(float(keypoint[0]))\n coco_keypoints.append(int(visibility))\n if int(visibility) > 0:\n num_valid_keypoints = num_valid_keypoints + 1\n export_dict['keypoints'] = coco_keypoints\n export_dict['num_keypoints'] = num_valid_keypoints\n\n groundtruth_list.append(export_dict)\n return groundtruth_list\n\n\ndef ExportGroundtruthToCOCO(image_ids,\n groundtruth_boxes,\n 
groundtruth_classes,\n categories,\n output_path=None):\n \"\"\"Export groundtruth detection annotations in numpy arrays to COCO API.\n\n This function converts a set of groundtruth detection annotations represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\n Inputs to this function are three lists: image ids for each groundtruth image,\n groundtruth boxes for each image and groundtruth classes respectively.\n Note that the image_ids provided here must match the ones given to the\n ExportDetectionsToCOCO function in order for evaluation to work properly.\n We assume that for each image, boxes, scores and classes are in\n correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and\n groundtruth_classes[i] are associated with the same groundtruth annotation.\n\n In the exported result, \"area\" fields are always set to the area of the\n groundtruth bounding box and \"iscrowd\" fields are always set to 0.\n TODO(jonathanhuang): pass in \"iscrowd\" array for evaluating on COCO dataset.\n\n Args:\n image_ids: a list of unique image identifier either of type integer or\n string.\n groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]\n (note that num_gt_boxes can be different for each entry in the list)\n groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]\n (note that num_gt_boxes can be different for each entry in the list)\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list has the following keys:\n 'id': (required) an integer id uniquely identifying this category\n 'name': (required) string representing category name\n e.g., 'cat', 'dog', 'pizza'\n 'supercategory': (optional) string representing the supercategory\n e.g., 'animal', 'vehicle', 'food', etc\n output_path: (optional) path for exporting result to JSON\n Returns:\n dictionary that can be read by COCO API\n Raises:\n ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the\n right lengths or (2) if each of the elements inside these lists do not\n have the correct shapes or (3) if image_ids are not integers\n \"\"\"\n category_id_set = set([cat['id'] for cat in categories])\n groundtruth_export_list = []\n image_export_list = []\n if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):\n raise ValueError('Input lists must have the same length')\n\n # For reasons internal to the COCO API, it is important that annotation ids\n # are not equal to zero; we thus start counting from 1.\n annotation_id = 1\n for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,\n groundtruth_classes):\n image_export_list.append({'id': image_id})\n groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(\n image_id,\n annotation_id,\n category_id_set,\n boxes,\n classes))\n num_boxes = classes.shape[0]\n annotation_id += num_boxes\n\n groundtruth_dict = {\n 'annotations': groundtruth_export_list,\n 'images': image_export_list,\n 'categories': categories\n }\n if output_path:\n with tf.gfile.GFile(output_path, 'w') as fid:\n json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)\n return groundtruth_dict\n\n\ndef ExportSingleImageDetectionBoxesToCoco(image_id,\n category_id_set,\n detection_boxes,\n detection_scores,\n detection_classes,\n detection_keypoints=None,\n detection_keypoint_visibilities=None):\n \"\"\"Export detections of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dictionaries\n that can be 
ingested by the COCO evaluation API. Note that the image_ids\n provided here must match the ones given to\n ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in\n correspondence - that is: boxes[i, :] and classes[i]\n are associated with the same detection.\n\n Args:\n image_id: unique image identifier either of type integer or string.\n category_id_set: A set of valid class ids. Detections with classes not in\n category_id_set are dropped.\n detection_boxes: float numpy array of shape [num_detections, 4] containing\n detection boxes.\n detection_scores: float numpy array of shape [num_detections] containing\n scores for the detection boxes.\n detection_classes: integer numpy array of shape [num_detections] containing\n the classes for detection boxes.\n detection_keypoints: optional float numpy array of keypoints\n with shape [num_detections, num_keypoints, 2].\n detection_keypoint_visibilities: optional integer numpy array of keypoint\n visibilities with shape [num_detections, num_keypoints]. Integer is\n treated as an enum with 0=not labeled, 1=labeled but not visible and\n 2=labeled and visible.\n\n Returns:\n a list of detection annotations for a single image in the COCO format.\n\n Raises:\n ValueError: if (1) detection_boxes, detection_scores and detection_classes\n do not have the right lengths or (2) if each of the elements inside these\n lists do not have the correct shapes or (3) if image_ids are not integers.\n \"\"\"\n\n if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:\n raise ValueError('All entries in detection_classes and detection_scores '\n 'expected to be of rank 1.')\n if len(detection_boxes.shape) != 2:\n raise ValueError('All entries in detection_boxes expected to be of '\n 'rank 2.')\n if detection_boxes.shape[1] != 4:\n raise ValueError('All entries in detection_boxes should have '\n 'shape[1] == 4.')\n num_boxes = detection_classes.shape[0]\n if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:\n raise ValueError('Corresponding entries in detection_classes, '\n 'detection_scores and detection_boxes should have '\n 'compatible shapes (i.e., agree on the 0th dimension). '\n 'Classes shape: %d. Boxes shape: %d. 
'\n 'Scores shape: %d' % (\n detection_classes.shape[0], detection_boxes.shape[0],\n detection_scores.shape[0]\n ))\n detections_list = []\n for i in range(num_boxes):\n if detection_classes[i] in category_id_set:\n export_dict = {\n 'image_id':\n image_id,\n 'category_id':\n int(detection_classes[i]),\n 'bbox':\n list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),\n 'score':\n float(detection_scores[i]),\n }\n if detection_keypoints is not None:\n keypoints = detection_keypoints[i]\n num_keypoints = keypoints.shape[0]\n if detection_keypoint_visibilities is None:\n detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),\n 2)\n visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])\n coco_keypoints = []\n for keypoint, visibility in zip(keypoints, visibilities):\n # Convert from [y, x] to [x, y] as mandated by COCO.\n coco_keypoints.append(float(keypoint[1]))\n coco_keypoints.append(float(keypoint[0]))\n coco_keypoints.append(int(visibility))\n export_dict['keypoints'] = coco_keypoints\n export_dict['num_keypoints'] = num_keypoints\n detections_list.append(export_dict)\n\n return detections_list\n\n\ndef ExportSingleImageDetectionMasksToCoco(image_id,\n category_id_set,\n detection_masks,\n detection_scores,\n detection_classes):\n \"\"\"Export detection masks of a single image to COCO format.\n\n This function converts detections represented as numpy arrays to dictionaries\n that can be ingested by the COCO evaluation API. We assume that\n detection_masks, detection_scores, and detection_classes are in correspondence\n - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]\n are associated with the same annotation.\n\n Args:\n image_id: unique image identifier either of type integer or string.\n category_id_set: A set of valid class ids. Detections with classes not in\n category_id_set are dropped.\n detection_masks: uint8 numpy array of shape [num_detections, image_height,\n image_width] containing detection_masks.\n detection_scores: float numpy array of shape [num_detections] containing\n scores for detection masks.\n detection_classes: integer numpy array of shape [num_detections] containing\n the classes for detection masks.\n\n Returns:\n a list of detection mask annotations for a single image in the COCO format.\n\n Raises:\n ValueError: if (1) detection_masks, detection_scores and detection_classes\n do not have the right lengths or (2) if each of the elements inside these\n lists do not have the correct shapes or (3) if image_ids are not integers.\n \"\"\"\n\n if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:\n raise ValueError('All entries in detection_classes and detection_scores'\n 'expected to be of rank 1.')\n num_boxes = detection_classes.shape[0]\n if not num_boxes == len(detection_masks) == detection_scores.shape[0]:\n raise ValueError('Corresponding entries in detection_classes, '\n 'detection_scores and detection_masks should have '\n 'compatible lengths and shapes '\n 'Classes length: %d. Masks length: %d. 
'\n 'Scores length: %d' % (\n detection_classes.shape[0], len(detection_masks),\n detection_scores.shape[0]\n ))\n detections_list = []\n for i in range(num_boxes):\n if detection_classes[i] in category_id_set:\n detections_list.append({\n 'image_id': image_id,\n 'category_id': int(detection_classes[i]),\n 'segmentation': _RleCompress(detection_masks[i]),\n 'score': float(detection_scores[i])\n })\n return detections_list\n\n\ndef ExportDetectionsToCOCO(image_ids,\n detection_boxes,\n detection_scores,\n detection_classes,\n categories,\n output_path=None):\n \"\"\"Export detection annotations in numpy arrays to COCO API.\n\n This function converts a set of predicted detections represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\n Inputs to this function are lists, consisting of boxes, scores and\n classes, respectively, corresponding to each image for which detections\n have been produced. Note that the image_ids provided here must\n match the ones given to the ExportGroundtruthToCOCO function in order\n for evaluation to work properly.\n\n We assume that for each image, boxes, scores and classes are in\n correspondence --- that is: detection_boxes[i, :], detection_scores[i] and\n detection_classes[i] are associated with the same detection.\n\n Args:\n image_ids: a list of unique image identifier either of type integer or\n string.\n detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]\n detection_scores: list of numpy arrays (float) with shape\n [num_detection_boxes]. Note that num_detection_boxes can be different\n for each entry in the list.\n detection_classes: list of numpy arrays (int) with shape\n [num_detection_boxes]. Note that num_detection_boxes can be different\n for each entry in the list.\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list must have an integer 'id' key uniquely identifying\n this category.\n output_path: (optional) path for exporting result to JSON\n\n Returns:\n list of dictionaries that can be read by COCO API, where each entry\n corresponds to a single detection and has keys from:\n ['image_id', 'category_id', 'bbox', 'score'].\n Raises:\n ValueError: if (1) detection_boxes and detection_classes do not have the\n right lengths or (2) if each of the elements inside these lists do not\n have the correct shapes or (3) if image_ids are not integers.\n \"\"\"\n category_id_set = set([cat['id'] for cat in categories])\n detections_export_list = []\n if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==\n len(detection_classes)):\n raise ValueError('Input lists must have the same length')\n for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,\n detection_scores,\n detection_classes):\n detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(\n image_id,\n category_id_set,\n boxes,\n scores,\n classes))\n if output_path:\n with tf.gfile.GFile(output_path, 'w') as fid:\n json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)\n return detections_export_list\n\n\ndef ExportSegmentsToCOCO(image_ids,\n detection_masks,\n detection_scores,\n detection_classes,\n categories,\n output_path=None):\n \"\"\"Export segmentation masks in numpy arrays to COCO API.\n\n This function converts a set of predicted instance masks represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\n Inputs to this function are lists, consisting of segments, scores and\n classes, respectively, 
corresponding to each image for which detections\n have been produced.\n\n Note this function is recommended to use for small dataset.\n For large dataset, it should be used with a merge function\n (e.g. in map reduce), otherwise the memory consumption is large.\n\n We assume that for each image, masks, scores and classes are in\n correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]\n and detection_classes[i] are associated with the same detection.\n\n Args:\n image_ids: list of image ids (typically ints or strings)\n detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]\n and type uint8. The height and width should match the shape of\n corresponding image.\n detection_scores: list of numpy arrays (float) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n detection_classes: list of numpy arrays (int) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list must have an integer 'id' key uniquely identifying\n this category.\n output_path: (optional) path for exporting result to JSON\n\n Returns:\n list of dictionaries that can be read by COCO API, where each entry\n corresponds to a single detection and has keys from:\n ['image_id', 'category_id', 'segmentation', 'score'].\n\n Raises:\n ValueError: if detection_masks and detection_classes do not have the\n right lengths or if each of the elements inside these lists do not\n have the correct shapes.\n \"\"\"\n if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==\n len(detection_classes)):\n raise ValueError('Input lists must have the same length')\n\n segment_export_list = []\n for image_id, masks, scores, classes in zip(image_ids, detection_masks,\n detection_scores,\n detection_classes):\n\n if len(classes.shape) != 1 or len(scores.shape) != 1:\n raise ValueError('All entries in detection_classes and detection_scores'\n 'expected to be of rank 1.')\n if len(masks.shape) != 4:\n raise ValueError('All entries in masks expected to be of '\n 'rank 4. 
Given {}'.format(masks.shape))\n\n num_boxes = classes.shape[0]\n if not num_boxes == masks.shape[0] == scores.shape[0]:\n raise ValueError('Corresponding entries in detection_classes, '\n 'detection_scores and detection_masks should have '\n 'compatible shapes (i.e., agree on the 0th dimension).')\n\n category_id_set = set([cat['id'] for cat in categories])\n segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(\n image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))\n\n if output_path:\n with tf.gfile.GFile(output_path, 'w') as fid:\n json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)\n return segment_export_list\n\n\ndef ExportKeypointsToCOCO(image_ids,\n detection_keypoints,\n detection_scores,\n detection_classes,\n categories,\n output_path=None):\n \"\"\"Exports keypoints in numpy arrays to COCO API.\n\n This function converts a set of predicted keypoints represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\n Inputs to this function are lists, consisting of keypoints, scores and\n classes, respectively, corresponding to each image for which detections\n have been produced.\n\n We assume that for each image, keypoints, scores and classes are in\n correspondence --- that is: detection_keypoints[i, :, :],\n detection_scores[i] and detection_classes[i] are associated with the same\n detection.\n\n Args:\n image_ids: list of image ids (typically ints or strings)\n detection_keypoints: list of numpy arrays with shape\n [num_detection, num_keypoints, 2] and type float32 in absolute\n x-y coordinates.\n detection_scores: list of numpy arrays (float) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n detection_classes: list of numpy arrays (int) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list must have an integer 'id' key uniquely identifying\n this category and an integer 'num_keypoints' key specifying the number of\n keypoints the category has.\n output_path: (optional) path for exporting result to JSON\n\n Returns:\n list of dictionaries that can be read by COCO API, where each entry\n corresponds to a single detection and has keys from:\n ['image_id', 'category_id', 'keypoints', 'score'].\n\n Raises:\n ValueError: if detection_keypoints and detection_classes do not have the\n right lengths or if the elements inside these lists do not\n have the correct shapes.\n \"\"\"\n if not (len(image_ids) == len(detection_keypoints) ==\n len(detection_scores) == len(detection_classes)):\n raise ValueError('Input lists must have the same length')\n\n keypoints_export_list = []\n for image_id, keypoints, scores, classes in zip(\n image_ids, detection_keypoints, detection_scores, detection_classes):\n\n if len(classes.shape) != 1 or len(scores.shape) != 1:\n raise ValueError('All entries in detection_classes and detection_scores '\n 'expected to be of rank 1.')\n if len(keypoints.shape) != 3:\n raise ValueError('All entries in keypoints expected to be of '\n 'rank 3. 
Given {}'.format(keypoints.shape))\n\n num_boxes = classes.shape[0]\n if not num_boxes == keypoints.shape[0] == scores.shape[0]:\n raise ValueError('Corresponding entries in detection_classes, '\n 'detection_keypoints, and detection_scores should have '\n 'compatible shapes (i.e., agree on the 0th dimension).')\n\n category_id_set = set([cat['id'] for cat in categories])\n category_id_to_num_keypoints_map = {\n cat['id']: cat['num_keypoints'] for cat in categories\n if 'num_keypoints' in cat}\n\n for i in range(num_boxes):\n if classes[i] not in category_id_set:\n raise ValueError('class id should be in category_id_set\\n')\n\n if classes[i] in category_id_to_num_keypoints_map:\n num_keypoints = category_id_to_num_keypoints_map[classes[i]]\n # Adds extra ones to indicate the visibility for each keypoint as is\n # recommended by MSCOCO.\n instance_keypoints = np.concatenate(\n [keypoints[i, 0:num_keypoints, :],\n np.expand_dims(np.ones(num_keypoints), axis=1)],\n axis=1).astype(int)\n\n instance_keypoints = instance_keypoints.flatten().tolist()\n keypoints_export_list.append({\n 'image_id': image_id,\n 'category_id': int(classes[i]),\n 'keypoints': instance_keypoints,\n 'score': float(scores[i])\n })\n\n if output_path:\n with tf.gfile.GFile(output_path, 'w') as fid:\n json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)\n return keypoints_export_list\n" ]
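The keypoint export above appends a visibility flag of 1 to each (x, y) pair and flattens to the [x1, y1, v1, x2, y2, v2, ...] layout that MSCOCO expects. A minimal sketch of just that step, with made-up coordinates:

import numpy as np

# Two keypoints for one detection, in absolute x-y coordinates (made-up values).
keypoints_i = np.array([[12.5, 30.2], [40.0, 41.7]])
# Append a visibility flag of 1 per keypoint, as in the export loop above.
instance_keypoints = np.concatenate(
    [keypoints_i, np.expand_dims(np.ones(2), axis=1)], axis=1).astype(int)
print(instance_keypoints.flatten().tolist())  # [12, 30, 1, 40, 41, 1]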
[ [ "numpy.full", "tensorflow.compat.v1.logging.info", "numpy.reshape", "numpy.ones", "numpy.asfortranarray", "numpy.squeeze", "tensorflow.compat.v1.gfile.GFile" ] ]
qilei123/FCOS
[ "53d355456460a2a45830e3953508f41173ddb9bf", "53d355456460a2a45830e3953508f41173ddb9bf" ]
[ "fcos_core/modeling/roi_heads/box_head/inference.py", "fcos_core/data/datasets/coco.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch import nn\r\n\r\nfrom fcos_core.structures.bounding_box import BoxList\r\nfrom fcos_core.structures.boxlist_ops import boxlist_nms\r\nfrom fcos_core.structures.boxlist_ops import cat_boxlist\r\nfrom fcos_core.modeling.box_coder import BoxCoder\r\n\r\n\r\nclass PostProcessor(nn.Module):\r\n \"\"\"\r\n From a set of classification scores, box regression and proposals,\r\n computes the post-processed boxes, and applies NMS to obtain the\r\n final results\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n score_thresh=0.05,\r\n nms=0.5,\r\n detections_per_img=100,\r\n box_coder=None,\r\n cls_agnostic_bbox_reg=False,\r\n bbox_aug_enabled=False\r\n ):\r\n \"\"\"\r\n Arguments:\r\n score_thresh (float)\r\n nms (float)\r\n detections_per_img (int)\r\n box_coder (BoxCoder)\r\n \"\"\"\r\n super(PostProcessor, self).__init__()\r\n self.score_thresh = score_thresh\r\n self.nms = nms\r\n self.detections_per_img = detections_per_img\r\n if box_coder is None:\r\n box_coder = BoxCoder(weights=(10., 10., 5., 5.))\r\n self.box_coder = box_coder\r\n self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg\r\n self.bbox_aug_enabled = bbox_aug_enabled\r\n\r\n def forward(self, x, boxes):\r\n \"\"\"\r\n Arguments:\r\n x (tuple[tensor, tensor]): x contains the class logits\r\n and the box_regression from the model.\r\n boxes (list[BoxList]): bounding boxes that are used as\r\n reference, one for ech image\r\n\r\n Returns:\r\n results (list[BoxList]): one BoxList for each image, containing\r\n the extra fields labels and scores\r\n \"\"\"\r\n class_logits, box_regression = x\r\n class_prob = F.softmax(class_logits, -1)\r\n\r\n # TODO think about a representation of batch of boxes\r\n image_shapes = [box.size for box in boxes]\r\n boxes_per_image = [len(box) for box in boxes]\r\n concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)\r\n\r\n if self.cls_agnostic_bbox_reg:\r\n box_regression = box_regression[:, -4:]\r\n proposals = self.box_coder.decode(\r\n box_regression.view(sum(boxes_per_image), -1), concat_boxes\r\n )\r\n if self.cls_agnostic_bbox_reg:\r\n proposals = proposals.repeat(1, class_prob.shape[1])\r\n\r\n num_classes = class_prob.shape[1]\r\n\r\n proposals = proposals.split(boxes_per_image, dim=0)\r\n class_prob = class_prob.split(boxes_per_image, dim=0)\r\n\r\n results = []\r\n for prob, boxes_per_img, image_shape in zip(\r\n class_prob, proposals, image_shapes\r\n ):\r\n boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)\r\n boxlist = boxlist.clip_to_image(remove_empty=False)\r\n if not self.bbox_aug_enabled: # If bbox aug is enabled, we will do it later\r\n boxlist = self.filter_results(boxlist, num_classes)\r\n results.append(boxlist)\r\n return results\r\n\r\n def prepare_boxlist(self, boxes, scores, image_shape):\r\n \"\"\"\r\n Returns BoxList from `boxes` and adds probability scores information\r\n as an extra field\r\n `boxes` has shape (#detections, 4 * #classes), where each row represents\r\n a list of predicted bounding boxes for each of the object classes in the\r\n dataset (including the background class). The detections in each row\r\n originate from the same object proposal.\r\n `scores` has shape (#detection, #classes), where each row represents a list\r\n of object detection confidence scores for each of the object classes in the\r\n dataset (including the background class). 
`scores[i, j]` corresponds to the\r\n box at `boxes[i, j * 4:(j + 1) * 4]`.\r\n \"\"\"\r\n boxes = boxes.reshape(-1, 4)\r\n scores = scores.reshape(-1)\r\n boxlist = BoxList(boxes, image_shape, mode=\"xyxy\")\r\n boxlist.add_field(\"scores\", scores)\r\n return boxlist\r\n\r\n def filter_results(self, boxlist, num_classes):\r\n \"\"\"Returns bounding-box detection results by thresholding on scores and\r\n applying non-maximum suppression (NMS).\r\n \"\"\"\r\n # unwrap the boxlist to avoid additional overhead.\r\n # if we had multi-class NMS, we could perform this directly on the boxlist\r\n boxes = boxlist.bbox.reshape(-1, num_classes * 4)\r\n scores = boxlist.get_field(\"scores\").reshape(-1, num_classes)\r\n\r\n device = scores.device\r\n result = []\r\n # Apply threshold on detection probabilities and apply NMS\r\n # Skip j = 0, because it's the background class\r\n inds_all = scores > self.score_thresh\r\n for j in range(1, num_classes):\r\n inds = inds_all[:, j].nonzero().squeeze(1)\r\n scores_j = scores[inds, j]\r\n boxes_j = boxes[inds, j * 4 : (j + 1) * 4]\r\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\r\n boxlist_for_class.add_field(\"scores\", scores_j)\r\n boxlist_for_class = boxlist_nms(\r\n boxlist_for_class, self.nms\r\n )\r\n num_labels = len(boxlist_for_class)\r\n boxlist_for_class.add_field(\r\n \"labels\", torch.full((num_labels,), j, dtype=torch.int64, device=device)\r\n )\r\n result.append(boxlist_for_class)\r\n\r\n result = cat_boxlist(result)\r\n number_of_detections = len(result)\r\n\r\n # Limit to max_per_image detections **over all classes**\r\n if number_of_detections > self.detections_per_img > 0:\r\n cls_scores = result.get_field(\"scores\")\r\n image_thresh, _ = torch.kthvalue(\r\n cls_scores.cpu(), number_of_detections - self.detections_per_img + 1\r\n )\r\n keep = cls_scores >= image_thresh.item()\r\n keep = torch.nonzero(keep).squeeze(1)\r\n result = result[keep]\r\n return result\r\n\r\n\r\ndef make_roi_box_post_processor(cfg):\r\n use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN\r\n\r\n bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS\r\n box_coder = BoxCoder(weights=bbox_reg_weights)\r\n\r\n score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH\r\n nms_thresh = cfg.MODEL.ROI_HEADS.NMS\r\n detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG\r\n cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG\r\n bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED\r\n\r\n postprocessor = PostProcessor(\r\n score_thresh,\r\n nms_thresh,\r\n detections_per_img,\r\n box_coder,\r\n cls_agnostic_bbox_reg,\r\n bbox_aug_enabled\r\n )\r\n return postprocessor\r\n", "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\r\nimport torch\r\nimport torchvision\r\n\r\nfrom fcos_core.structures.bounding_box import BoxList\r\nfrom fcos_core.structures.segmentation_mask import SegmentationMask\r\nfrom fcos_core.structures.keypoint import PersonKeypoints\r\n\r\n\r\nmin_keypoints_per_image = 10\r\n\r\n\r\ndef _count_visible_keypoints(anno):\r\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\r\n\r\n\r\ndef _has_only_empty_bbox(anno):\r\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\r\n\r\n\r\ndef has_valid_annotation(anno):\r\n # if it's empty, there is no annotation\r\n if len(anno) == 0:\r\n return False\r\n # if all boxes have close to zero area, there is no annotation\r\n if _has_only_empty_bbox(anno):\r\n return False\r\n # the keypoints task has slightly different criteria for deciding\r\n # if an annotation is valid\r\n if \"keypoints\" not in anno[0]:\r\n return True\r\n # for keypoint detection tasks, only consider images valid if they\r\n # contain at least min_keypoints_per_image visible keypoints\r\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\r\n return True\r\n return False\r\n\r\n\r\nclass COCODataset(torchvision.datasets.coco.CocoDetection):\r\n def __init__(\r\n self, ann_file, root, remove_images_without_annotations, transforms=None\r\n ):\r\n super(COCODataset, self).__init__(root, ann_file)\r\n # sort indices for reproducible results\r\n self.ids = sorted(self.ids)\r\n\r\n # filter images without detection annotations\r\n if remove_images_without_annotations:\r\n ids = []\r\n for img_id in self.ids:\r\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)\r\n anno = self.coco.loadAnns(ann_ids)\r\n if has_valid_annotation(anno):\r\n ids.append(img_id)\r\n self.ids = ids\r\n\r\n self.json_category_id_to_contiguous_id = {\r\n v: i + 1 for i, v in enumerate(self.coco.getCatIds())\r\n }\r\n self.contiguous_category_id_to_json_id = {\r\n v: k for k, v in self.json_category_id_to_contiguous_id.items()\r\n }\r\n self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}\r\n self._transforms = transforms\r\n\r\n def __getitem__(self, idx):\r\n img, anno = super(COCODataset, self).__getitem__(idx)\r\n\r\n # filter crowd annotations\r\n # TODO might be better to add an extra field\r\n anno = [obj for obj in anno if obj[\"iscrowd\"] == 0]\r\n\r\n boxes = [obj[\"bbox\"] for obj in anno]\r\n boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes\r\n target = BoxList(boxes, img.size, mode=\"xywh\").convert(\"xyxy\")\r\n\r\n classes = [obj[\"category_id\"] for obj in anno]\r\n classes = [self.json_category_id_to_contiguous_id[c] for c in classes]\r\n classes = torch.tensor(classes)\r\n target.add_field(\"labels\", classes)\r\n\r\n masks = [obj[\"segmentation\"] for obj in anno]\r\n masks = SegmentationMask(masks, img.size, mode='poly')\r\n target.add_field(\"masks\", masks)\r\n\r\n if anno and \"keypoints\" in anno[0]:\r\n keypoints = [obj[\"keypoints\"] for obj in anno]\r\n keypoints = PersonKeypoints(keypoints, img.size)\r\n target.add_field(\"keypoints\", keypoints)\r\n\r\n target = target.clip_to_image(remove_empty=True)\r\n\r\n if self._transforms is not None:\r\n img, target = self._transforms(img, target)\r\n\r\n return img, target, idx\r\n\r\n def get_img_info(self, index):\r\n img_id = self.id_to_img_map[index]\r\n img_data = self.coco.imgs[img_id]\r\n return img_data\r\n" ]
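One detail worth calling out in COCODataset above: COCO json category ids are sparse, so the constructor remaps them to a contiguous 1..C range (0 is reserved for background). A small worked example of the two dicts it builds:

# Sparse COCO category ids, e.g. what coco.getCatIds() might return.
json_cat_ids = [1, 3, 7]
json_category_id_to_contiguous_id = {v: i + 1 for i, v in enumerate(json_cat_ids)}
contiguous_category_id_to_json_id = {
    v: k for k, v in json_category_id_to_contiguous_id.items()}
assert json_category_id_to_contiguous_id == {1: 1, 3: 2, 7: 3}
assert contiguous_category_id_to_json_id[2] == 3  # maps back to the json id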
[ [ "torch.nonzero", "torch.cat", "torch.full", "torch.nn.functional.softmax" ], [ "torch.as_tensor", "torch.tensor" ] ]
CurisZhou/bert4keras
[ "216f408b0501a1e6e6903c7a6271213d88f7725c" ]
[ "bert4keras/models.py" ]
[ "#! -*- coding: utf-8 -*-\n# 主要模型\n\nimport numpy as np\nfrom bert4keras.layers import *\nfrom bert4keras.snippets import insert_arguments\nfrom bert4keras.snippets import delete_arguments\nfrom bert4keras.snippets import is_string\nfrom keras.models import Model\nimport json\n\n\nclass Transformer(object):\n \"\"\"模型基类\n \"\"\"\n def __init__(\n self,\n vocab_size, # 词表大小\n hidden_size, # 编码维度\n num_hidden_layers, # Transformer总层数\n num_attention_heads, # Attention的头数\n intermediate_size, # FeedForward的隐层维度\n hidden_act, # FeedForward隐层的激活函数\n dropout_rate=None, # Dropout比例\n embedding_size=None, # 是否指定embedding_size\n attention_head_size=None, # Attention中V的head_size\n attention_key_size=None, # Attention中Q,K的head_size\n sequence_length=None, # 是否固定序列长度\n keep_tokens=None, # 要保留的词ID列表\n compound_tokens=None, # 扩展Embedding\n residual_attention_scores=False, # Attention矩阵加残差\n layers=None, # 外部传入的Keras层\n prefix=None, # 层名前缀\n name=None, # 模型名称\n **kwargs\n ):\n if keep_tokens is not None:\n vocab_size = len(keep_tokens)\n if compound_tokens is not None:\n vocab_size += len(compound_tokens)\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.attention_head_size = attention_head_size or hidden_size // num_attention_heads\n self.attention_key_size = attention_key_size or self.attention_head_size\n self.intermediate_size = intermediate_size\n self.dropout_rate = dropout_rate or 0\n self.hidden_act = hidden_act\n self.embedding_size = embedding_size or hidden_size\n self.sequence_length = sequence_length\n self.keep_tokens = keep_tokens\n self.compound_tokens = compound_tokens\n self.attention_bias = None\n self.position_bias = None\n self.attention_scores = None\n self.residual_attention_scores = residual_attention_scores\n self.layers = {} if layers is None else layers\n self.prefix = prefix or ''\n self.name = name\n self.built = False\n\n def build(\n self,\n attention_caches=None,\n layer_norm_cond=None,\n layer_norm_cond_hidden_size=None,\n layer_norm_cond_hidden_act=None,\n additional_input_layers=None,\n **kwargs\n ):\n \"\"\"模型构建函数\n attention_caches:为Attention的K,V的缓存序列字典,格式为\n {Attention层名: [K缓存, V缓存]};\n layer_norm_*系列参数:实现Conditional Layer Normalization时使用,\n 用来实现以“固定长度向量”为条件的条件Bert。\n \"\"\"\n if self.built:\n return None\n # Input\n inputs = self.get_inputs()\n self.set_inputs(inputs, additional_input_layers)\n # Other\n self.attention_caches = attention_caches or {}\n self.layer_norm_conds = [\n layer_norm_cond,\n layer_norm_cond_hidden_size,\n layer_norm_cond_hidden_act or 'linear',\n ]\n # Call\n outputs = self.call(inputs)\n self.set_outputs(outputs)\n # Model\n self.model = Model(self.inputs, self.outputs, name=self.name)\n self.built = True\n\n def call(self, inputs):\n \"\"\"定义模型的执行流程\n \"\"\"\n # Embedding\n outputs = self.apply_embeddings(inputs)\n # Main\n for i in range(self.num_hidden_layers):\n outputs = self.apply_main_layers(outputs, i)\n # Final\n outputs = self.apply_final_layers(outputs)\n return outputs\n\n def prefixed(self, name):\n \"\"\"给名字加前缀\n \"\"\"\n if name is not None:\n return self.prefix + name\n\n def apply(self, inputs=None, layer=None, arguments=None, **kwargs):\n \"\"\"通过apply调用层会自动重用同名层\n inputs: 上一层的输出;\n layer: 要调用的层类名;\n arguments: 传递给layer.call的参数;\n kwargs: 传递给层初始化的参数。\n \"\"\"\n if layer is Dropout and self.dropout_rate == 0:\n return inputs\n\n if layer is MultiHeadAttention and self.residual_attention_scores:\n 
kwargs['return_attention_scores'] = True\n\n arguments = arguments or {}\n name = self.prefixed(kwargs.get('name'))\n kwargs['name'] = name\n if name not in self.layers:\n layer = layer(**kwargs)\n name = layer.name\n self.layers[name] = layer\n\n if inputs is None:\n return self.layers[name]\n else:\n if isinstance(self.layers[name], MultiHeadAttention):\n if name in self.attention_caches:\n # 如果检测到Cache的传入,那么自动在Key,Value处拼接起来\n k_cache, v_cache = self.attention_caches[name]\n k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'\n k = Concatenate1D(name=k_name)([k_cache, inputs[1]])\n v = Concatenate1D(name=v_name)([v_cache, inputs[2]])\n inputs = inputs[:1] + [k, v] + inputs[3:]\n if self.residual_attention_scores:\n # 如果使用残差Attention矩阵,则给每个Attention矩阵加上前上一层的Attention\n # 矩阵,这对应RealFormer设计(https://arxiv.org/abs/2012.11747)。目前\n # 该实现还相对粗糙,可能欠缺通用性。\n if self.attention_scores is not None:\n if arguments.get('a_bias'):\n a_bias = Add(name=name + '-Attention-Bias'\n )([inputs[3], self.attention_scores])\n else:\n a_bias = self.attention_scores\n inputs = inputs[:3] + [a_bias] + inputs[4:]\n arguments['a_bias'] = True\n o, a = self.layers[name](inputs, **arguments)\n self.attention_scores = a\n return o\n return self.layers[name](inputs, **arguments)\n\n def get_inputs(self):\n raise NotImplementedError\n\n def apply_embeddings(self, inputs):\n raise NotImplementedError\n\n def apply_main_layers(self, inputs, index):\n raise NotImplementedError\n\n def apply_final_layers(self, inputs):\n raise NotImplementedError\n\n def compute_attention_bias(self, inputs=None):\n \"\"\"定义每一层的Attention Bias\n \"\"\"\n return self.attention_bias\n\n def compute_position_bias(self, inputs=None):\n \"\"\"定义每一层的Position Bias(一般相对位置编码用)\n \"\"\"\n return self.position_bias\n\n def set_inputs(self, inputs, additional_input_layers=None):\n \"\"\"设置input和inputs属性\n \"\"\"\n if inputs is None:\n inputs = []\n elif not isinstance(inputs, list):\n inputs = [inputs]\n\n inputs = inputs[:]\n if additional_input_layers is not None:\n if not isinstance(additional_input_layers, list):\n additional_input_layers = [additional_input_layers]\n inputs.extend(additional_input_layers)\n\n self.inputs = inputs\n if len(inputs) > 1:\n self.input = inputs\n else:\n self.input = inputs[0]\n\n def set_outputs(self, outputs):\n \"\"\"设置output和oututs属性\n \"\"\"\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n outputs = outputs[:]\n self.outputs = outputs\n if len(outputs) > 1:\n self.output = outputs\n else:\n self.output = outputs[0]\n\n @property\n def initializer(self):\n \"\"\"默认使用截断正态分布初始化\n \"\"\"\n return keras.initializers.TruncatedNormal(stddev=0.02)\n\n def simplify(self, inputs):\n \"\"\"将list中的None过滤掉\n \"\"\"\n inputs = [i for i in inputs if i is not None]\n if len(inputs) == 1:\n inputs = inputs[0]\n\n return inputs\n\n def load_embeddings(self, embeddings):\n \"\"\"处理Embedding层权重\n \"\"\"\n if self.keep_tokens is not None:\n embeddings = embeddings[self.keep_tokens]\n\n if self.compound_tokens is not None:\n ext_embeddings = []\n for item in self.compound_tokens:\n if isinstance(item, list):\n item = (item, [1] * len(item))\n ext_embeddings.append(\n np.average(embeddings[item[0]], 0, item[1])\n )\n embeddings = np.concatenate([embeddings, ext_embeddings], 0)\n\n return embeddings\n\n def load_variable(self, checkpoint, name):\n \"\"\"加载单个变量的函数\n \"\"\"\n if isinstance(checkpoint, dict):\n return checkpoint[name]\n else:\n return tf.train.load_variable(checkpoint, name)\n\n def create_variable(self, 
name, value, dtype=None):\n \"\"\"创建一个变量\n \"\"\"\n dtype = dtype or K.floatx()\n return K.variable(\n self.initializer(value.shape, dtype), dtype, name=name\n ), value\n\n def variable_mapping(self):\n \"\"\"构建keras层与checkpoint的变量名之间的映射表\n \"\"\"\n return {}\n\n def load_weights_from_checkpoint(self, checkpoint, mapping=None):\n \"\"\"根据mapping从checkpoint加载权重\n \"\"\"\n mapping = mapping or self.variable_mapping()\n mapping = {self.prefixed(k): v for k, v in mapping.items()}\n mapping = {k: v for k, v in mapping.items() if k in self.layers}\n\n weight_value_pairs = []\n for layer, variables in mapping.items():\n layer = self.layers[layer]\n weights = layer.trainable_weights\n values = [self.load_variable(checkpoint, v) for v in variables]\n\n if isinstance(layer, MultiHeadAttention):\n \"\"\"如果key_size不等于head_size,则可以通过\n 正交矩阵将相应的权重投影到合适的shape。\n \"\"\"\n count = 2\n if layer.use_bias:\n count += 2\n heads = self.num_attention_heads\n head_size = self.attention_head_size\n key_size = self.attention_key_size\n W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T\n if layer.attention_scale:\n W = W * key_size**0.25 / head_size**0.25\n for i in range(count):\n w, v = weights[i], values[i]\n w_shape, v_shape = K.int_shape(w), v.shape\n if w_shape[-1] != v_shape[-1]:\n pre_shape = w_shape[:-1]\n v = v.reshape(pre_shape + (heads, head_size))\n v = np.dot(v, W)\n v = v.reshape(pre_shape + (heads * key_size,))\n values[i] = v\n\n weight_value_pairs.extend(zip(weights, values))\n\n K.batch_set_value(weight_value_pairs)\n\n def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):\n \"\"\"根据mapping将权重保存为checkpoint格式\n \"\"\"\n mapping = mapping or self.variable_mapping()\n mapping = {self.prefixed(k): v for k, v in mapping.items()}\n mapping = {k: v for k, v in mapping.items() if k in self.layers}\n\n with tf.Graph().as_default():\n all_variables, all_values = [], []\n for layer, variables in mapping.items():\n layer = self.layers[layer]\n values = K.batch_get_value(layer.trainable_weights)\n for name, value in zip(variables, values):\n variable, value = self.create_variable(name, value, dtype)\n all_variables.append(variable)\n all_values.append(value)\n with tf.Session() as sess:\n K.batch_set_value(zip(all_variables, all_values))\n saver = tf.train.Saver()\n saver.save(sess, filename)\n\n\nclass LM_Mask(object):\n \"\"\"定义下三角Attention Mask(语言模型用)\n \"\"\"\n def compute_attention_bias(self, inputs=None):\n \"\"\"通过idxs序列的比较来得到对应的mask\n \"\"\"\n if self.attention_bias is None:\n\n def lm_mask(s):\n seq_len = K.shape(s)[1]\n idxs = K.arange(0, seq_len)\n mask = idxs[None, :] <= idxs[:, None]\n mask = K.cast(mask, K.floatx())\n return -(1 - mask[None, None]) * 1e12\n\n self.attention_bias = self.apply(\n inputs=self.inputs[0],\n layer=Lambda,\n function=lm_mask,\n name='Attention-LM-Mask'\n )\n\n return self.attention_bias\n\n\nclass UniLM_Mask(object):\n \"\"\"定义UniLM的Attention Mask(Seq2Seq模型用)\n 其中source和target的分区,由segment_ids来表示。\n UniLM: https://arxiv.org/abs/1905.03197\n \"\"\"\n def compute_attention_bias(self, inputs=None):\n \"\"\"通过idxs序列的比较来得到对应的mask\n \"\"\"\n if self.attention_bias is None:\n\n def unilm_mask(s):\n idxs = K.cumsum(s, axis=1)\n mask = idxs[:, None, :] <= idxs[:, :, None]\n mask = K.cast(mask, K.floatx())\n return -(1 - mask[:, None]) * 1e12\n\n self.attention_bias = self.apply(\n inputs=self.inputs[1],\n layer=Lambda,\n function=unilm_mask,\n name='Attention-UniLM-Mask'\n )\n\n return self.attention_bias\n\n\nclass BERT(Transformer):\n 
\"\"\"构建BERT模型\n \"\"\"\n def __init__(\n self,\n max_position, # 序列最大长度\n segment_vocab_size=2, # segment总数目\n with_pool=False, # 是否包含Pool部分\n with_nsp=False, # 是否包含NSP部分\n with_mlm=False, # 是否包含MLM部分\n hierarchical_position=None, # 是否层次分解位置编码\n custom_position_ids=False, # 是否自行传入位置id\n shared_segment_embeddings=False, # 若True,则segment跟token共用embedding\n **kwargs # 其余参数\n ):\n super(BERT, self).__init__(**kwargs)\n self.max_position = max_position\n self.segment_vocab_size = segment_vocab_size\n self.with_pool = with_pool\n self.with_nsp = with_nsp\n self.with_mlm = with_mlm\n self.hierarchical_position = hierarchical_position\n self.custom_position_ids = custom_position_ids\n self.shared_segment_embeddings = shared_segment_embeddings\n if self.with_nsp and not self.with_pool:\n self.with_pool = True\n\n def get_inputs(self):\n \"\"\"BERT的输入是token_ids和segment_ids\n (但允许自行传入位置id,以实现一些特殊需求)\n \"\"\"\n x_in = self.apply(\n layer=Input, shape=(self.sequence_length,), name='Input-Token'\n )\n inputs = [x_in]\n\n if self.segment_vocab_size > 0:\n s_in = self.apply(\n layer=Input,\n shape=(self.sequence_length,),\n name='Input-Segment'\n )\n inputs.append(s_in)\n\n if self.custom_position_ids:\n p_in = self.apply(\n layer=Input,\n shape=(self.sequence_length,),\n name='Input-Position'\n )\n inputs.append(p_in)\n\n return inputs\n\n def apply_embeddings(self, inputs):\n \"\"\"BERT的embedding是token、position、segment三者embedding之和\n \"\"\"\n inputs = inputs[:]\n x = inputs.pop(0)\n if self.segment_vocab_size > 0:\n s = inputs.pop(0)\n if self.custom_position_ids:\n p = inputs.pop(0)\n else:\n p = None\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n if self.segment_vocab_size > 0:\n if self.shared_segment_embeddings:\n name = 'Embedding-Token'\n else:\n name = 'Embedding-Segment'\n s = self.apply(\n inputs=s,\n layer=Embedding,\n input_dim=self.segment_vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n name=name\n )\n x = self.apply(\n inputs=[x, s], layer=Add, name='Embedding-Token-Segment'\n )\n x = self.apply(\n inputs=self.simplify([x, p]),\n layer=PositionEmbedding,\n input_dim=self.max_position,\n output_dim=self.embedding_size,\n merge_mode='add',\n hierarchical=self.hierarchical_position,\n embeddings_initializer=self.initializer,\n custom_position_ids=self.custom_position_ids,\n name='Embedding-Position'\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Embedding-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Embedding-Dropout'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Embedding-Mapping'\n )\n\n return x\n\n def apply_main_layers(self, inputs, index):\n \"\"\"BERT的主体是基于Self-Attention的模块\n 顺序:Att --> Add --> LN --> FFN --> Add --> LN\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index\n feed_forward_name = 'Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n\n # Self Attention\n xi, x, arguments = x, 
[x, x, x], {'a_bias': None}\n if attention_mask is not None:\n arguments['a_bias'] = True\n x.append(attention_mask)\n\n x = self.apply(\n inputs=x,\n layer=MultiHeadAttention,\n arguments=arguments,\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n\n return x\n\n def apply_final_layers(self, inputs):\n \"\"\"根据剩余参数决定输出\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n outputs = [x]\n\n if self.with_pool:\n # Pooler部分(提取CLS向量)\n x = outputs[0]\n x = self.apply(\n inputs=x,\n layer=Lambda,\n function=lambda x: x[:, 0],\n name='Pooler'\n )\n pool_activation = 'tanh' if self.with_pool is True else self.with_pool\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n activation=pool_activation,\n kernel_initializer=self.initializer,\n name='Pooler-Dense'\n )\n if self.with_nsp:\n # Next Sentence Prediction部分\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=2,\n activation='softmax',\n kernel_initializer=self.initializer,\n name='NSP-Proba'\n )\n outputs.append(x)\n\n if self.with_mlm:\n # Masked Language Model部分\n x = outputs[0]\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.embedding_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name='MLM-Dense'\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='MLM-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Embedding,\n arguments={'mode': 'dense'},\n name='Embedding-Token'\n )\n x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')\n mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm\n x = self.apply(\n inputs=x,\n layer=Activation,\n activation=mlm_activation,\n name='MLM-Activation'\n )\n outputs.append(x)\n\n if len(outputs) == 1:\n outputs = outputs[0]\n elif len(outputs) == 2:\n outputs = outputs[1]\n else:\n outputs = outputs[1:]\n\n return outputs\n\n def load_variable(self, checkpoint, name):\n \"\"\"加载单个变量的函数\n \"\"\"\n variable = super(BERT, self).load_variable(checkpoint, name)\n if name in [\n 'bert/embeddings/word_embeddings',\n 'cls/predictions/output_bias',\n ]:\n return 
self.load_embeddings(variable)\n elif name == 'cls/seq_relationship/output_weights':\n return variable.T\n else:\n return variable\n\n def create_variable(self, name, value, dtype=None):\n \"\"\"在tensorflow中创建一个变量\n \"\"\"\n if name == 'cls/seq_relationship/output_weights':\n value = value.T\n return super(BERT, self).create_variable(name, value, dtype)\n\n def variable_mapping(self):\n \"\"\"映射到官方BERT权重格式\n \"\"\"\n mapping = {\n 'Embedding-Token': ['bert/embeddings/word_embeddings'],\n 'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],\n 'Embedding-Position': ['bert/embeddings/position_embeddings'],\n 'Embedding-Norm': [\n 'bert/embeddings/LayerNorm/beta',\n 'bert/embeddings/LayerNorm/gamma',\n ],\n 'Embedding-Mapping': [\n 'bert/encoder/embedding_hidden_mapping_in/kernel',\n 'bert/encoder/embedding_hidden_mapping_in/bias',\n ],\n 'Pooler-Dense': [\n 'bert/pooler/dense/kernel',\n 'bert/pooler/dense/bias',\n ],\n 'NSP-Proba': [\n 'cls/seq_relationship/output_weights',\n 'cls/seq_relationship/output_bias',\n ],\n 'MLM-Dense': [\n 'cls/predictions/transform/dense/kernel',\n 'cls/predictions/transform/dense/bias',\n ],\n 'MLM-Norm': [\n 'cls/predictions/transform/LayerNorm/beta',\n 'cls/predictions/transform/LayerNorm/gamma',\n ],\n 'MLM-Bias': ['cls/predictions/output_bias'],\n }\n\n for i in range(self.num_hidden_layers):\n prefix = 'bert/encoder/layer_%d/' % i\n mapping.update({\n 'Transformer-%d-MultiHeadSelfAttention' % i: [\n prefix + 'attention/self/query/kernel',\n prefix + 'attention/self/query/bias',\n prefix + 'attention/self/key/kernel',\n prefix + 'attention/self/key/bias',\n prefix + 'attention/self/value/kernel',\n prefix + 'attention/self/value/bias',\n prefix + 'attention/output/dense/kernel',\n prefix + 'attention/output/dense/bias',\n ],\n 'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [\n prefix + 'attention/output/LayerNorm/beta',\n prefix + 'attention/output/LayerNorm/gamma',\n ],\n 'Transformer-%d-FeedForward' % i: [\n prefix + 'intermediate/dense/kernel',\n prefix + 'intermediate/dense/bias',\n prefix + 'output/dense/kernel',\n prefix + 'output/dense/bias',\n ],\n 'Transformer-%d-FeedForward-Norm' % i: [\n prefix + 'output/LayerNorm/beta',\n prefix + 'output/LayerNorm/gamma',\n ],\n })\n\n return mapping\n\n\nclass ALBERT(BERT):\n \"\"\"构建ALBERT模型\n \"\"\"\n def apply_main_layers(self, inputs, index):\n \"\"\"ALBERT的主体是基于Self-Attention的模块\n 顺序:Att --> Add --> LN --> FFN --> Add --> LN\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Transformer-MultiHeadSelfAttention'\n feed_forward_name = 'Transformer-FeedForward'\n attention_mask = self.compute_attention_bias(index)\n\n # Self Attention\n xi, x, arguments = x, [x, x, x], {'a_bias': None}\n if attention_mask is not None:\n arguments['a_bias'] = True\n x.append(attention_mask)\n\n x = self.apply(\n inputs=x,\n layer=MultiHeadAttention,\n arguments=arguments,\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n 
hidden_initializer=self.initializer,\n name='%s-Norm' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n\n return x\n\n def variable_mapping(self):\n \"\"\"映射到官方ALBERT权重格式\n \"\"\"\n mapping = super(ALBERT, self).variable_mapping()\n\n prefix = 'bert/encoder/transformer/group_0/inner_group_0/'\n mapping.update({\n 'Transformer-MultiHeadSelfAttention': [\n prefix + 'attention_1/self/query/kernel',\n prefix + 'attention_1/self/query/bias',\n prefix + 'attention_1/self/key/kernel',\n prefix + 'attention_1/self/key/bias',\n prefix + 'attention_1/self/value/kernel',\n prefix + 'attention_1/self/value/bias',\n prefix + 'attention_1/output/dense/kernel',\n prefix + 'attention_1/output/dense/bias',\n ],\n 'Transformer-MultiHeadSelfAttention-Norm': [\n prefix + 'LayerNorm/beta',\n prefix + 'LayerNorm/gamma',\n ],\n 'Transformer-FeedForward': [\n prefix + 'ffn_1/intermediate/dense/kernel',\n prefix + 'ffn_1/intermediate/dense/bias',\n prefix + 'ffn_1/intermediate/output/dense/kernel',\n prefix + 'ffn_1/intermediate/output/dense/bias',\n ],\n 'Transformer-FeedForward-Norm': [\n prefix + 'LayerNorm_1/beta',\n prefix + 'LayerNorm_1/gamma',\n ],\n })\n\n return mapping\n\n\nclass ALBERT_Unshared(BERT):\n \"\"\"解开ALBERT共享约束,当成BERT用\n \"\"\"\n def variable_mapping(self):\n \"\"\"映射到官方ALBERT权重格式\n \"\"\"\n mapping = super(ALBERT_Unshared, self).variable_mapping()\n\n prefix = 'bert/encoder/transformer/group_0/inner_group_0/'\n for i in range(self.num_hidden_layers):\n mapping.update({\n 'Transformer-%d-MultiHeadSelfAttention' % i: [\n prefix + 'attention_1/self/query/kernel',\n prefix + 'attention_1/self/query/bias',\n prefix + 'attention_1/self/key/kernel',\n prefix + 'attention_1/self/key/bias',\n prefix + 'attention_1/self/value/kernel',\n prefix + 'attention_1/self/value/bias',\n prefix + 'attention_1/output/dense/kernel',\n prefix + 'attention_1/output/dense/bias',\n ],\n 'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [\n prefix + 'LayerNorm/beta',\n prefix + 'LayerNorm/gamma',\n ],\n 'Transformer-%d-FeedForward' % i: [\n prefix + 'ffn_1/intermediate/dense/kernel',\n prefix + 'ffn_1/intermediate/dense/bias',\n prefix + 'ffn_1/intermediate/output/dense/kernel',\n prefix + 'ffn_1/intermediate/output/dense/bias',\n ],\n 'Transformer-%d-FeedForward-Norm' % i: [\n prefix + 'LayerNorm_1/beta',\n prefix + 'LayerNorm_1/gamma',\n ],\n })\n\n return mapping\n\n\nclass NEZHA(BERT):\n \"\"\"华为推出的NAZHA模型\n 链接:https://arxiv.org/abs/1909.00204\n \"\"\"\n def apply_embeddings(self, inputs):\n \"\"\"NEZHA的embedding是token、segment两者embedding之和\n \"\"\"\n inputs = inputs[:]\n x = inputs.pop(0)\n if self.segment_vocab_size > 0:\n s = inputs.pop(0)\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n 
name='Embedding-Token'\n )\n if self.segment_vocab_size > 0:\n if self.shared_segment_embeddings:\n name = 'Embedding-Token'\n else:\n name = 'Embedding-Segment'\n s = self.apply(\n inputs=s,\n layer=Embedding,\n input_dim=2,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n name=name\n )\n x = self.apply(\n inputs=[x, s], layer=Add, name='Embedding-Token-Segment'\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Embedding-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Embedding-Dropout'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Embedding-Mapping'\n )\n\n return x\n\n def apply_main_layers(self, inputs, index):\n \"\"\"NEZHA的主体是基于Self-Attention的模块\n 顺序:Att --> Add --> LN --> FFN --> Add --> LN\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index\n feed_forward_name = 'Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n position_bias = self.compute_position_bias(x)\n\n # Self Attention\n xi, x = x, [x, x, x, position_bias]\n arguments = {'a_bias': None, 'p_bias': 'typical_relative'}\n if attention_mask is not None:\n arguments['a_bias'] = True\n x.insert(3, attention_mask)\n\n x = self.apply(\n inputs=x,\n layer=MultiHeadAttention,\n arguments=arguments,\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n\n return x\n\n def compute_position_bias(self, inputs=None):\n \"\"\"经典相对位置编码\n \"\"\"\n if self.position_bias is None:\n\n x = inputs\n self.position_bias = self.apply(\n inputs=[x, x],\n layer=RelativePositionEmbedding,\n input_dim=2 * 64 + 1,\n output_dim=self.attention_head_size,\n embeddings_initializer='Sinusoidal',\n name='Embedding-Relative-Position',\n trainable=False\n )\n\n return self.position_bias\n\n\nclass ELECTRA(BERT):\n \"\"\"Google推出的ELECTRA模型\n 链接:https://arxiv.org/abs/2003.10555\n \"\"\"\n 
@insert_arguments(with_discriminator=False)\n @delete_arguments('with_pool', 'with_mlm')\n def __init__(\n self,\n max_position, # 序列最大长度\n **kwargs # 其余参数\n ):\n super(ELECTRA, self).__init__(max_position, **kwargs)\n\n def apply_final_layers(self, inputs):\n x = inputs\n\n if self.with_discriminator:\n if self.with_discriminator is True:\n final_activation = 'sigmoid'\n else:\n final_activation = self.with_discriminator\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name='Discriminator-Dense'\n )\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=1,\n activation=final_activation,\n kernel_initializer=self.initializer,\n name='Discriminator-Prediction'\n )\n\n return x\n\n def load_variable(self, checkpoint, name):\n \"\"\"加载单个变量的函数\n \"\"\"\n variable = super(ELECTRA, self).load_variable(checkpoint, name)\n if name == 'electra/embeddings/word_embeddings':\n return self.load_embeddings(variable)\n else:\n return variable\n\n def variable_mapping(self):\n mapping = super(ELECTRA, self).variable_mapping()\n mapping['Embedding-Mapping'] = [\n 'electra/embeddings_project/kernel',\n 'electra/embeddings_project/bias',\n ]\n mapping = {\n k: [i.replace('bert/', 'electra/') for i in v]\n for k, v in mapping.items()\n }\n mapping['Discriminator-Dense'] = [\n 'discriminator_predictions/dense/kernel',\n 'discriminator_predictions/dense/bias',\n ]\n mapping['Discriminator-Prediction'] = [\n 'discriminator_predictions/dense_1/kernel',\n 'discriminator_predictions/dense_1/bias',\n ]\n return mapping\n\n\nclass GPT(LM_Mask, BERT):\n \"\"\"构建GPT模型\n 链接:https://github.com/openai/finetune-transformer-lm\n \"\"\"\n @insert_arguments(final_activation='softmax')\n @delete_arguments('with_pool', 'with_mlm')\n def __init__(self, **kwargs):\n super(GPT, self).__init__(**kwargs)\n\n def apply_embeddings(self, inputs):\n \"\"\"GPT的embedding是token、position、segment三者embedding之和\n 跟BERT的主要区别是三者相加之后没有加LayerNormalization层。\n \"\"\"\n inputs = inputs[:]\n x = inputs.pop(0)\n if self.segment_vocab_size > 0:\n s = inputs.pop(0)\n if self.custom_position_ids:\n p = inputs.pop(0)\n else:\n p = None\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n if self.segment_vocab_size > 0:\n if self.shared_segment_embeddings:\n name = 'Embedding-Token'\n else:\n name = 'Embedding-Segment'\n s = self.apply(\n inputs=s,\n layer=Embedding,\n input_dim=self.segment_vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n name=name\n )\n x = self.apply(\n inputs=[x, s], layer=Add, name='Embedding-Token-Segment'\n )\n x = self.apply(\n inputs=self.simplify([x, p]),\n layer=PositionEmbedding,\n input_dim=self.max_position,\n output_dim=self.embedding_size,\n merge_mode='add',\n hierarchical=self.hierarchical_position,\n embeddings_initializer=self.initializer,\n custom_position_ids=self.custom_position_ids,\n name='Embedding-Position'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Embedding-Dropout'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Embedding-Mapping'\n )\n\n return x\n\n def apply_final_layers(self, inputs):\n \"\"\"剩余部分\n \"\"\"\n x = inputs\n\n # Language Model部分\n x = self.apply(\n 
inputs=x,\n layer=Embedding,\n arguments={'mode': 'dense'},\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=Activation,\n activation=self.final_activation,\n name='LM-Activation'\n )\n\n return x\n\n def load_variable(self, checkpoint, name):\n \"\"\"加载单个变量的函数\n \"\"\"\n variable = super(GPT, self).load_variable(checkpoint, name)\n if name == 'gpt/embeddings/word_embeddings':\n return self.load_embeddings(variable)\n else:\n return variable\n\n def variable_mapping(self):\n \"\"\"映射到TF版GPT权重格式\n \"\"\"\n mapping = super(GPT, self).variable_mapping()\n mapping = {\n k: [\n i.replace('bert/', 'gpt/').replace('encoder', 'transformer')\n for i in v\n ]\n for k, v in mapping.items()\n }\n return mapping\n\n\nclass GPT2(GPT):\n \"\"\"构建GPT2模型\n 链接: https://github.com/openai/gpt-2\n \"\"\"\n def get_inputs(self):\n \"\"\"GPT2的输入是token_ids\n \"\"\"\n x_in = self.apply(\n layer=Input, shape=(self.sequence_length,), name='Input-Token'\n )\n return x_in\n\n def apply_embeddings(self, inputs):\n \"\"\"GPT2的embedding是token、position两者embedding之和\n \"\"\"\n x = inputs\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=PositionEmbedding,\n input_dim=self.max_position,\n output_dim=self.embedding_size,\n merge_mode='add',\n hierarchical=self.hierarchical_position,\n embeddings_initializer=self.initializer,\n name='Embedding-Position'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Embedding-Mapping'\n )\n\n return x\n\n def apply_main_layers(self, inputs, index):\n \"\"\"GPT2的主体是基于Self-Attention的模块\n 顺序:LN --> Att --> Add --> LN --> FFN --> Add\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index\n feed_forward_name = 'Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n\n # Self Attention\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % attention_name\n )\n x = self.apply(\n inputs=[x, x, x, attention_mask],\n layer=MultiHeadAttention,\n arguments={'a_bias': True},\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % 
feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n return x\n\n def apply_final_layers(self, inputs):\n \"\"\"Remaining part of the model\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Output-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Output-Dropout'\n )\n x = super(GPT2, self).apply_final_layers(x)\n\n return x\n\n def variable_mapping(self):\n \"\"\"Map to the TF-version GPT2 weight format\n \"\"\"\n mapping = super(GPT2, self).variable_mapping()\n mapping = {\n k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]\n for k, v in mapping.items()\n }\n mapping['Output-Norm'] = [\n 'gpt/output/LayerNorm/beta',\n 'gpt/output/LayerNorm/gamma',\n ]\n\n return mapping\n\n\nclass GPT2_ML(GPT):\n \"\"\"Build the GPT2_ML model\n Link: https://github.com/imcaspar/gpt2-ml\n Note: although GPT2_ML is billed as GPT2, its structure is actually closer to GPT;\n it presumably calls itself GPT2 because its open-sourced version reaches GPT2's\n 1.5 billion parameters.\n \"\"\"\n def get_inputs(self):\n \"\"\"The input of GPT2_ML is token_ids\n \"\"\"\n x_in = self.apply(\n layer=Input, shape=(self.sequence_length,), name='Input-Token'\n )\n return x_in\n\n def apply_embeddings(self, inputs):\n \"\"\"The GPT2_ML embedding is the sum of the token and position embeddings\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=PositionEmbedding,\n input_dim=self.max_position,\n output_dim=self.embedding_size,\n merge_mode='add',\n hierarchical=self.hierarchical_position,\n embeddings_initializer=self.initializer,\n name='Embedding-Position'\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Embedding-Norm'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Embedding-Mapping'\n )\n\n return x\n\n def apply_main_layers(self, inputs, index):\n \"\"\"The main body of GPT2_ML is a Self-Attention based module\n Order: Att --> Add --> LN --> FFN --> Add --> LN\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index\n feed_forward_name = 'Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n\n # Self Attention\n xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}\n\n x = self.apply(\n inputs=x,\n layer=MultiHeadAttention,\n arguments=arguments,\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n 
hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm-0' % feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n epsilon=1e-5,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm-1' % feed_forward_name\n )\n\n return x\n\n def load_variable(self, checkpoint, name):\n \"\"\"Function for loading a single variable\n \"\"\"\n variable = super(GPT2_ML, self).load_variable(checkpoint, name)\n if name == 'newslm/embeddings/word_embed':\n return self.load_embeddings(variable)\n else:\n return variable\n\n def variable_mapping(self):\n \"\"\"Map to the official GPT2_ML weight format\n \"\"\"\n mapping = {\n 'Embedding-Token': ['newslm/embeddings/word_embed'],\n 'Embedding-Position': ['newslm/embeddings/pos_embed'],\n 'Embedding-Norm': [\n 'newslm/embeddings/LayerNorm_embed_norm/beta',\n 'newslm/embeddings/LayerNorm_embed_norm/gamma',\n ],\n }\n\n for i in range(self.num_hidden_layers):\n prefix = 'newslm/layer%02d/' % i\n mapping.update({\n 'Transformer-%d-MultiHeadSelfAttention' % i: [\n prefix + 'query_layer/kernel',\n prefix + 'query_layer/bias',\n prefix + 'key_layer/kernel',\n prefix + 'key_layer/bias',\n prefix + 'value_layer/kernel',\n prefix + 'value_layer/bias',\n prefix + 'context_projection_layer/kernel',\n prefix + 'context_projection_layer/bias',\n ],\n 'Transformer-%d-FeedForward-Norm-0' % i: [\n prefix + 'LayerNorm_mlp_ln0/beta',\n prefix + 'LayerNorm_mlp_ln0/gamma',\n ],\n 'Transformer-%d-FeedForward' % i: [\n prefix + 'intermediate/kernel',\n prefix + 'intermediate/bias',\n prefix + 'output/kernel',\n prefix + 'output/bias',\n ],\n 'Transformer-%d-FeedForward-Norm-1' % i: [\n prefix + 'LayerNorm_mlp_ln1/beta',\n prefix + 'LayerNorm_mlp_ln1/gamma',\n ],\n })\n\n return mapping\n\n\nclass T5_Base(Transformer):\n \"\"\"Google's T5 model (base class)\n Note that T5 has two versions: the one released first is called t5.1.0, and an upgraded\n version released later is called t5.1.1. The two differ slightly in structure, and the\n multilingual T5 released later also adopts the t5.1.1 structure.\n t5.1.0: https://github.com/google-research/text-to-text-transfer-transformer\n t5.1.1: https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511\n multilingual-t5: https://github.com/google-research/multilingual-t5\n \"\"\"\n @insert_arguments(version='t5.1.0')\n def __init__(self, **kwargs):\n super(T5_Base, self).__init__(**kwargs)\n\n def load_variable(self, checkpoint, name):\n \"\"\"Function for loading a single variable\n \"\"\"\n variable = super(T5_Base, self).load_variable(checkpoint, name)\n if name == 'shared/embedding':\n return self.load_embeddings(variable)\n elif name == 'decoder/logits/kernel':\n return self.load_embeddings(variable.T).T\n elif 'relative_attention_bias' in name:\n return variable.T\n else:\n return variable\n\n def create_variable(self, name, value, dtype=None):\n \"\"\"Create a variable in tensorflow\n \"\"\"\n if 'relative_attention_bias' in name:\n value = value.T\n return super(T5_Base, self).create_variable(name, value, dtype)\n\n def variable_mapping(self):\n \"\"\"Map to the official T5 weight format\n \"\"\"\n mapping = {\n 'Embedding-Token': 
['shared/embedding'],\n 'Encoder-Embedding-Relative-Position': [\n 'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'\n ],\n 'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],\n 'Decoder-Embedding-Relative-Position': [\n 'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',\n ],\n 'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],\n }\n\n for i in range(self.num_hidden_layers):\n # Encoder main body\n prefix = 'encoder/block_%03d/' % i\n mapping.update({\n 'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [\n prefix + 'layer_000/SelfAttention/q',\n prefix + 'layer_000/SelfAttention/k',\n prefix + 'layer_000/SelfAttention/v',\n prefix + 'layer_000/SelfAttention/o',\n ],\n 'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [\n prefix + 'layer_000/layer_norm/scale',\n ],\n 'Encoder-Transformer-%d-FeedForward' % i: [\n prefix + 'layer_001/DenseReluDense/wi/kernel',\n prefix + 'layer_001/DenseReluDense/wo/kernel',\n ],\n 'Encoder-Transformer-%d-FeedForward-Norm' % i: [\n prefix + 'layer_001/layer_norm/scale',\n ],\n })\n # Decoder main body\n prefix = 'decoder/block_%03d/' % i\n mapping.update({\n 'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [\n prefix + 'layer_000/SelfAttention/q',\n prefix + 'layer_000/SelfAttention/k',\n prefix + 'layer_000/SelfAttention/v',\n prefix + 'layer_000/SelfAttention/o',\n ],\n 'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [\n prefix + 'layer_000/layer_norm/scale',\n ],\n 'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [\n prefix + 'layer_001/EncDecAttention/q',\n prefix + 'layer_001/EncDecAttention/k',\n prefix + 'layer_001/EncDecAttention/v',\n prefix + 'layer_001/EncDecAttention/o',\n ],\n 'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [\n prefix + 'layer_001/layer_norm/scale',\n ],\n 'Decoder-Transformer-%d-FeedForward' % i: [\n prefix + 'layer_002/DenseReluDense/wi/kernel',\n prefix + 'layer_002/DenseReluDense/wo/kernel',\n ],\n 'Decoder-Transformer-%d-FeedForward-Norm' % i: [\n prefix + 'layer_002/layer_norm/scale',\n ],\n })\n\n if self.version == 't5.1.1':\n mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']\n mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']\n mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']\n mapping = {\n k: [i.replace('layer_norm', 'rms_norm') for i in v]\n for k, v in mapping.items()\n }\n for i in range(self.num_hidden_layers):\n for layer in [\n 'Encoder-Transformer-%d-FeedForward' % i,\n 'Decoder-Transformer-%d-FeedForward' % i\n ]:\n mapping[layer] = [\n mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],\n mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],\n mapping[layer][1]\n ]\n\n return mapping\n\n\nclass T5_Encoder(T5_Base):\n \"\"\"Google's T5 model (Encoder)\n \"\"\"\n def get_inputs(self):\n \"\"\"The T5 Encoder's only input is token_ids\n \"\"\"\n x_in = self.apply(\n layer=Input,\n shape=(self.sequence_length,),\n name='Encoder-Input-Token'\n )\n return x_in\n\n def apply_embeddings(self, inputs):\n \"\"\"T5's embedding is only the token embedding,\n and the relative position embedding is prepared here for attention to use later.\n \"\"\"\n x = inputs\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Encoder-Embedding-Dropout'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n 
units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Encoder-Embedding-Mapping'\n )\n\n return x\n\n def apply_main_layers(self, inputs, index):\n \"\"\"The main body of the T5 Encoder is a Self-Attention based module\n Order: LN --> Att --> Add --> LN --> FFN --> Add\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index\n feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n position_bias = self.compute_position_bias(x)\n\n # Self Attention\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % attention_name\n )\n x = self.apply(\n inputs=[x, x, x, position_bias],\n layer=MultiHeadAttention,\n arguments={'p_bias': 't5_relative'},\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n use_bias=False,\n attention_scale=False,\n kernel_initializer=self.initializer,\n name=attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n use_bias=False,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n\n return x\n\n def apply_final_layers(self, inputs):\n \"\"\"Remaining part of the model\n \"\"\"\n x = inputs\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Encoder-Output-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Encoder-Output-Dropout'\n )\n\n return x\n\n def compute_position_bias(self, inputs=None):\n \"\"\"T5 relative position encoding\n \"\"\"\n if self.position_bias is None:\n\n x = inputs\n p = self.apply(\n inputs=[x, x],\n layer=RelativePositionEmbeddingT5,\n input_dim=32,\n output_dim=self.num_attention_heads,\n bidirectional=True,\n embeddings_initializer=self.initializer,\n name='Encoder-Embedding-Relative-Position'\n )\n self.position_bias = p\n\n return self.position_bias\n\n\nclass T5_Decoder(LM_Mask, T5_Base):\n \"\"\"Google's T5 model (Decoder)\n \"\"\"\n def __init__(self, with_lm=True, **kwargs):\n super(T5_Decoder, self).__init__(**kwargs)\n self.with_lm = with_lm\n\n def get_inputs(self):\n \"\"\"The T5 Decoder's inputs are the context sequence and token_ids\n \"\"\"\n c_in = self.apply(\n layer=Input,\n shape=(self.sequence_length, self.hidden_size),\n name='Input-Context'\n )\n x_in = 
self.apply(\n layer=Input,\n shape=(self.sequence_length,),\n name='Decoder-Input-Token'\n )\n return [c_in, x_in]\n\n def apply_embeddings(self, inputs):\n \"\"\"T5's embedding is only the token embedding,\n and the relative position embedding is prepared here for attention to use later.\n \"\"\"\n c, x = inputs\n\n x = self.apply(\n inputs=x,\n layer=Embedding,\n input_dim=self.vocab_size,\n output_dim=self.embedding_size,\n embeddings_initializer=self.initializer,\n mask_zero=True,\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Decoder-Embedding-Dropout'\n )\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.hidden_size,\n kernel_initializer=self.initializer,\n name='Decoder-Embedding-Mapping'\n )\n\n return [c, x]\n\n def apply_main_layers(self, inputs, index):\n \"\"\"The main body of the T5 Decoder is a module based on Self-Attention and Cross-Attention\n Order: LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add\n \"\"\"\n c, x = inputs\n z = self.layer_norm_conds[0]\n\n self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index\n cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index\n feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index\n attention_mask = self.compute_attention_bias(index)\n position_bias = self.compute_position_bias([x, c])\n\n # Self Attention\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % self_attention_name\n )\n x = self.apply(\n inputs=[x, x, x, attention_mask, position_bias[0]],\n layer=MultiHeadAttention,\n arguments={\n 'a_bias': True,\n 'p_bias': 't5_relative'\n },\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n use_bias=False,\n attention_scale=False,\n kernel_initializer=self.initializer,\n name=self_attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % self_attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name\n )\n\n # Cross Attention\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='%s-Norm' % cross_attention_name\n )\n x = self.apply(\n inputs=[x, c, c, position_bias[1]],\n layer=MultiHeadAttention,\n arguments={\n 'a_bias': None,\n 'p_bias': 't5_relative'\n },\n heads=self.num_attention_heads,\n head_size=self.attention_head_size,\n out_dim=self.hidden_size,\n key_size=self.attention_key_size,\n use_bias=False,\n attention_scale=False,\n kernel_initializer=self.initializer,\n name=cross_attention_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % cross_attention_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name\n )\n\n # Feed Forward\n xi = x\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n 
hidden_initializer=self.initializer,\n name='%s-Norm' % feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=FeedForward,\n units=self.intermediate_size,\n activation=self.hidden_act,\n use_bias=False,\n kernel_initializer=self.initializer,\n name=feed_forward_name\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='%s-Dropout' % feed_forward_name\n )\n x = self.apply(\n inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name\n )\n\n return [c, x]\n\n def apply_final_layers(self, inputs):\n \"\"\"Remaining part of the model\n \"\"\"\n c, x = inputs\n z = self.layer_norm_conds[0]\n\n x = self.apply(\n inputs=self.simplify([x, z]),\n layer=LayerNormalization,\n center=False,\n epsilon=1e-6,\n conditional=(z is not None),\n hidden_units=self.layer_norm_conds[1],\n hidden_activation=self.layer_norm_conds[2],\n hidden_initializer=self.initializer,\n name='Decoder-Output-Norm'\n )\n x = self.apply(\n inputs=x,\n layer=Dropout,\n rate=self.dropout_rate,\n name='Decoder-Output-Dropout'\n )\n x = self.apply(\n inputs=x,\n layer=Lambda,\n function=lambda x: x / np.sqrt(self.hidden_size),\n mask=lambda i, m: m,\n name='Decoder-Output-Scale'\n )\n\n if self.with_lm:\n # token probability prediction part\n if self.embedding_size != self.hidden_size:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.embedding_size,\n kernel_initializer=self.initializer,\n name='Decoder-Output-Mapping'\n )\n lm_activation = 'softmax' if self.with_lm is True else self.with_lm\n if self.version == 't5.1.0':\n x = self.apply(\n inputs=x,\n layer=Embedding,\n arguments={'mode': 'dense'},\n name='Embedding-Token'\n )\n x = self.apply(\n inputs=x,\n layer=Activation,\n activation=lm_activation,\n name='Decoder-Output-LM-Activation'\n )\n else:\n x = self.apply(\n inputs=x,\n layer=Dense,\n units=self.vocab_size,\n activation=lm_activation,\n use_bias=False,\n kernel_initializer=self.initializer,\n name='Decoder-Output-LM'\n )\n\n return x\n\n def compute_attention_bias(self, inputs=None):\n \"\"\"Change the sequence length of the LM Mask (from self.inputs[0] to self.inputs[1])\n \"\"\"\n old_inputs = self.inputs[:]\n self.inputs = [old_inputs[1]]\n mask = super(T5_Decoder, self).compute_attention_bias(inputs)\n self.inputs = old_inputs\n return mask\n\n def compute_position_bias(self, inputs=None):\n \"\"\"T5 relative position encoding\n \"\"\"\n if self.position_bias is None:\n\n x, c = inputs\n p1 = self.apply(\n inputs=[x, x],\n layer=RelativePositionEmbeddingT5,\n input_dim=32,\n output_dim=self.num_attention_heads,\n bidirectional=False,\n embeddings_initializer=self.initializer,\n name='Decoder-Embedding-Relative-Position'\n )\n p2 = self.apply(\n inputs=[x, c],\n layer=RelativePositionEmbeddingT5,\n input_dim=32,\n output_dim=self.num_attention_heads,\n bidirectional=False,\n embeddings_initializer=self.initializer,\n name='Decoder-Embedding-Relative-Position'\n )\n self.position_bias = (p1, p2)\n\n return self.position_bias\n\n\nclass T5(T5_Base):\n \"\"\"Google's T5 model (Encoder-Decoder)\n \"\"\"\n def __init__(self, **kwargs):\n super(T5, self).__init__(**kwargs)\n kwargs['layers'] = self.layers\n e_name, d_name = 'Encoder', 'Decoder'\n if 'name' in kwargs:\n e_name = '%s_%s' % (kwargs['name'], e_name)\n d_name = '%s_%s' % (kwargs['name'], d_name)\n del kwargs['name'] # avoid passing the argument twice\n self._encoder = T5_Encoder(name=e_name, **kwargs)\n self._decoder = T5_Decoder(name=d_name, **kwargs)\n\n def build(self, **kwargs):\n \"\"\"Build the Encoder and the Decoder at the same time\n \"\"\"\n self._encoder.build(**kwargs)\n self._decoder.build(**kwargs)\n self.encoder = self._encoder.model\n self.decoder = 
self._decoder.model\n self.inputs = self.encoder.inputs + self.decoder.inputs[1:]\n self.outputs = self.decoder(\n self.encoder.outputs + self.decoder.inputs[1:]\n )\n self.model = Model(self.inputs, self.outputs)\n\n\ndef extend_with_language_model(BaseModel):\n \"\"\"Add a lower-triangular Attention Mask (for language models)\n \"\"\"\n class LanguageModel(LM_Mask, BaseModel):\n \"\"\"Derived model with a lower-triangular Attention Mask\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(LanguageModel, self).__init__(*args, **kwargs)\n self.with_mlm = self.with_mlm or True\n\n return LanguageModel\n\n\ndef extend_with_unified_language_model(BaseModel):\n \"\"\"Add the UniLM Attention Mask (for Seq2Seq models)\n \"\"\"\n class UnifiedLanguageModel(UniLM_Mask, BaseModel):\n \"\"\"Derived model with the UniLM Attention Mask\n UniLM: https://arxiv.org/abs/1905.03197\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(UnifiedLanguageModel, self).__init__(*args, **kwargs)\n self.with_mlm = self.with_mlm or True\n\n return UnifiedLanguageModel\n\n\ndef build_transformer_model(\n config_path=None,\n checkpoint_path=None,\n model='bert',\n application='encoder',\n return_keras_model=True,\n **kwargs\n):\n \"\"\"Build the model from a config file, optionally loading checkpoint weights\n \"\"\"\n configs = {}\n if config_path is not None:\n configs.update(json.load(open(config_path)))\n configs.update(kwargs)\n if 'max_position' not in configs:\n configs['max_position'] = configs.get('max_position_embeddings', 512)\n if 'dropout_rate' not in configs:\n configs['dropout_rate'] = configs.get('hidden_dropout_prob')\n if 'segment_vocab_size' not in configs:\n configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)\n\n models = {\n 'bert': BERT,\n 'albert': ALBERT,\n 'albert_unshared': ALBERT_Unshared,\n 'roberta': BERT,\n 'nezha': NEZHA,\n 'electra': ELECTRA,\n 'gpt': GPT,\n 'gpt2': GPT2,\n 'gpt2_ml': GPT2_ML,\n 't5': T5,\n 't5_encoder': T5_Encoder,\n 't5_decoder': T5_Decoder,\n 't5.1.0': T5,\n 't5.1.0_encoder': T5_Encoder,\n 't5.1.0_decoder': T5_Decoder,\n 't5.1.1': T5,\n 't5.1.1_encoder': T5_Encoder,\n 't5.1.1_decoder': T5_Decoder,\n }\n\n if is_string(model):\n model = model.lower()\n MODEL = models[model]\n else:\n MODEL = model\n\n application = application.lower()\n if application in ['lm', 'unilm'] and model in ['electra', 't5']:\n raise ValueError(\n '\"%s\" model can not be used as \"%s\" application.\\n' %\n (model, application)\n )\n\n if application == 'lm':\n MODEL = extend_with_language_model(MODEL)\n elif application == 'unilm':\n MODEL = extend_with_unified_language_model(MODEL)\n\n if model.startswith('t5.1.1'):\n configs['version'] = 't5.1.1'\n\n transformer = MODEL(**configs)\n # The model is created here via the build() function of the Transformer class.\n transformer.build(**configs)\n\n if checkpoint_path is not None:\n transformer.load_weights_from_checkpoint(checkpoint_path)\n\n if return_keras_model:\n return transformer.model\n else:\n return transformer\n" ]
[ [ "numpy.concatenate", "numpy.dot", "numpy.random.randn", "numpy.sqrt", "numpy.average" ] ]
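The build_transformer_model factory at the end of the entry above ties the whole file together. Below is a minimal usage sketch; it assumes the source above is importable as a module named models, and both file paths are placeholders rather than files shipped with this dump. The model keys ('bert', 'gpt2_ml', 't5.1.1', ...) and application values ('encoder', 'lm', 'unilm') are taken directly from the function itself.

# Sketch only: `models` is the module transcribed above; both paths are hypothetical.
from models import build_transformer_model

unilm = build_transformer_model(
    config_path='bert_config.json',      # placeholder path
    checkpoint_path='bert_model.ckpt',   # placeholder path
    model='bert',
    application='unilm',                 # wraps BERT with the UniLM attention mask
    return_keras_model=True,             # hand back the underlying keras.Model
)
unilm.summary()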
SagarRoy1996/TabularDataExtraction
[ "59b05dde00272e7f04f56b89bd2139e3a4e252e5" ]
[ "examples/eg1/eg1.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport re\nfrom math import radians, degrees\n\nimport numpy as np\nimport pandas as pd\nimport cv2\n\nfrom pdftabextract import imgproc\nfrom pdftabextract.geom import pt\nfrom pdftabextract.common import read_xml, parse_pages, save_page_grids\nfrom pdftabextract.textboxes import rotate_textboxes, sorted_by_attr\nfrom pdftabextract.clustering import (find_clusters_1d_break_dist,\n calc_cluster_centers_1d,\n zip_clusters_and_values)\nfrom pdftabextract.splitpages import split_page_texts, create_split_pages_dict_structure\nfrom pdftabextract.extract import make_grid_from_positions, fit_texts_into_grid, datatable_to_dataframe\n\n\n#%% Some constants\n#DATAPATH = 'data/'\n#DATAPATH = 'ip/'\n#OUTPUTPATH = 'generated_output/'\n#OUTPUTPATH = 'op/'\n#INPUT_XML = 'output.xml'\n#INPUT_XML = 'output.xml'\n\nDATAPATH = 'data/'\nOUTPUTPATH = 'generated_output/'\nINPUT_XML = 'schoollist_1.pdf.xml'\n\nMIN_ROW_HEIGHT = 260 # minimum height of a row in pixels, measured in the scanned pages\nMIN_COL_WIDTH = 194 # very important. the minimum width of a column in pixels, measured in the scanned pages\n\n#%% Some helper functions\ndef save_image_w_lines(iproc_obj, imgfilebasename, orig_img_as_background, file_suffix_prefix=''):\n file_suffix = 'lines-orig' if orig_img_as_background else 'lines'\n \n img_lines = iproc_obj.draw_lines(orig_img_as_background=orig_img_as_background)\n img_lines_file = os.path.join(OUTPUTPATH, '%s-%s.png' % (imgfilebasename, file_suffix_prefix + file_suffix))\n \n print(\"> saving image with detected lines to '%s'\" % img_lines_file)\n cv2.imwrite(img_lines_file, img_lines)\n\n#%% Read the XML\n\n# Load the XML that was generated with pdftohtml\nxmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))\n\n# parse it and generate a dict of pages\npages = parse_pages(xmlroot, require_image=True)\n\n#%% Split the scanned double pages so that we can later process the lists page-by-page\n\nsplit_texts_and_images = [] # list of tuples with (double page, split text boxes, split images)\n\nfor p_num, p in pages.items():\n # get the image file of the scanned page\n imgfilebasename = p['image'][:p['image'].rindex('.')]\n imgfile = os.path.join(DATAPATH, p['image'])\n \n print(\"page %d: detecting lines in image file '%s'...\" % (p_num, imgfile))\n \n # create an image processing object with the scanned page\n iproc_obj = imgproc.ImageProc(imgfile)\n \n # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions\n page_scaling_x = iproc_obj.img_w / p['width']\n page_scaling_y = iproc_obj.img_h / p['height']\n image_scaling = (page_scaling_x, # scaling in X-direction\n page_scaling_y) # scaling in Y-direction\n \n # detect the lines in the double pages\n lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,\n hough_rho_res=1,\n hough_theta_res=np.pi/500,\n hough_votes_thresh=350)\n print(\"> found %d lines\" % len(lines_hough))\n \n save_image_w_lines(iproc_obj, imgfilebasename, True, 'bothpages-')\n \n # find the vertical line that separates both sides\n sep_line_img_x = iproc_obj.find_pages_separator_line(dist_thresh=MIN_COL_WIDTH/2)\n sep_line_page_x = sep_line_img_x / page_scaling_x\n print(\"> found pages separator line at %f (image space position) / %f (page space position)\"\n % (sep_line_img_x, sep_line_page_x))\n \n # split the scanned double page at the separator line\n split_images = iproc_obj.split_image(sep_line_img_x)\n \n # split the textboxes at 
the separator line\n split_texts = split_page_texts(p, sep_line_page_x)\n \n split_texts_and_images.append((p, split_texts, split_images))\n \n# generate a new XML and \"pages\" dict structure from the split pages\nsplit_pages_xmlfile = os.path.join(OUTPUTPATH, INPUT_XML[:INPUT_XML.rindex('.')] + '.split.xml')\nprint(\"> saving split pages XML to '%s'\" % split_pages_xmlfile)\nsplit_tree, split_root, split_pages = create_split_pages_dict_structure(split_texts_and_images,\n save_to_output_path=split_pages_xmlfile)\n\n# we don't need the original double pages any more, we'll work with 'split_pages'\ndel pages\n\n#%% Detect clusters of horizontal lines using the image processing module and rotate back or deskew pages\n\nhori_lines_clusters = {}\npages_image_scaling = {} # scaling of the scanned page image in relation to the OCR page dimensions for each page\n\nfor p_num, p in split_pages.items():\n # get the image file of the scanned page\n imgfilebasename = p['image'][:p['image'].rindex('.')]\n imgfile = os.path.join(OUTPUTPATH, p['image'])\n \n print(\"page %d: detecting lines in image file '%s'...\" % (p_num, imgfile))\n \n # create an image processing object with the scanned page\n iproc_obj = imgproc.ImageProc(imgfile)\n \n # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions\n page_scaling_x = iproc_obj.img_w / p['width']\n page_scaling_y = iproc_obj.img_h / p['height']\n pages_image_scaling[p_num] = (page_scaling_x, # scaling in X-direction\n page_scaling_y) # scaling in Y-direction\n \n # detect the lines\n lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,\n hough_rho_res=1,\n hough_theta_res=np.pi/500,\n hough_votes_thresh=round(0.2 * iproc_obj.img_w))\n print(\"> found %d lines\" % len(lines_hough))\n \n save_image_w_lines(iproc_obj, imgfilebasename, True)\n save_image_w_lines(iproc_obj, imgfilebasename, False)\n \n # find rotation or skew\n # the parameters are:\n # 1. the minimum threshold in radians for a rotation to be counted as such\n # 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew)\n # 3. 
an optional threshold to filter out \"stray\" lines whose angle is too far apart from the median angle of\n # all other lines that go in the same direction (no effect here)\n rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5), # uses \"lines_hough\"\n radians(1),\n omit_on_rot_thresh=radians(0.5))\n \n # rotate back text boxes\n # since often no vertical lines can be detected and hence it cannot be determined if the page is rotated or skewed,\n # we assume that it's always rotated\n if rot_or_skew_type is not None:\n print(\"> rotating back by %f°\" % -degrees(rot_or_skew_radians))\n rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))\n \n # rotate back detected lines\n lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)\n \n save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', True)\n save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', False)\n \n # cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function\n # (break on distance MIN_ROW_HEIGHT/2)\n # additionally, remove all cluster sections that are considered empty\n # a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes\n # per cluster section\n hori_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_HORIZONTAL, find_clusters_1d_break_dist,\n remove_empty_cluster_sections_use_texts=p['texts'], # use this page's textboxes\n remove_empty_cluster_sections_n_texts_ratio=0.1, # 10% rule\n remove_empty_cluster_sections_scaling=page_scaling_y, # the positions are in \"scanned image space\" -> we scale them to \"text box space\"\n dist_thresh=MIN_ROW_HEIGHT/2)\n print(\"> found %d clusters\" % len(hori_clusters))\n \n if len(hori_clusters) > 0:\n # draw the clusters\n img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_HORIZONTAL, hori_clusters)\n save_img_file = os.path.join(OUTPUTPATH, '%s-hori-clusters.png' % imgfilebasename)\n print(\"> saving image with detected horizontal clusters to '%s'\" % save_img_file)\n cv2.imwrite(save_img_file, img_w_clusters)\n \n hori_lines_clusters[p_num] = hori_clusters\n else:\n print(\"> no horizontal line clusters found\")\n\n# save split and repaired XML (i.e. 
XML with deskewed textbox positions)\noutput_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]\nrepaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.split.repaired.xml')\n\nprint(\"saving split and repaired XML file to '%s'...\" % repaired_xmlfile)\nsplit_tree.write(repaired_xmlfile)\n\n\n#%% Determine the rows and columns of the tables\n\npttrn_schoolnum = re.compile(r'^\\d{6}$') # a valid school number indicates a table row\npage_grids = {}\n\nprint(\"detecting rows and columns...\")\nfor p_num, p in split_pages.items():\n scaling_x, scaling_y = pages_image_scaling[p_num]\n \n # try to find out the table rows in this page using the horizontal lines that were detected before\n hori_lines = list(np.array(calc_cluster_centers_1d(hori_lines_clusters[p_num])) / scaling_y)\n hori_lines.append(p['height']) # last line: page bottom\n \n prev_line_y = 0\n row_texts = []\n row_positions = []\n in_table = False # is True when the current segment is a real table row (not a table header or surrounding text)\n for line_y in hori_lines:\n # get all texts in this row\n segment_texts = [t for t in p['texts'] if prev_line_y < t['bottom'] <= line_y]\n \n if not segment_texts: continue # skip empty rows\n \n # try to find the start and the end of the table\n for t in segment_texts:\n t_val = t['value'].strip()\n if pttrn_schoolnum.search(t_val): # if this matches, we found the start of the table\n if not in_table:\n in_table = True\n row_positions.append(prev_line_y)\n break\n else:\n if in_table: # we found the end of the table\n in_table = False\n \n if in_table: # this is a table row, so add the texts and row positions to the respective lists\n row_texts.append(segment_texts)\n row_positions.append(line_y)\n \n prev_line_y = line_y\n \n # try to find out the table columns in this page using the distribution of x-coordinates of the left position of\n # each text box in all rows\n text_xs = []\n for texts in row_texts:\n text_xs.extend([t['left'] for t in texts])\n \n text_xs = np.array(text_xs)\n \n # make clusters of x positions\n text_xs_clusters = find_clusters_1d_break_dist(text_xs, dist_thresh=MIN_COL_WIDTH/2/scaling_x)\n text_xs_clusters_w_values = zip_clusters_and_values(text_xs_clusters, text_xs)\n col_positions = calc_cluster_centers_1d(text_xs_clusters_w_values)\n \n # remove falsely identified columns (i.e. 
merge columns with only a few text boxes)\n filtered_col_positions = []\n n_rows = len(row_positions)\n n_cols = len(col_positions)\n if n_cols > 1 and n_rows > 1:\n top_y = row_positions[0]\n bottom_y = row_positions[-1]\n \n # append the rightmost text's right border as the last column border\n rightmost_pos = sorted_by_attr(p['texts'], 'right')[-1]['right']\n col_positions.append(rightmost_pos)\n \n # merge columns with few text boxes\n texts_in_table = [t for t in p['texts'] if top_y < t['top'] + t['height']/2 <= bottom_y]\n prev_col_x = col_positions[0]\n for col_x in col_positions[1:]:\n col_texts = [t for t in texts_in_table if prev_col_x < t['left'] + t['width']/2 <= col_x]\n\n if len(col_texts) >= n_rows: # there should be at least one text box per row\n filtered_col_positions.append(prev_col_x)\n last_col_x = col_x\n prev_col_x = col_x\n \n # manually add border for the last column because it has very few or no text boxes\n filtered_col_positions.append(filtered_col_positions[-1] + (rightmost_pos - filtered_col_positions[-1]) / 2)\n filtered_col_positions.append(rightmost_pos)\n\n # create the grid\n if filtered_col_positions:\n grid = make_grid_from_positions(filtered_col_positions, row_positions)\n \n n_rows = len(grid)\n n_cols = len(grid[0])\n print(\"> page %d: grid with %d rows, %d columns\" % (p_num, n_rows, n_cols))\n \n page_grids[p_num] = grid\n else: # this happens for the first page as there's no table on that\n print(\"> page %d: no table found\" % p_num)\n \n# save the page grids\n\n# After you created the page grids, you should then check that they're correct using pdf2xml-viewer's \n# loadGridFile() function\n\npage_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids.json')\nprint(\"saving page grids JSON file to '%s'\" % page_grids_file)\nsave_page_grids(page_grids, page_grids_file)\n\n#%% Create data frames (requires pandas library)\n\n# For sake of simplicity, we will just fit the text boxes into the grid, merge the texts in their cells (splitting text\n# boxes to separate lines if necessary) and output the result. Normally, you would do some more parsing here, e.g.\n# extracting the address components from the second column.\n\nfull_df = pd.DataFrame()\nprint(\"fitting text boxes into page grids and generating final output...\")\nfor p_num, p in split_pages.items():\n if p_num not in page_grids: continue # happens when no table was detected\n\n print(\"> page %d\" % p_num)\n datatable, unmatched_texts = fit_texts_into_grid(p['texts'], page_grids[p_num], return_unmatched_texts=True)\n \n df = datatable_to_dataframe(datatable, split_texts_in_lines=True)\n df['from_page'] = p_num\n full_df = full_df.append(df, ignore_index=True)\n\nprint(\"extracted %d rows from %d pages\" % (len(full_df), len(split_pages)))\n\ncsv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.csv')\nprint(\"saving extracted data to '%s'\" % csv_output_file)\nfull_df.to_csv(csv_output_file, index=False)\n\nexcel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.xlsx')\nprint(\"saving extracted data to '%s'\" % excel_output_file)\nfull_df.to_excel(excel_output_file, index=False)\n" ]
[ [ "pandas.DataFrame", "numpy.array" ] ]
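Both the row and column detection in the script above hinge on one primitive, find_clusters_1d_break_dist(values, dist_thresh=...). The following is a self-contained numpy re-implementation of the idea for illustration only; the library's own function works on cluster indices and handles more cases:

import numpy as np

def clusters_1d_break_dist(values, dist_thresh):
    """Group 1D values into clusters, starting a new cluster whenever the
    gap between consecutive sorted values exceeds dist_thresh (the same
    idea as pdftabextract's find_clusters_1d_break_dist)."""
    values = np.sort(np.asarray(values)).tolist()
    if not values:
        return []
    clusters, current = [], [values[0]]
    for v in values[1:]:
        if v - current[-1] > dist_thresh:
            clusters.append(current)
            current = []
        current.append(v)
    clusters.append(current)
    return clusters

xs = [10, 12, 11, 205, 210, 400, 398, 402]
print(clusters_1d_break_dist(xs, dist_thresh=50))
# -> [[10, 11, 12], [205, 210], [398, 400, 402]]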
Leonardo-Blanger/detr_tensorflow
[ "38fc3c586b6767deed09bd7ec6c2a2fd7002346e" ]
[ "detr_tensorflow/models/custom_layers.py" ]
[ "import tensorflow as tf\n\n\nclass FrozenBatchNorm2D(tf.keras.layers.Layer):\n def __init__(self, eps=1e-5, **kwargs):\n super().__init__(**kwargs)\n self.eps = eps\n\n def build(self, input_shape):\n self.weight = self.add_weight(name='weight', shape=[input_shape[-1]],\n initializer='zeros', trainable=False)\n self.bias = self.add_weight(name='bias', shape=[input_shape[-1]],\n initializer='zeros', trainable=False)\n self.running_mean = self.add_weight(name='running_mean',\n shape=[input_shape[-1]],\n initializer='zeros',\n trainable=False)\n self.running_var = self.add_weight(name='running_var',\n shape=[input_shape[-1]],\n initializer='ones',\n trainable=False)\n\n def call(self, x):\n scale = self.weight * tf.math.rsqrt(self.running_var + self.eps)\n shift = self.bias - self.running_mean * scale\n return x * scale + shift\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass Linear(tf.keras.layers.Layer):\n '''\n Use this custom layer instead of tf.keras.layers.Dense\n to allow loading converted PyTorch Dense weights\n that have shape (output_dim, input_dim)\n '''\n def __init__(self, output_dim, **kwargs):\n super().__init__(**kwargs)\n self.output_dim = output_dim\n\n def build(self, input_shape):\n self.kernel = self.add_weight(name='kernel',\n shape=[self.output_dim, input_shape[-1]],\n initializer='zeros', trainable=True)\n self.bias = self.add_weight(name='bias',\n shape=[self.output_dim],\n initializer='zeros', trainable=True)\n\n def call(self, x):\n return tf.matmul(x, self.kernel, transpose_b=True) + self.bias\n\n def compute_output_shape(self, input_shape):\n return input_shape.as_list()[:-1] + [self.output_dim]\n\n\nclass FixedEmbedding(tf.keras.layers.Layer):\n def __init__(self, embed_shape, **kwargs):\n super().__init__(**kwargs)\n self.embed_shape = embed_shape\n\n def build(self, input_shape):\n self.w = self.add_weight(name='kernel', shape=self.embed_shape,\n initializer='zeros', trainable=True)\n\n def call(self, x=None):\n return self.w\n" ]
[ [ "tensorflow.matmul", "tensorflow.math.rsqrt" ] ]
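The docstring of the Linear layer above explains its purpose: PyTorch stores dense kernels as (output_dim, input_dim), so a converted weight can be assigned without a transpose, unlike tf.keras.layers.Dense. A usage sketch under the assumption that the package is installed with the file path shown above; the shapes are arbitrary example values:

import numpy as np
import tensorflow as tf
from detr_tensorflow.models.custom_layers import Linear  # assumed importable as laid out above

layer = Linear(output_dim=8)
x = tf.zeros((2, 16))
_ = layer(x)  # first call builds the (8, 16) kernel and (8,) bias

pt_kernel = np.random.randn(8, 16).astype('float32')  # (out_dim, in_dim), PyTorch layout
pt_bias = np.random.randn(8).astype('float32')
layer.set_weights([pt_kernel, pt_bias])               # no transpose needed
print(layer(x).shape)                                 # (2, 8)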
Zenodia/NeMo
[ "3c288d8a7caf667c95444c39434e3ebc5f53d911" ]
[ "nemo/collections/asr/metrics/rnnt_wer.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Optional, Union\n\nimport editdistance\nimport torch\nfrom pytorch_lightning.metrics import Metric\n\nfrom nemo.collections.asr.parts import rnnt_beam_decoding as beam_decode\nfrom nemo.collections.asr.parts import rnnt_greedy_decoding as greedy_decode\nfrom nemo.collections.asr.parts.rnnt_utils import Hypothesis, NBestHypotheses\nfrom nemo.utils import logging\n\n__all__ = ['RNNTDecoding', 'RNNTWER']\n\n\nclass AbstractRNNTDecoding(ABC):\n \"\"\"\n Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.\n\n Args:\n decoding_cfg: A dict-like object which contains the following key-value pairs.\n strategy: str value which represents the type of decoding that can occur.\n Possible values are :\n - greedy, greedy_batch (for greedy decoding).\n - beam, tsd, alsd (for beam search decoding).\n\n compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded\n tokens as well as the decoded string. Default is False in order to avoid double decoding\n unless required.\n\n The config may further contain the following sub-dictionaries:\n \"greedy\":\n max_symbols: int, describing the maximum number of target tokens to decode per\n timestep during greedy decoding. Setting to larger values allows longer sentences\n to be decoded, at the cost of increased execution time.\n\n \"beam\":\n beam_size: int, defining the beam size for beam search. Must be >= 1.\n If beam_size == 1, will perform cached greedy search. This might be slightly different\n results compared to the greedy search above.\n\n score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.\n Set to True by default.\n\n return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the\n hypotheses after beam search has concluded. This flag is set by default.\n\n tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols\n per timestep of the acoustic model. 
Larger values will allow longer sentences to be decoded,\n at increased cost to execution time.\n\n alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.\n If an integer is provided, it can decode sequences of that particular maximum length.\n If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),\n where seq_len is the length of the acoustic model output (T).\n\n NOTE:\n If a float is provided, it can be greater than 1!\n By default, a float of 2.0 is used so that a target sequence can be at most twice\n as long as the acoustic model output length T.\n\n decoder: The Decoder/Prediction network module.\n joint: The Joint network module.\n blank_id: The id of the RNNT blank token.\n \"\"\"\n\n def __init__(self, decoding_cfg, decoder, joint, blank_id: int):\n super(AbstractRNNTDecoding, self).__init__()\n self.cfg = decoding_cfg\n self.blank_id = blank_id\n self.compute_hypothesis_token_set = self.cfg.get(\"compute_hypothesis_token_set\", False)\n\n possible_strategies = ['greedy', 'greedy_batch', 'beam', 'tsd', 'alsd']\n if self.cfg.strategy not in possible_strategies:\n raise ValueError(f\"Decoding strategy must be one of {possible_strategies}\")\n\n if self.cfg.strategy == 'greedy':\n self.decoding = greedy_decode.GreedyRNNTInfer(\n decoder_model=decoder,\n joint_model=joint,\n blank_index=self.blank_id,\n max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),\n )\n\n elif self.cfg.strategy == 'greedy_batch':\n self.decoding = greedy_decode.GreedyBatchedRNNTInfer(\n decoder_model=decoder,\n joint_model=joint,\n blank_index=self.blank_id,\n max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),\n )\n\n elif self.cfg.strategy == 'beam':\n\n self.decoding = beam_decode.BeamRNNTInfer(\n decoder_model=decoder,\n joint_model=joint,\n beam_size=self.cfg.beam.beam_size,\n return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),\n search_type='default',\n score_norm=self.cfg.beam.get('score_norm', True),\n )\n\n elif self.cfg.strategy == 'tsd':\n\n self.decoding = beam_decode.BeamRNNTInfer(\n decoder_model=decoder,\n joint_model=joint,\n beam_size=self.cfg.beam.beam_size,\n return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),\n search_type='tsd',\n score_norm=self.cfg.beam.get('score_norm', True),\n tsd_max_sym_exp_per_step=self.cfg.beam.get('tsd_max_sym_exp', 50),\n )\n\n elif self.cfg.strategy == 'alsd':\n\n self.decoding = beam_decode.BeamRNNTInfer(\n decoder_model=decoder,\n joint_model=joint,\n beam_size=self.cfg.beam.beam_size,\n return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),\n search_type='alsd',\n score_norm=self.cfg.beam.get('score_norm', True),\n alsd_max_target_len=self.cfg.beam.get('alsd_max_target_len', 2),\n )\n\n def rnnt_decoder_predictions_tensor(\n self, encoder_output: torch.Tensor, encoded_lengths: torch.Tensor, return_hypotheses: bool = False\n ) -> (List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]):\n \"\"\"\n Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.\n\n Args:\n encoder_output: torch.Tensor of shape [B, D, T].\n encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].\n return_hypotheses: bool. 
If set to True it will return list of Hypothesis or NBestHypotheses\n\n Returns:\n If `return_best_hypothesis` is set:\n A tuple (hypotheses, None):\n hypotheses - list of Hypothesis (best hypothesis per sample).\n Look at rnnt_utils.Hypothesis for more information.\n\n If `return_best_hypothesis` is not set:\n A tuple(hypotheses, all_hypotheses)\n hypotheses - list of Hypothesis (best hypothesis per sample).\n Look at rnnt_utils.Hypothesis for more information.\n all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted\n list of all the hypotheses of the model per sample.\n Look at rnnt_utils.NBestHypotheses for more information.\n \"\"\"\n # Compute hypotheses\n with torch.no_grad():\n hypotheses_list = self.decoding(\n encoder_output=encoder_output, encoded_lengths=encoded_lengths\n ) # type: [List[Hypothesis]]\n\n # extract the hypotheses\n hypotheses_list = hypotheses_list[0] # type: List[Hypothesis]\n\n prediction_list = hypotheses_list\n\n if isinstance(prediction_list[0], NBestHypotheses):\n hypotheses = []\n all_hypotheses = []\n for nbest_hyp in prediction_list: # type: NBestHypotheses\n n_hyps = nbest_hyp.n_best_hypotheses # Extract all hypotheses for this sample\n decoded_hyps = self.decode_hypothesis(n_hyps) # type: List[str]\n hypotheses.append(decoded_hyps[0]) # best hypothesis\n all_hypotheses.append(decoded_hyps)\n if return_hypotheses:\n return hypotheses, all_hypotheses\n best_hyp_text = [h.text for h in hypotheses]\n all_hyp_text = [h.text for hh in all_hypotheses for h in hh]\n return best_hyp_text, all_hyp_text\n else:\n hypotheses = self.decode_hypothesis(prediction_list) # type: List[str]\n if return_hypotheses:\n return hypotheses, None\n best_hyp_text = [h.text for h in hypotheses]\n return best_hyp_text, None\n\n def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:\n \"\"\"\n Decode a list of hypotheses into a list of strings.\n\n Args:\n hypotheses_list: List of Hypothesis.\n\n Returns:\n A list of strings.\n \"\"\"\n for ind in range(len(hypotheses_list)):\n # Extract the integer encoded hypothesis\n prediction = hypotheses_list[ind].y_sequence\n\n if type(prediction) != list:\n prediction = prediction.tolist()\n\n # RNN-T sample level is already preprocessed by implicit CTC decoding\n # Simply remove any blank tokens\n prediction = [p for p in prediction if p != self.blank_id]\n\n # De-tokenize the integer tokens\n hypothesis = self.decode_tokens_to_str(prediction)\n hypotheses_list[ind].text = hypothesis\n\n if self.compute_hypothesis_token_set:\n hypotheses_list[ind].tokens = self.decode_ids_to_tokens(prediction)\n return hypotheses_list\n\n @abstractmethod\n def decode_tokens_to_str(self, tokens: List[int]) -> str:\n \"\"\"\n Implemented by subclass in order to decoder a token id list into a string.\n\n Args:\n tokens: List of int representing the token ids.\n\n Returns:\n A decoded string.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:\n \"\"\"\n Implemented by subclass in order to decode a token id list into a token list.\n A token list is the string representation of each token id.\n\n Args:\n tokens: List of int representing the token ids.\n\n Returns:\n A list of decoded tokens.\n \"\"\"\n raise NotImplementedError()\n\n\nclass RNNTDecoding(AbstractRNNTDecoding):\n \"\"\"\n Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.\n\n Args:\n 
decoding_cfg: A dict-like object which contains the following key-value pairs.\n strategy: str value which represents the type of decoding that can occur.\n Possible values are :\n - greedy, greedy_batch (for greedy decoding).\n - beam, tsd, alsd (for beam search decoding).\n\n compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded\n tokens as well as the decoded string. Default is False in order to avoid double decoding\n unless required.\n\n The config may further contain the following sub-dictionaries:\n \"greedy\":\n max_symbols: int, describing the maximum number of target tokens to decode per\n timestep during greedy decoding. Setting to larger values allows longer sentences\n to be decoded, at the cost of increased execution time.\n\n \"beam\":\n beam_size: int, defining the beam size for beam search. Must be >= 1.\n If beam_size == 1, will perform cached greedy search. This might be slightly different\n results compared to the greedy search above.\n\n score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.\n Set to True by default.\n\n return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the\n hypotheses after beam search has concluded. This flag is set by default.\n\n tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols\n per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,\n at increased cost to execution time.\n\n alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.\n If an integer is provided, it can decode sequences of that particular maximum length.\n If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),\n where seq_len is the length of the acoustic model output (T).\n\n NOTE:\n If a float is provided, it can be greater than 1!\n By default, a float of 2.0 is used so that a target sequence can be at most twice\n as long as the acoustic model output length T.\n\n decoder: The Decoder/Prediction network module.\n joint: The Joint network module.\n vocabulary: The vocabulary (excluding the RNNT blank token) which will be used for decoding.\n \"\"\"\n\n def __init__(\n self, decoding_cfg, decoder, joint, vocabulary,\n ):\n blank_id = len(vocabulary)\n self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])\n\n super(RNNTDecoding, self).__init__(decoding_cfg=decoding_cfg, decoder=decoder, joint=joint, blank_id=blank_id)\n\n def decode_tokens_to_str(self, tokens: List[int]) -> str:\n \"\"\"\n Implemented by subclass in order to decoder a token list into a string.\n\n Args:\n tokens: List of int representing the token ids.\n\n Returns:\n A decoded string.\n \"\"\"\n hypothesis = ''.join([self.labels_map[c] for c in tokens if c != self.blank_id])\n return hypothesis\n\n def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:\n \"\"\"\n Implemented by subclass in order to decode a token id list into a token list.\n A token list is the string representation of each token id.\n\n Args:\n tokens: List of int representing the token ids.\n\n Returns:\n A list of decoded tokens.\n \"\"\"\n token_list = [self.labels_map[c] for c in tokens if c != self.blank_id]\n return token_list\n\n\nclass RNNTWER(Metric):\n \"\"\"\n This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference texts.\n When doing distributed training/evaluation the 
result of res=WER(predictions, targets, target_lengths) calls\n will be all-reduced between all workers using SUM operations.\n The result contains two numbers, res=[wer_numerator, wer_denominator], and WER=wer_numerator/wer_denominator.\n\n If used with a PyTorch Lightning LightningModule, include wer_numerator and wer_denominator inside validation_step results.\n Then aggregate (sum) them at the end of the validation epoch to correctly compute the validation WER.\n\n Example:\n def validation_step(self, batch, batch_idx):\n ...\n wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)\n return {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}\n\n def validation_epoch_end(self, outputs):\n ...\n wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()\n wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()\n tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}\n return {'val_loss': val_loss_mean, 'log': tensorboard_logs}\n\n Args:\n decoding: RNNTDecoding object that will perform autoregressive decoding of the RNNT model.\n batch_dim_index: Index of the batch dimension.\n use_cer: Whether to use Character Error Rate instead of Word Error Rate.\n log_prediction: Whether to log a single decoded sample per call.\n\n Returns:\n res: a torch.Tensor object with two elements: [wer_numerator, wer_denominator]. To correctly compute average\n text word error rate, compute wer=wer_numerator/wer_denominator\n \"\"\"\n\n def __init__(\n self, decoding: RNNTDecoding, batch_dim_index=0, use_cer=False, log_prediction=True, dist_sync_on_step=False\n ):\n super(RNNTWER, self).__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)\n self.decoding = decoding\n self.batch_dim_index = batch_dim_index\n self.use_cer = use_cer\n self.log_prediction = log_prediction\n self.blank_id = self.decoding.blank_id\n self.labels_map = self.decoding.labels_map\n\n self.add_state(\"scores\", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)\n self.add_state(\"words\", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)\n\n def update(\n self,\n encoder_output: torch.Tensor,\n encoded_lengths: torch.Tensor,\n targets: torch.Tensor,\n target_lengths: torch.Tensor,\n ) -> torch.Tensor:\n words = 0.0\n scores = 0.0\n references = []\n with torch.no_grad():\n # prediction_cpu_tensor = tensors[0].long().cpu()\n targets_cpu_tensor = targets.long().cpu()\n tgt_lengths_cpu_tensor = target_lengths.long().cpu()\n\n # iterate over batch\n for ind in range(targets_cpu_tensor.shape[self.batch_dim_index]):\n tgt_len = tgt_lengths_cpu_tensor[ind].item()\n target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()\n\n reference = self.decoding.decode_tokens_to_str(target)\n references.append(reference)\n\n hypotheses, _ = self.decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)\n\n if self.log_prediction:\n logging.info(f\"\\n\")\n logging.info(f\"reference :{references[0]}\")\n logging.info(f\"predicted :{hypotheses[0]}\")\n\n for h, r in zip(hypotheses, references):\n if self.use_cer:\n h_list = list(h)\n r_list = list(r)\n else:\n h_list = h.split()\n r_list = r.split()\n words += len(r_list)\n # Compute Levenshtein's distance\n scores += editdistance.eval(h_list, r_list)\n\n self.scores += torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)\n self.words += torch.tensor(words, device=self.words.device, dtype=self.words.dtype)\n # return torch.tensor([scores, 
words]).to(predictions.device)\n\n def compute(self):\n wer = self.scores.float() / self.words\n return wer, self.scores.detach(), self.words.detach()\n" ]
[ [ "torch.no_grad", "torch.tensor" ] ]
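The word-error accumulation in RNNTWER.update above boils down to summing Levenshtein distances over hypothesis/reference pairs. A standalone sketch of just that arithmetic (editdistance is the same package the module imports); the sample strings are made up:

import editdistance

def wer_components(hypotheses, references, use_cer=False):
    """Return (scores, words): summed edit distance and summed reference length."""
    scores, words = 0, 0
    for h, r in zip(hypotheses, references):
        h_list = list(h) if use_cer else h.split()
        r_list = list(r) if use_cer else r.split()
        words += len(r_list)
        scores += editdistance.eval(h_list, r_list)
    return scores, words

num, denom = wer_components(["the cat sat"], ["the cat sat down"])
print(num / denom)  # 0.25 -> one deletion over a four-word reference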
nathan-bennett/skellam
[ "8a1fff14ac8c5f6bd415a51befab818f864ab3e5" ]
[ "metrics/__init__.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport scipy\nfrom shared_utils import ArrayUtils\n\n\nclass SkellamMetrics:\n def __init__(self, x_metrics, y_metrics, y_hat, model, l0, l1, training_values):\n self._y = y_metrics\n self._y_hat = y_hat\n self.model = model\n self.l0 = ArrayUtils.convert_to_array(l0)\n self.l1 = ArrayUtils.convert_to_array(l1)\n self.training_values = training_values\n self._x0, self._x1 = self.split_or_duplicate_x(x_metrics)\n self.max_ll = self.model.fun\n self.coeff_size = self._x0.shape[1]\n self.lambda_0_coefficients = self.model.x[0 : self.coeff_size].reshape(-1, 1)\n self.lambda_1_coefficients = self.model.x[self.coeff_size :].reshape(-1, 1)\n self.train_length = len(training_values[0])\n\n @staticmethod\n def split_or_duplicate_x(x):\n return ArrayUtils.split_or_duplicate_x(x, False)\n\n def sse(self):\n return ((self._y - self._y_hat) ** 2).sum()\n\n def _y_bar(self):\n return self._y.mean()\n\n def sst(self):\n return ((self._y - self._y_bar()) ** 2).sum()\n\n def r2(self):\n \"\"\"Calculate R2 for either the train model or the test model\"\"\"\n sse_sst = self.sse() / self.sst()\n return 1 - sse_sst\n\n def adjusted_r2(self):\n \"\"\"Calculate adjusted R2 for either the train model or the test model\"\"\"\n r2 = self.r2()\n return 1 - (1-r2)*(self.train_length - 1)/(self.train_length - self.coeff_size - 1)\n\n def log_likelihood(self):\n \"\"\"Returns the maximum of the log likelihood function\"\"\"\n return self.max_ll\n\n def aic(self):\n return 2*self.coeff_size - 2*np.log(self.max_ll)\n\n def bic(self):\n return self.coeff_size*np.log(self.train_length) - 2*np.log(self.max_ll)\n\n def _calculate_lambda(self):\n \"\"\"Create arrays for our predictions of the two Poisson distributions\n \"\"\"\n _lambda0 = ArrayUtils.convert_to_array(\n np.exp(np.squeeze(self._x0 @ self.lambda_0_coefficients))\n )\n _lambda1 = ArrayUtils.convert_to_array(\n np.exp(np.squeeze(self._x1 @ self.lambda_1_coefficients))\n )\n return _lambda0, _lambda1\n\n def _calculate_v(self):\n \"\"\"Create diagonal matrix consisting of our predictions of the Poisson distributions\n \"\"\"\n _lambda0, _lambda1 = self._calculate_lambda()\n _v0 = np.diagflat(_lambda0)\n _v1 = np.diagflat(_lambda1)\n return _v0, _v1\n\n def _calculate_w(self):\n \"\"\"Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions\n with their observed values\n \"\"\"\n _lambda0, _lambda1 = self._calculate_lambda()\n _w0 = np.diagflat((self.l0 - _lambda0.reshape(-1, 1)) ** 2)\n _w1 = np.diagflat((self.l1 - _lambda1.reshape(-1, 1)) ** 2)\n return _w0, _w1\n\n def _calculate_robust_covariance(self):\n \"\"\"Calculate robust variance covariance matrices for our two sets of coefficients\n \"\"\"\n _v0, _v1 = self._calculate_v()\n _w0, _w1 = self._calculate_w()\n _robust_cov0 = (\n np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))\n * np.dot(np.dot(self._x0.T, _w0), self._x0)\n * np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))\n )\n _robust_cov1 = (\n np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))\n * np.dot(np.dot(self._x1.T, _w1), self._x1)\n * np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))\n )\n return _robust_cov0, _robust_cov1\n\n def _calculate_robust_standard_errors(self):\n \"\"\"Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal\n values in the variance covariance matrices\n \"\"\"\n _robust_cov0, _robust_cov1 = self._calculate_robust_covariance()\n _std_error0 = 
np.sqrt(np.diag(_robust_cov0))\n _std_error1 = np.sqrt(np.diag(_robust_cov1))\n return _std_error0, _std_error1\n\n def _calculate_z_values(self):\n \"\"\"Calculate z statistics for our two sets of coefficients\n \"\"\"\n _std_error0, _std_error1 = self._calculate_robust_standard_errors()\n _z_values0 = self.lambda_0_coefficients[:, 0] / _std_error0\n _z_values1 = self.lambda_1_coefficients[:, 0] / _std_error1\n return _z_values0, _z_values1\n\n def _calculate_p_values(self):\n \"\"\"Calculate p values for our two sets of coefficients\n \"\"\"\n _z_values0, _z_values1 = self._calculate_z_values()\n _p_values0 = scipy.stats.norm.sf(abs(_z_values0)) * 2\n _p_values1 = scipy.stats.norm.sf(abs(_z_values1)) * 2\n return _p_values0, _p_values1\n" ]
[ [ "numpy.diagflat", "numpy.dot", "numpy.log", "numpy.squeeze", "numpy.diag" ] ]
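For reference, the heteroskedasticity-robust covariance that _calculate_robust_covariance above targets has the textbook sandwich form (X'VX)^{-1} (X'WX) (X'VX)^{-1}. A standalone numpy sketch using explicit matrix products follows (note the archived code combines the three factors with elementwise *, which differs from this form); V and W mirror the diagonal matrices built in _calculate_v and _calculate_w, and all data below is synthetic:

import numpy as np

def sandwich_covariance(X, V, W):
    """Robust covariance: bread @ meat @ bread, with bread = (X'VX)^-1."""
    bread = np.linalg.inv(X.T @ V @ X)
    meat = X.T @ W @ X
    return bread @ meat @ bread

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
V = np.diagflat(rng.uniform(0.5, 2.0, size=50))   # predicted variances
W = np.diagflat(rng.uniform(0.5, 2.0, size=50))   # squared-residual weights
cov = sandwich_covariance(X, V, W)
std_errors = np.sqrt(np.diag(cov))                # as in _calculate_robust_standard_errors
print(std_errors)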
fluiddyn/transonic
[ "a460e9f6d1139f79b668cb3306d1e8a7e190b72d" ]
[ "doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py" ]
[ "import numpy as np\nfrom future.cmorph import _dilate\n\nrows = 1024\ncols = 1024\nsrows = 64\nscols = 64\n\nimage = np.random.randint(0, 255, rows * cols, dtype=np.uint8).reshape(\n (rows, cols)\n)\nselem = np.random.randint(0, 1, srows * scols, dtype=np.uint8).reshape(\n (srows, scols)\n)\nout = np.zeros((rows, cols), dtype=np.uint8)\nshift_x = np.int8(2)\nshift_y = np.int8(2)" ]
[ [ "numpy.int8", "numpy.random.randint", "numpy.zeros" ] ]
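As a rough functional reference for what the benchmarked _dilate kernel computes on arrays like those in the setup above, scipy's grey-scale dilation over the same kind of footprint is the closest off-the-shelf operation (whether its edge handling matches skimage's cmorph exactly is not guaranteed). Note that the setup's selem, generated with np.random.randint(0, 1, ...), can only contain zeros, so an all-ones footprint is used here to make the effect visible:

import numpy as np
from scipy import ndimage

image = np.random.randint(0, 255, (64, 64), dtype=np.uint8)
selem = np.ones((5, 5), dtype=np.uint8)                  # all-ones footprint
reference = ndimage.grey_dilation(image, footprint=selem)
print(reference.shape, reference.dtype)                  # (64, 64) uint8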
cherisyu/ML_in_Action
[ "8c1019de911e7fb1bbab973067213f5f62ab9dcd" ]
[ "ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py" ]
[ "#!/usr/bin/python\n# coding:utf-8\n\n'''\nCreated on 2017-04-06\nUpdate on 2017-11-17\nAuthor: Peter/ApacheCN-xy/片刻\nGitHub: https://github.com/apachecn/MachineLearning\n'''\n\nimport sys\nfrom numpy import mat, mean, power\n\n'''\n 这个mapper文件按行读取所有的输入并创建一组对应的浮点数,然后得到数组的长度并创建NumPy矩阵。\n 再对所有的值进行平方,最后将均值和平方后的均值发送出去。这些值将用来计算全局的均值和方差。\n\n Args:\n file 输入数据\n Return:\n'''\n\n\ndef read_input(file):\n for line in file:\n yield line.rstrip() # 返回一个 yield 迭代器,每次获取下一个值,节约内存。\n\n\ninput = read_input(sys.stdin) # 创建一个输入的数据行的列表list\ninput = [float(line) for line in input] # 将得到的数据转化为 float 类型\nnumInputs = len(input) # 获取数据的个数,即输入文件的数据的行数\ninput = mat(input) # 将 List 转换为矩阵\nsqInput = power(input, 2) # 将矩阵的数据分别求 平方,即 2次方\n\n# 输出 数据的个数,n个数据的均值,n个数据平方之后的均值\n# 第一行是标准输出,也就是reducer的输出\n# 第二行识标准错误输出,即对主节点作出的响应报告,表明本节点工作正常。\n# 【这不就是面试的装逼重点吗?如何设计监听架构细节】注意:一个好的习惯是想标准错误输出发送报告。如果某任务10分钟内没有报告输出,则将被Hadoop中止。\nprint(\"%d\\t%f\\t%f\" % (numInputs, mean(input), mean(sqInput))) # 计算均值\nprint(\"map report: still alive\", file=sys.stderr)\n" ]
[ [ "numpy.power", "numpy.mean", "numpy.mat" ] ]
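The mapper above emits one "count, mean, mean-of-squares" triple per data chunk; the original example pairs it with a reducer that merges those triples into global statistics. A minimal standalone sketch of that combining arithmetic (not the repo's exact reducer file) follows; variance here is the population variance E[x^2] - E[x]^2:

def combine(chunks):
    """chunks: iterable of (n, mean, mean_sq) triples emitted by the mappers."""
    total_n = sum(n for n, _, _ in chunks)
    global_mean = sum(n * m for n, m, _ in chunks) / total_n
    global_mean_sq = sum(n * sq for n, _, sq in chunks) / total_n
    variance = global_mean_sq - global_mean ** 2  # E[x^2] - E[x]^2
    return total_n, global_mean, variance

# two chunks: [1, 1] and [3, 3] -> combined data [1, 1, 3, 3]
print(combine([(2, 1.0, 1.0), (2, 3.0, 9.0)]))  # -> (4, 2.0, 1.0)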
amakropoulos/structural-pipeline-measures
[ "70e22f9ad94cc57e72e510576cfc3129da83f7fc" ]
[ "packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# pylint: disable=no-member\n#\n# @Author: oesteban\n# @Date: 2016-02-23 19:25:39\n# @Email: code@oscaresteban.es\n# @Last Modified by: oesteban\n# @Last Modified time: 2016-02-29 11:43:16\n\"\"\"\nComputation of the quality assessment measures on functional MRI\n\n\n\n\"\"\"\nimport os.path as op\nimport numpy as np\nimport nibabel as nb\nfrom nitime import algorithms as nta\nimport scipy\n\n\ndef gsr(epi_data, mask, direction=\"y\", ref_file=None, out_file=None):\n \"\"\"\n Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The\n procedure is as follows:\n\n #. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.\n\n #. Rotate by :math:`N/2`\n\n #. Remove the intersection with the original mask\n\n #. Generate a non-ghost background\n\n #. Calculate the :abbr:`GSR (ghost to signal ratio)`\n\n\n .. warning ::\n\n This should be used with EPI images for which the phase\n encoding direction is known.\n\n :param str epi_file: path to epi file\n :param str mask_file: path to brain mask\n :param str direction: the direction of phase encoding (x, y, all)\n :return: the computed gsr\n\n \"\"\"\n\n direction = direction.lower()\n if direction[-1] not in ['x', 'y', 'all']:\n raise Exception(\"Unknown direction %s, should be one of x, -x, y, -y, all\"\n % direction)\n\n if direction == 'all':\n result = []\n for newdir in ['x', 'y']:\n ofile = None\n if out_file is not None:\n fname, ext = op.splitext(ofile)\n if ext == '.gz':\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n ofile = '%s_%s%s' % (fname, newdir, ext)\n result += [gsr(epi_data, mask, newdir,\n ref_file=ref_file, out_file=ofile)]\n return result\n\n # Step 1\n n2_mask = np.zeros_like(mask)\n\n # Step 2\n if direction == \"x\":\n n2lim = np.floor(mask.shape[0]/2)\n n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim*2), :, :]\n n2_mask[n2lim:(n2lim*2), :, :] = mask[:n2lim, :, :]\n elif direction == \"y\":\n n2lim = np.floor(mask.shape[1]/2)\n n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim*2), :]\n n2_mask[:, n2lim:(n2lim*2), :] = mask[:, :n2lim, :]\n elif direction == \"z\":\n n2lim = np.floor(mask.shape[2]/2)\n n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim*2)]\n n2_mask[:, :, n2lim:(n2lim*2)] = mask[:, :, :n2lim]\n\n # Step 3\n n2_mask = n2_mask * (1-mask)\n\n # Step 4: non-ghost background region is labeled as 2\n n2_mask = n2_mask + 2 * (1 - n2_mask - mask)\n\n # Save mask\n if ref_file is not None and out_file is not None:\n ref = nb.load(ref_file)\n out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())\n out.to_filename(out_file)\n\n # Step 5: signal is the entire foreground image\n ghost = epi_data[n2_mask == 1].mean() - epi_data[n2_mask == 2].mean()\n signal = epi_data[n2_mask == 0].mean()\n return float(ghost/signal)\n\n\ndef dvars(func, mask, output_all=False, out_file=None):\n \"\"\"\n Compute the mean :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Power2012]_.\n\n Particularly, the *standardized* :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Nichols2013]_ are computed.\n\n .. 
note:: Implementation details\n\n Uses the implementation of the `Yule-Walker equations\n from nitime\n <http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html\\\n#nitime.algorithms.autoregressive.AR_est_YW>`_\n for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.\n\n :param numpy.ndarray func: functional data, after head-motion-correction.\n :param numpy.ndarray mask: a 3D mask of the brain\n :param bool output_all: write out all dvars\n :param str out_file: a path to which the standardized dvars should be saved.\n :return: the standardized DVARS\n\n \"\"\"\n if len(func.shape) != 4:\n raise RuntimeError(\n \"Input fMRI dataset should be 4-dimensional\" % func)\n\n # Remove zero-variance voxels across time axis\n zv_mask = zero_variance(func, mask)\n idx = np.where(zv_mask > 0)\n mfunc = func[idx[0], idx[1], idx[2], :]\n\n # Robust standard deviation\n func_sd = (np.percentile(mfunc, 75) -\n np.percentile(mfunc, 25)) / 1.349\n\n # Demean\n mfunc -= mfunc.mean(axis=1)[..., np.newaxis]\n\n # AR1\n ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)\n\n # Predicted standard deviation of temporal derivative\n func_sd_pd = np.squeeze(np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd)\n diff_sd_mean = func_sd_pd[func_sd_pd > 0].mean()\n\n # Compute temporal difference time series\n func_diff = np.diff(mfunc, axis=1)\n\n # DVARS (no standardization)\n dvars_nstd = func_diff.std(axis=0)\n\n # standardization\n dvars_stdz = dvars_nstd / diff_sd_mean\n\n # voxelwise standardization\n diff_vx_stdz = func_diff / np.array([func_sd_pd] * func_diff.shape[-1]).T\n dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)\n\n if output_all:\n gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))\n else:\n gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)\n\n if out_file is not None:\n np.savetxt(out_file, gendvars, fmt='%.12f')\n\n return gendvars\n\n\ndef fd_jenkinson(in_file, rmax=80., out_file=None):\n \"\"\"\n Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_\n on a 4D dataset, after ``3dvolreg`` has been executed\n (generally a file named ``*.affmat12.1D``).\n\n :param str in_file: path to epi file\n :param float rmax: the default radius (as in FSL) of a sphere represents\n the brain in which the angular displacements are projected.\n :param str out_file: a path for the output file with the FD\n\n :return: the output file with the FD, and the average FD along\n the time series\n :rtype: tuple(str, float)\n\n\n .. 
note ::\n\n :code:`infile` should have one 3dvolreg affine matrix in one row -\n NOT the motion parameters\n\n\n \"\"\"\n\n import sys\n import math\n\n if out_file is None:\n fname, ext = op.splitext(op.basename(in_file))\n out_file = op.abspath('%s_fdfile%s' % (fname, ext))\n\n # if in_file (coordinate_transformation) is actually the rel_mean output\n # of the MCFLIRT command, forward that file\n if 'rel.rms' in in_file:\n return in_file\n\n pm_ = np.genfromtxt(in_file)\n original_shape = pm_.shape\n pm = np.zeros((pm_.shape[0], pm_.shape[1] + 4))\n pm[:, :original_shape[1]] = pm_\n pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]\n\n # rigid body transformation matrix\n T_rb_prev = np.matrix(np.eye(4))\n\n flag = 0\n X = [0] # First timepoint\n for i in range(0, pm.shape[0]):\n # making use of the fact that the order of aff12 matrix is \"row-by-row\"\n T_rb = np.matrix(pm[i].reshape(4, 4))\n\n if flag == 0:\n flag = 1\n else:\n M = np.dot(T_rb, T_rb_prev.I) - np.eye(4)\n A = M[0:3, 0:3]\n b = M[0:3, 3]\n\n FD_J = math.sqrt(\n (rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b))\n X.append(FD_J)\n\n T_rb_prev = T_rb\n np.savetxt(out_file, X)\n return out_file\n\n\ndef gcor(func, mask):\n \"\"\"\n Compute the :abbr:`GCOR (global correlation)`.\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the computed GCOR value\n\n \"\"\"\n # Remove zero-variance voxels across time axis\n tv_mask = zero_variance(func, mask)\n idx = np.where(tv_mask > 0)\n zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)\n avg_ts = zscores.mean(axis=0)\n return float(avg_ts.transpose().dot(avg_ts) / len(avg_ts))\n\ndef zero_variance(func, mask):\n \"\"\"\n Mask out voxels with zero variance across t-axis\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the 3D mask of voxels with nonzero variance across :math:`t`.\n :rtype: numpy.ndarray\n\n \"\"\"\n idx = np.where(mask > 0)\n func = func[idx[0], idx[1], idx[2], :]\n tvariance = func.var(axis=1)\n tv_mask = np.zeros_like(tvariance)\n tv_mask[tvariance > 0] = 1\n\n newmask = np.zeros_like(mask)\n newmask[idx] = tv_mask\n return newmask\n" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.savetxt", "numpy.dot", "numpy.zeros", "scipy.stats.mstats.zscore", "numpy.percentile", "numpy.genfromtxt", "numpy.diff", "numpy.eye", "numpy.where", "numpy.apply_along_axis", "numpy.vstack", "numpy.floor" ] ]
artberryx/SAR
[ "e6c79ea271f1033d5ea3c11556aff173adf6d941" ]
[ "sb/stable_baselines_ex/common/wrappers_ex.py" ]
[ "import gym\nimport numpy as np\nfrom gym import spaces\n\nfrom stable_baselines.common.running_mean_std import RunningMeanStd\n\n\nclass ScaleRewardEnv(gym.RewardWrapper):\n def __init__(self, env: gym.Env, scale):\n gym.RewardWrapper.__init__(self, env)\n self.scale = scale\n\n def reward(self, reward: float) -> float:\n return reward * self.scale\n\n\nclass RepeatGoalEnv(gym.Wrapper):\n def __init__(\n self,\n env: gym.Env,\n gamma,\n max_d,\n max_t,\n lambda_dt,\n anoise_type=None,\n anoise_prob=0.,\n anoise_std=0.,\n ):\n gym.Wrapper.__init__(self, env)\n self.epsilon_std = 1e-3\n self.gamma = gamma\n self.max_d = max_d\n self.max_t = max_t\n self.lambda_dt = lambda_dt\n self.anoise_type = anoise_type\n self.anoise_prob = anoise_prob\n self.anoise_std = anoise_std\n\n self.body_key = None\n part_keys = set(self.env.sim.model._body_name2id.keys())\n target_keys = ['torso', 'cart', 'body1']\n for target_key in target_keys:\n if target_key in part_keys:\n self.body_key = target_key\n break\n\n if self.anoise_type in ['ext_fpc']:\n low = np.concatenate([self.observation_space.low, [-np.inf] * 3])\n high = np.concatenate([self.observation_space.high, [np.inf] * 3])\n self.observation_space = spaces.Box(\n low=low, high=high,\n shape=(self.observation_space.shape[0] + 3,), dtype=self.observation_space.dtype,\n )\n self.obs_dim = self.observation_space.shape[0] + 3\n self.cur_force = np.zeros(3)\n else:\n self.obs_dim = self.observation_space.shape[0]\n\n action_dim = self.env.action_space.shape[0]\n self.ori_action_dim = action_dim\n low = self.env.action_space.low\n high = self.env.action_space.high\n if self.max_d is not None or self.max_t is not None:\n action_dim += 1\n low = np.r_[low, -1.]\n high = np.r_[high, 1.]\n\n self.action_space = spaces.Box(\n low=low, high=high, shape=(action_dim,), dtype=env.action_space.dtype\n )\n\n self.cur_obs = None\n\n self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)\n self.reset_update_obs_estimate = False\n self.num_steps = 0\n\n self.eval_mode = False\n\n def _update_obs_estimate(self, obs):\n if not self.eval_mode:\n self.obs_rms.update(obs[:, :self.obs_dim])\n\n def step(self, aug_action):\n cur_idx = self.ori_action_dim\n action = aug_action[:self.ori_action_dim]\n\n if self.anoise_type == 'action':\n if np.random.rand() < self.anoise_prob:\n action = action + np.random.randn(*action.shape) * self.anoise_std\n action = np.clip(action, self.action_space.low[:len(action)], self.action_space.high[:len(action)])\n elif self.anoise_type is not None and 'ext' in self.anoise_type:\n if np.random.rand() < self.anoise_prob:\n if self.env.spec.id == 'Reacher-v2':\n force = np.zeros(3)\n torque = np.random.randn(3) * self.anoise_std\n cur_info = torque\n else:\n force = np.random.randn(3) * self.anoise_std\n torque = np.zeros(3)\n cur_info = force\n\n if self.anoise_type == 'ext_fpc':\n self.cur_force = np.clip(cur_info, -1, 1)\n self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = np.r_[\n force, torque]\n else:\n self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = [0] * 6\n\n if self.max_d is not None or self.max_t is not None:\n u = aug_action[cur_idx]\n cur_idx += 1\n norm_u = (u + 1) / 2\n u = norm_u\n else:\n u = None\n\n lambda_dt = self.lambda_dt\n\n total_reward = 0.0\n done = None\n cur_gamma = 1.0\n first_obs = self.cur_obs\n for i in range(100000000):\n obs, reward, done, info = self.env.step(action)\n\n if self.anoise_type in ['ext_fpc']:\n obs = 
np.concatenate([obs, self.cur_force])\n\n if not done:\n self._update_obs_estimate(obs[np.newaxis, ...])\n self.reset_update_obs_estimate = True\n total_reward += reward * cur_gamma\n cur_gamma *= self.gamma\n if done:\n break\n\n if self.max_d is None and self.max_t is None:\n break\n\n if self.max_t is not None:\n t_delta = (i + 1) * self.env.dt\n\n if self.max_d is not None:\n norm_obs = (obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)\n norm_first_obs = (first_obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)\n\n d_delta = np.linalg.norm(norm_obs - norm_first_obs, ord=1) / len(obs)\n\n if self.max_d is not None and self.max_t is not None:\n if lambda_dt is None:\n if d_delta >= u * self.max_d:\n break\n if t_delta >= self.max_t:\n break\n else:\n ori_t_delta = t_delta\n t_delta = t_delta / self.max_t\n d_delta = d_delta / self.max_d\n delta = lambda_dt * d_delta + (1 - lambda_dt) * t_delta\n if delta >= u:\n break\n if ori_t_delta >= self.max_t:\n break\n elif self.max_t is not None:\n if t_delta >= u * self.max_t:\n break\n elif self.max_d is not None:\n if d_delta >= u * self.max_d:\n break\n\n self.cur_obs = obs\n info['w'] = i + 1\n info['t_diff'] = (i + 1) * self.env.dt\n if u is not None:\n if self.max_d is not None and self.max_t is not None:\n pass\n elif self.max_t is not None:\n info['t'] = u * self.max_t\n elif self.max_d is not None:\n info['d'] = u * self.max_d\n info['u'] = u\n if lambda_dt is not None:\n info['lambda_dt'] = lambda_dt\n\n self.num_steps += 1\n\n return self.cur_obs, total_reward, done, info\n\n def reset(self, **kwargs):\n obs = self.env.reset(**kwargs)\n\n if self.anoise_type in ['ext_fpc']:\n self.cur_force = np.zeros(3)\n obs = np.concatenate([obs, self.cur_force])\n\n if self.reset_update_obs_estimate:\n self._update_obs_estimate(obs[np.newaxis, ...])\n self.reset_update_obs_estimate = False\n self.cur_obs = obs\n return self.cur_obs\n" ]
[ [ "numpy.concatenate", "numpy.linalg.norm", "numpy.random.rand", "numpy.zeros", "numpy.random.randn", "numpy.sqrt", "numpy.clip" ] ]
Mahdi-Asadi/python_thesis
[ "6cb1dbe24fcf9133e971e64c91e1dde234250da9" ]
[ "RK45 - Copy.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import RK45\nf_out = \"E:\\\\1\\\\P_rk4.txt\" # address file for output\nf2 = open(f_out,\"w+\")\ndef du_dx(x,y):\n wa=1 # atomic frequency \n wp=0.6 # field frequency\n g=0.6 # coupling strength \n n = 1 # number of photons\n A = n*wp+(wa/2)\n B = (1+n)*wp-(wa/2)\n X = n+1\n C = np.sqrt(X) \n dydx_1= A*y[1]+g*C*y[3]\n dydx_2= -A*y[0]-g*C*y[2]\n dydx_3= B*y[3]+g*C*y[1]\n dydx_4= -B*y[2]-g*C*y[0] \n return [dydx_1,dydx_2,dydx_3,dydx_4]\n\ny_0 = (1/np.sqrt(2),0,1/np.sqrt(2),0) # initial value\n# print(\"y_0 = \",y_0)\nm = 1000\nti = 0\ntf = 30\nh = tf/m\ntspan = np.arange(ti,tf,h)\nprint(h)\nfor i in tspan:\n print(i)\n v = RK45(du_dx,t0 =i,y0 = y_0,t_bound=i) # 4 answer of dydx_1,...,dydx_4\n print(v.y[0:])\n# print(type(v))\n\n# print(\"v.t[0] = \",v.t[0])\n# print(len(v.t))\n# print(\"------------------\")\n# print(v.y)\n# print(len(v.t))\n# print(\"------------------\")\n# y_1 = v.y[:,0]\n# print(\"y_1 = \",y_1)\n# print(\"------------------\")\n# y_2 = v.y[0,:]\n# print(\"y_2 = \",y_2)\n# print(\"------------------\")\n# y_3 = v.y[0,0]\n# print(\"y_3 = \",y_3)\n# print(\"------------------\")\n# # --------------------------\n# # print in file \n# count = 0\n# while count<1000:\n# y_i = v.y[:,count]\n# f2.write(str(v.t[count]))\n# f2.write(\" \")\n# for i in y_i:\n# i = round(i,4)\n# i = str(i)\n# f2.write(i)\n# f2.write(len(i)*\" \")\n# f2.write(\"\\n\")\n# count = count+1\n\n# # y_prime = u_s[:,1]\n# # print(y_prime)\n# plt.plot(v.t, v.y[0,:],'-', label='r(t)') \n# plt.xlabel(\"x\")\n# plt.ylabel(\"y\")\n# plt.show()" ]
[ [ "scipy.integrate.RK45", "numpy.arange", "numpy.sqrt" ] ]
PuffyPuffin/LO_user
[ "c7cafc2045b027aad0098d034cbe2b70126c8379" ]
[ "tracker/tracker/user_tracker.py" ]
[ "\"\"\"\nCode for particle tracking, designed for ROMS output. This new version\nmakes extensive use of nearest-neighbor KDTree algorithms for interpolation.\nThis results is significantly (36x) faster runtimes compared with old version.\n\nPERFORMANCE: about 3 minutes per day for a 3D cas6 experiment with 10k particles.\n\nNOTE: You have to have run make_KDTrees.py for the grid (e.g. cas6) before running.\n\nNOTE: There is some issue, perhaps with garbage collection, which causes\nthe loading of NetCDF files to happen slower after running a few times\ninteractively from ipython. It appears that this can be avoided by running\nfrom the terminal as: python tracker.py [args].\n\nThis program is a driver where you specify:\n- an experiment (ROMS run + release locations + other choices)\n- a release or set of releases within that experiment (start day, etc.)\n\nThe main argument you provide is -exp, which is the experiment name, and\nis used by experiments.get_exp_info() and .get_ic() to get the gtagex and initial particle\nlocations. Other possible commmand line arguments and their defaults\nare explained in the argparse section below.\n\nNOTE: To improve usefulness for people other than me, this driver will\nfirst look for:\n- LiveOcean_user/tracker/user_trackfun.py\nbefore loading my versions.\n\nThis allows you to create your own modifications to the tracking\n(e.g. for diurnal depth behavior) while still being able to use git pull to update the main code.\n\nIt can be run on its own, or with command line arguments to facilitate\nlarge, automated jobs, for example in python:\n\nExamples:\n\npython tracker.py -clb True\n\nthe same command, with all the argmuents typed, instead of getting the as defaults:\n\npython tracker.py -gtx cas6_v3_lo8b -ro 2 -d 2019.07.04 -exp jdf0 -clb True\n\n\"\"\"\n\nimport sys\nfrom datetime import datetime, timedelta\nfrom time import time\nimport argparse\nimport numpy as np\n\nfrom lo_tools import Lfun, zfun\nLdir = Lfun.Lstart()\n\nfrom importlib import reload\n\npth = Ldir['LOu'] / 'tracker'\nif str(pth) not in sys.path:\n sys.path.append(str(pth))\nimport experiments as exp\nreload(exp)\n\nimport trackfun_nc as tfnc\nreload(tfnc)\n\n# The import of trackfun or user_trackfun is done later in this program,\n# about 100 lines down.\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# command line arguments, can be input in any order\nparser = argparse.ArgumentParser()\n\n# Set the experiment name\n# (details set in experiments.py, or, if it exists, user_experiments.py)\n\nparser.add_argument('-gtx', '--gtagex', default='cas6_v0_live', type=str)\nparser.add_argument('-ro', '--roms_out_num', default=2, type=int)\n# 1 = Ldir['roms_out1'], etc.\n\n# this is the first starting day\nparser.add_argument('-d', '--date_string', default='2021.10.15', type=str)\n\nparser.add_argument('-exp', '--exp_name', default='elb', type=str)\nparser.add_argument('-clb', '--clobber', default=False, type=zfun.boolean_string)\n# overwrite existing output folder if clobber == True\nparser.add_argument('-sub_tag', default='', type=str)\n# append an optional tag to the end of the output folder name\n\n# These are False unless the flags are used with the argument True\n# so if you do NOT use these flags the run will be:\n# - trapped to the surface\n# - no vertical turbulent diffusion\nparser.add_argument('-3d', default=False, type=zfun.boolean_string) # do 3d tracking\nparser.add_argument('-laminar', default=False, type=zfun.boolean_string) # no 
turbulence\nparser.add_argument('-no_advection', default=False, type=zfun.boolean_string) # no advection\nparser.add_argument('-sink', default=0, type=float) # particle sinking speed (m per day, e.g. 40)\n\n# windage = a small number: 0 <= windage << 1 (e.g. 0.03)\n# fraction of windspeed added to advection, only for 3d=False\nparser.add_argument('-wnd', '--windage', default=0, type=float)\n\n# You can make multiple releases using:\n# number_of_start_days > 1 & days_between_starts, and which hour (UTC) to start on\nparser.add_argument('-nsd', '--number_of_start_days', default=1, type=int)\nparser.add_argument('-dbs', '--days_between_starts', default=1, type=int)\nparser.add_argument('-dtt', '--days_to_track', default=1, type=int)\nparser.add_argument('-sh', '--start_hour', default=0, type=int)\n\n# number of divisions to make between saves for the integration\n# e.g. if ndiv = 12 and we have hourly saves, we use a 300 sec step\n# for the integration. 300 s seems like a good default value,\n# based on Banas et al. (2009, CSR RISE paper).\nparser.add_argument('-ndiv', default=12, type=int)\nparser.add_argument('-sph', default=1, type=int)\n# sph = saves per hour, a new argument to allow more frequent writing of output.\n\nargs = parser.parse_args()\nTR = args.__dict__\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# set where to look for model output\nif args.roms_out_num == 0:\n TR['roms_out'] = Ldir['roms_out']\nelif args.roms_out_num > 0:\n TR['roms_out'] = Ldir['roms_out' + str(args.roms_out_num)]\n\n# set dependent and default fields\n\nTR['turb'] = False\n\n# make sure sph is no greater than ndiv\nTR['sph'] = np.min((TR['sph'],TR['ndiv']))\n\n# overrides\nif TR['3d']:\n TR['windage'] = 0\n TR['turb'] = True # default is that 3d is always turbulent\n\nif TR['laminar']:\n TR['turb'] = False\n\n# get experiment info\nTR['gridname'], TR['tag'], TR['ex_name'] = TR['gtagex'].split('_')\n\n\n# pass some info to Ldir\nLdir['gtagex'] = TR['gtagex']\nLdir['roms_out'] = TR['roms_out']\n\n# get the full path to a valid history file\nfn00 = Ldir['roms_out'] / TR['gtagex'] / ('f' + TR['date_string']) / 'ocean_his_0001.nc'\nTR['fn00'] = fn00\n\n# set the name of the output folder\nout_name = TR['exp_name']\n\n# modify the output folder name, based on other choices\nif TR['3d']:\n out_name += '_3d'\nelif not TR['3d']:\n out_name += '_surf'\nif TR['laminar']:\n out_name += '_laminar'\nif TR['windage'] > 0:\n out_name += '_wind' + str(int(100*TR['windage']))\nif TR['start_hour'] > 0:\n out_name += '_sh' + str(int(TR['start_hour']))\nif TR['sink'] > 0:\n out_name += '_sink' + str(int(TR['sink']))\nif TR['no_advection'] == True:\n out_name += '_nadv'\nif TR['ndiv'] != 12: # only mention ndiv if it is NOT 12\n out_name += '_ndiv' + str(TR['ndiv'])\nif len(TR['sub_tag']) > 0:\n out_name += '_' + TR['sub_tag']\n\n# make the list of start days (datetimes) for separate releases\nidt_list = []\ndt = datetime.strptime(TR['date_string'], '%Y.%m.%d')\nfor nic in range(TR['number_of_start_days']):\n idt_list.append(dt)\n dt = dt + timedelta(TR['days_between_starts'])\n\n# make the output directory (empty)\noutdir0 = Ldir['LOo'] / 'tracks'\noutdir1 = out_name\noutdir = outdir0 / outdir1\nif outdir.is_dir():\n if args.clobber:\n pass # continue and overwrite if clobber is True\n else:\n print('Warning: output directory exists - rename if you want to keep it!!')\n print('-- tracker run not started --')\n sys.exit()\nLfun.make_dir(outdir, clean=True)\nprint(50*'*' + '\\nWriting to ' + 
str(outdir))\nsys.stdout.flush()\n\n# Write some info to outdir0 for use by trackfun.py\nLfun.dict_to_csv(TR, outdir0 / 'exp_info.csv')\n# and write the same info to outdir as part of the archived run output\nLfun.dict_to_csv(TR, outdir / 'exp_info.csv')\n\n# Load the trackfun module.\n# NOTE: we have to load this module AFTER we write [outdir0]/exp_info.csv\n# because it uses that information to decide which KDTrees to load. Crude.\nif (Ldir['LOu'] / 'tracker' / 'user_trackfun.py').is_file():\n sys.path.append(str(Ldir['LOu'] / 'tracker'))\n import user_trackfun as tfun\nelse:\n import trackfun as tfun\nreload(tfun)\n\n# get the initial particle location vectors\nEI = exp.get_exp_info(TR['exp_name'])\nplon00, plat00, pcs00 = exp.get_ic(EI, TR['fn00'])\n\n# step through the releases, one for each start day\nwrite_grid = True\nfor idt0 in idt_list:\n tt0 = time() # monitor integration time\n\n # name the release file by start day\n idt0_str = datetime.strftime(idt0,'%Y.%m.%d')\n outname = ('release_' + idt0_str + '.nc')\n print('-- ' + outname)\n sys.stdout.flush()\n out_fn = outdir / outname\n\n # we do the calculation in one-day segments, but write complete\n # output for a release to a single NetCDF file.\n for nd in range(TR['days_to_track']):\n\n # get or replace the history file list for this day\n idt = idt0 + timedelta(days=nd)\n idt_str = datetime.strftime(idt,'%Y.%m.%d')\n print(' - working on ' + idt_str)\n sys.stdout.flush()\n fn_list = tfun.get_fn_list(idt, Ldir)\n\n # write the grid file (once per experiment) for plotting\n if write_grid == True:\n g_infile = fn_list[0]\n g_outfile = outdir / 'grid.nc'\n tfnc.write_grid(g_infile, g_outfile)\n write_grid = False\n\n # DO THE TRACKING\n if nd == 0: # first day\n # set IC\n plon0 = plon00.copy()\n plat0 = plat00.copy()\n pcs0 = pcs00.copy()\n # do the tracking\n if TR['start_hour'] > 0:\n fn_list = fn_list[TR['start_hour']:]\n P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR, trim_loc=True)\n # save the results to NetCDF\n tfnc.start_outfile(out_fn, P)\n else: # subsequent days\n # set IC\n plon0 = P['lon'][-1,:]\n plat0 = P['lat'][-1,:]\n pcs0 = P['cs'][-1,:]\n # do the tracking\n P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR)\n tfnc.append_to_outfile(out_fn, P)\n\n print(' - Took %0.1f sec for %s day(s)' %\n (time() - tt0, str(TR['days_to_track'])))\n print(50*'=')\nprint(50*'*' + '\\nWrote to ' + str(outdir))\n" ]
[ [ "numpy.min" ] ]
BryanRacic/pandas
[ "21c299194a2b59a715fa7264bd6b44787deafc7a" ]
[ "pandas/core/base.py" ]
[ "\"\"\"\nBase and utility classes for pandas objects.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport textwrap\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Generic,\n Hashable,\n Literal,\n TypeVar,\n cast,\n final,\n)\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nfrom pandas._typing import (\n ArrayLike,\n DtypeObj,\n FrameOrSeries,\n IndexLabel,\n Shape,\n npt,\n)\nfrom pandas.compat import PYPY\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n remove_na_arraylike,\n)\n\nfrom pandas.core import (\n algorithms,\n ops,\n)\nfrom pandas.core.accessor import DirNamesMixin\nfrom pandas.core.algorithms import (\n duplicated,\n unique1d,\n value_counts,\n)\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.construction import (\n create_series_with_explicit_dtype,\n ensure_wrapped_if_datetimelike,\n extract_array,\n)\nimport pandas.core.nanops as nanops\n\nif TYPE_CHECKING:\n\n from pandas._typing import (\n NumpySorter,\n NumpyValueArrayLike,\n )\n\n from pandas import Categorical\n\n\n_shared_docs: dict[str, str] = {}\n_indexops_doc_kwargs = {\n \"klass\": \"IndexOpsMixin\",\n \"inplace\": \"\",\n \"unique\": \"IndexOpsMixin\",\n \"duplicated\": \"IndexOpsMixin\",\n}\n\n_T = TypeVar(\"_T\", bound=\"IndexOpsMixin\")\n\n\nclass PandasObject(DirNamesMixin):\n \"\"\"\n Baseclass for various pandas objects.\n \"\"\"\n\n # results from calls to methods decorated with cache_readonly get added to _cache\n _cache: dict[str, Any]\n\n @property\n def _constructor(self):\n \"\"\"\n Class constructor (for this class it's just `__class__`.\n \"\"\"\n return type(self)\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n # Should be overwritten by base classes\n return object.__repr__(self)\n\n def _reset_cache(self, key: str | None = None) -> None:\n \"\"\"\n Reset cached properties. If ``key`` is passed, only clears that key.\n \"\"\"\n if not hasattr(self, \"_cache\"):\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)\n\n def __sizeof__(self) -> int:\n \"\"\"\n Generates the total memory usage for an object that returns\n either a value or Series of values\n \"\"\"\n memory_usage = getattr(self, \"memory_usage\", None)\n if memory_usage:\n mem = memory_usage(deep=True)\n return int(mem if is_scalar(mem) else mem.sum())\n\n # no memory_usage attribute, so fall back to object's 'sizeof'\n return super().__sizeof__()\n\n\nclass NoNewAttributesMixin:\n \"\"\"\n Mixin which prevents adding new attributes.\n\n Prevents additional attributes via xxx.attribute = \"something\" after a\n call to `self.__freeze()`. 
Mainly used to prevent the user from using\n wrong attributes on an accessor (`Series.cat/.str/.dt`).\n\n If you really want to add a new attribute at a later time, you need to use\n `object.__setattr__(self, key, value)`.\n \"\"\"\n\n def _freeze(self):\n \"\"\"\n Prevents setting additional attributes.\n \"\"\"\n object.__setattr__(self, \"__frozen\", True)\n\n # prevent adding any attribute via s.xxx.new_attribute = ...\n def __setattr__(self, key: str, value):\n # _cache is used by a decorator\n # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)\n # because\n # 1.) getattr is false for attributes that raise errors\n # 2.) cls.__dict__ doesn't traverse into base classes\n if getattr(self, \"__frozen\", False) and not (\n key == \"_cache\"\n or key in type(self).__dict__\n or getattr(self, key, None) is not None\n ):\n raise AttributeError(f\"You cannot add any new attribute '{key}'\")\n object.__setattr__(self, key, value)\n\n\nclass DataError(Exception):\n pass\n\n\nclass SpecificationError(Exception):\n pass\n\n\nclass SelectionMixin(Generic[FrameOrSeries]):\n \"\"\"\n mixin implementing the selection & aggregation interface on a group-like\n object sub-classes need to define: obj, exclusions\n \"\"\"\n\n obj: FrameOrSeries\n _selection: IndexLabel | None = None\n exclusions: frozenset[Hashable]\n _internal_names = [\"_cache\", \"__setstate__\"]\n _internal_names_set = set(_internal_names)\n\n @final\n @property\n def _selection_list(self):\n if not isinstance(\n self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)\n ):\n return [self._selection]\n return self._selection\n\n @cache_readonly\n def _selected_obj(self):\n if self._selection is None or isinstance(self.obj, ABCSeries):\n return self.obj\n else:\n return self.obj[self._selection]\n\n @final\n @cache_readonly\n def ndim(self) -> int:\n return self._selected_obj.ndim\n\n @final\n @cache_readonly\n def _obj_with_exclusions(self):\n if self._selection is not None and isinstance(self.obj, ABCDataFrame):\n return self.obj[self._selection_list]\n\n if len(self.exclusions) > 0:\n # equivalent to `self.obj.drop(self.exclusions, axis=1)\n # but this avoids consolidating and making a copy\n return self.obj._drop_axis(\n self.exclusions, axis=1, consolidate=False, only_slice=True\n )\n else:\n return self.obj\n\n def __getitem__(self, key):\n if self._selection is not None:\n raise IndexError(f\"Column(s) {self._selection} already selected\")\n\n if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):\n if len(self.obj.columns.intersection(key)) != len(key):\n bad_keys = list(set(key).difference(self.obj.columns))\n raise KeyError(f\"Columns not found: {str(bad_keys)[1:-1]}\")\n return self._gotitem(list(key), ndim=2)\n\n elif not getattr(self, \"as_index\", False):\n if key not in self.obj.columns:\n raise KeyError(f\"Column not found: {key}\")\n return self._gotitem(key, ndim=2)\n\n else:\n if key not in self.obj:\n raise KeyError(f\"Column not found: {key}\")\n subset = self.obj[key]\n ndim = subset.ndim\n return self._gotitem(key, ndim=ndim, subset=subset)\n\n def _gotitem(self, key, ndim: int, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : str / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n raise AbstractMethodError(self)\n\n def aggregate(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n agg = aggregate\n\n\nclass 
IndexOpsMixin(OpsMixin):\n \"\"\"\n Common ops mixin to support a unified interface / docs for Series / Index\n \"\"\"\n\n # ndarray compatibility\n __array_priority__ = 1000\n _hidden_attrs: frozenset[str] = frozenset(\n [\"tolist\"] # tolist is not deprecated, just suppressed in the __dir__\n )\n\n @property\n def dtype(self) -> DtypeObj:\n # must be defined here as a property for mypy\n raise AbstractMethodError(self)\n\n @property\n def _values(self) -> ExtensionArray | np.ndarray:\n # must be defined here as a property for mypy\n raise AbstractMethodError(self)\n\n def transpose(self: _T, *args, **kwargs) -> _T:\n \"\"\"\n Return the transpose, which is by definition self.\n\n Returns\n -------\n %(klass)s\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self\n\n T = property(\n transpose,\n doc=\"\"\"\n Return the transpose, which is by definition self.\n \"\"\",\n )\n\n @property\n def shape(self) -> Shape:\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n return self._values.shape\n\n def __len__(self) -> int:\n # We need this defined here for mypy\n raise AbstractMethodError(self)\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Number of dimensions of the underlying data, by definition 1.\n \"\"\"\n return 1\n\n def item(self):\n \"\"\"\n Return the first element of the underlying data as a Python scalar.\n\n Returns\n -------\n scalar\n The first element of %(klass)s.\n\n Raises\n ------\n ValueError\n If the data is not length-1.\n \"\"\"\n if len(self) == 1:\n return next(iter(self))\n raise ValueError(\"can only convert an array of size 1 to a Python scalar\")\n\n @property\n def nbytes(self) -> int:\n \"\"\"\n Return the number of bytes in the underlying data.\n \"\"\"\n return self._values.nbytes\n\n @property\n def size(self) -> int:\n \"\"\"\n Return the number of elements in the underlying data.\n \"\"\"\n return len(self._values)\n\n @property\n def array(self) -> ExtensionArray:\n \"\"\"\n The ExtensionArray of the data backing this Series or Index.\n\n Returns\n -------\n ExtensionArray\n An ExtensionArray of the values stored within. For extension\n types, this is the actual array. For NumPy native types, this\n is a thin (no copy) wrapper around :class:`numpy.ndarray`.\n\n ``.array`` differs ``.values`` which may require converting the\n data to a different form.\n\n See Also\n --------\n Index.to_numpy : Similar method that always returns a NumPy array.\n Series.to_numpy : Similar method that always returns a NumPy array.\n\n Notes\n -----\n This table lays out the different array types for each extension\n dtype within pandas.\n\n ================== =============================\n dtype array type\n ================== =============================\n category Categorical\n period PeriodArray\n interval IntervalArray\n IntegerNA IntegerArray\n string StringArray\n boolean BooleanArray\n datetime64[ns, tz] DatetimeArray\n ================== =============================\n\n For any 3rd-party extension types, the array type will be an\n ExtensionArray.\n\n For all remaining dtypes ``.array`` will be a\n :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray\n stored within. 
If you absolutely need a NumPy array (possibly with\n copying / coercing data), then use :meth:`Series.to_numpy` instead.\n\n Examples\n --------\n For regular NumPy types like int, and float, a PandasArray\n is returned.\n\n >>> pd.Series([1, 2, 3]).array\n <PandasArray>\n [1, 2, 3]\n Length: 3, dtype: int64\n\n For extension types, like Categorical, the actual ExtensionArray\n is returned\n\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.array\n ['a', 'b', 'a']\n Categories (2, object): ['a', 'b']\n \"\"\"\n raise AbstractMethodError(self)\n\n def to_numpy(\n self,\n dtype: npt.DTypeLike | None = None,\n copy: bool = False,\n na_value=lib.no_default,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n A NumPy ndarray representing the values in this Series or Index.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the type of the array.\n\n .. versionadded:: 1.0.0\n\n **kwargs\n Additional keywords passed through to the ``to_numpy`` method\n of the underlying array (for extension arrays).\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. 
When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.to_numpy()\n array(['a', 'b', 'a'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET')],\n dtype=object)\n\n Or ``dtype='datetime64[ns]'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype=\"datetime64[ns]\")\n ... # doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],\n dtype='datetime64[ns]')\n \"\"\"\n if is_extension_array_dtype(self.dtype):\n # error: Too many arguments for \"to_numpy\" of \"ExtensionArray\"\n return self.array.to_numpy( # type: ignore[call-arg]\n dtype, copy=copy, na_value=na_value, **kwargs\n )\n elif kwargs:\n bad_keys = list(kwargs.keys())[0]\n raise TypeError(\n f\"to_numpy() got an unexpected keyword argument '{bad_keys}'\"\n )\n\n result = np.asarray(self._values, dtype=dtype)\n # TODO(GH-24345): Avoid potential double copy\n if copy or na_value is not lib.no_default:\n result = result.copy()\n if na_value is not lib.no_default:\n result[self.isna()] = na_value\n return result\n\n @property\n def empty(self) -> bool:\n return not self.size\n\n def max(self, axis=None, skipna: bool = True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. 
Only 0 or None are allowed.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_max(args, kwargs)\n return nanops.nanmax(self._values, skipna=skipna)\n\n @doc(op=\"max\", oppose=\"min\", value=\"largest\")\n def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:\n \"\"\"\n Return int position of the {value} value in the Series.\n\n If the {op}imum is achieved in multiple locations,\n the first row position is returned.\n\n Parameters\n ----------\n axis : {{None}}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n int\n Row position of the {op}imum value.\n\n See Also\n --------\n Series.arg{op} : Return position of the {op}imum value.\n Series.arg{oppose} : Return position of the {oppose}imum value.\n numpy.ndarray.arg{op} : Equivalent method for numpy arrays.\n Series.idxmax : Return index label of the maximum values.\n Series.idxmin : Return index label of the minimum values.\n\n Examples\n --------\n Consider dataset containing cereal calories\n\n >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,\n ... 
'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})\n >>> s\n Corn Flakes 100.0\n Almond Delight 110.0\n Cinnamon Toast Crunch 120.0\n Cocoa Puff 110.0\n dtype: float64\n\n >>> s.argmax()\n 2\n >>> s.argmin()\n 0\n\n The maximum cereal calories is the third element and\n the minimum cereal calories is the first element,\n since series is zero-indexed.\n \"\"\"\n delegate = self._values\n nv.validate_minmax_axis(axis)\n skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)\n\n if isinstance(delegate, ExtensionArray):\n if not skipna and delegate.isna().any():\n return -1\n else:\n return delegate.argmax()\n else:\n # error: Incompatible return value type (got \"Union[int, ndarray]\", expected\n # \"int\")\n return nanops.nanargmax( # type: ignore[return-value]\n delegate, skipna=skipna\n )\n\n def min(self, axis=None, skipna: bool = True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n Exclude NA/null values when showing the result.\n *args, **kwargs\n Additional arguments and keywords for compatibility with NumPy.\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_min(args, kwargs)\n return nanops.nanmin(self._values, skipna=skipna)\n\n @doc(argmax, op=\"min\", oppose=\"max\", value=\"smallest\")\n def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:\n delegate = self._values\n nv.validate_minmax_axis(axis)\n skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)\n\n if isinstance(delegate, ExtensionArray):\n if not skipna and delegate.isna().any():\n return -1\n else:\n return delegate.argmin()\n else:\n # error: Incompatible return value type (got \"Union[int, ndarray]\", expected\n # \"int\")\n return nanops.nanargmin( # type: ignore[return-value]\n delegate, skipna=skipna\n )\n\n def tolist(self):\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist : Return the array as an a.ndim-levels deep\n nested list of Python scalars.\n \"\"\"\n if not isinstance(self._values, np.ndarray):\n # check for ndarray instead of dtype to catch DTA/TDA\n return list(self._values)\n return self._values.tolist()\n\n to_list = tolist\n\n def __iter__(self):\n \"\"\"\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n \"\"\"\n # We are explicitly making element iterators.\n if not isinstance(self._values, np.ndarray):\n # Check type instead of dtype to catch DTA/TDA\n return iter(self._values)\n else:\n return map(self._values.item, range(self._values.size))\n\n @cache_readonly\n def hasnans(self) -> bool:\n \"\"\"\n Return if I have any nans; 
enables various perf speedups.\n \"\"\"\n return bool(isna(self).any())\n\n def isna(self):\n return isna(self._values)\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis=0,\n skipna=True,\n numeric_only=None,\n filter_type=None,\n **kwds,\n ):\n \"\"\"\n Perform the reduction type operation if we can.\n \"\"\"\n func = getattr(self, name, None)\n if func is None:\n raise TypeError(\n f\"{type(self).__name__} cannot perform the operation {name}\"\n )\n return func(skipna=skipna, **kwds)\n\n @final\n def _map_values(self, mapper, na_action=None):\n \"\"\"\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n \"\"\"\n # we can fastpath dict/Series to an efficient map\n # as we know that we are not going to have to yield\n # python types\n if is_dict_like(mapper):\n if isinstance(mapper, dict) and hasattr(mapper, \"__missing__\"):\n # If a dictionary subclass defines a default value method,\n # convert mapper to a lookup function (GH #15999).\n dict_with_default = mapper\n mapper = lambda x: dict_with_default[x]\n else:\n # Dictionary does not have a default. Thus it's safe to\n # convert to an Series for efficiency.\n # we specify the keys here to handle the\n # possibility that they are tuples\n\n # The return value of mapping with an empty mapper is\n # expected to be pd.Series(np.nan, ...). 
As np.nan is\n # of dtype float64 the return value of this method should\n # be float64 as well\n mapper = create_series_with_explicit_dtype(\n mapper, dtype_if_empty=np.float64\n )\n\n if isinstance(mapper, ABCSeries):\n # Since values were input this means we came from either\n # a dict or a series and mapper should be an index\n if is_categorical_dtype(self.dtype):\n # use the built in categorical series mapper which saves\n # time by mapping the categories instead of all values\n\n cat = cast(\"Categorical\", self._values)\n return cat.map(mapper)\n\n values = self._values\n\n indexer = mapper.index.get_indexer(values)\n new_values = algorithms.take_nd(mapper._values, indexer)\n\n return new_values\n\n # we must convert to python types\n if is_extension_array_dtype(self.dtype) and hasattr(self._values, \"map\"):\n # GH#23179 some EAs do not have `map`\n values = self._values\n if na_action is not None:\n raise NotImplementedError\n map_f = lambda values, f: values.map(f)\n else:\n values = self._values.astype(object)\n if na_action == \"ignore\":\n map_f = lambda values, f: lib.map_infer_mask(\n values, f, isna(values).view(np.uint8)\n )\n elif na_action is None:\n map_f = lib.map_infer\n else:\n msg = (\n \"na_action must either be 'ignore' or None, \"\n f\"{na_action} was passed\"\n )\n raise ValueError(msg)\n\n # mapper is a function\n new_values = map_f(values, mapper)\n\n return new_values\n\n def value_counts(\n self,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n bins=None,\n dropna: bool = True,\n ):\n \"\"\"\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : bool, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n bins : int, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n DataFrame.value_counts: Equivalent method on DataFrames.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 1.0 0.2\n 2.0 0.2\n 4.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (0.996, 2.0] 2\n (2.0, 3.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n 1.0 1\n 2.0 1\n 4.0 1\n NaN 1\n dtype: int64\n \"\"\"\n return value_counts(\n self,\n sort=sort,\n ascending=ascending,\n normalize=normalize,\n bins=bins,\n dropna=dropna,\n )\n\n def unique(self):\n values = 
self._values\n\n if not isinstance(values, np.ndarray):\n result: ArrayLike = values.unique()\n if self.dtype.kind in [\"m\", \"M\"] and isinstance(self, ABCSeries):\n # GH#31182 Series._values returns EA, unpack for backward-compat\n if getattr(self.dtype, \"tz\", None) is None:\n result = np.asarray(result)\n else:\n result = unique1d(values)\n\n return result\n\n def nunique(self, dropna: bool = True) -> int:\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n \"\"\"\n uniqs = self.unique()\n if dropna:\n uniqs = remove_na_arraylike(uniqs)\n return len(uniqs)\n\n @property\n def is_unique(self) -> bool:\n \"\"\"\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n \"\"\"\n return self.nunique(dropna=False) == len(self)\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n\n return Index(self).is_monotonic\n\n @property\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n Alias for is_monotonic.\n \"\"\"\n # mypy complains if we alias directly\n return self.is_monotonic\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return boolean if values in the object are\n monotonic_decreasing.\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n\n return Index(self).is_monotonic_decreasing\n\n def _memory_usage(self, deep: bool = False) -> int:\n \"\"\"\n Memory usage of the values.\n\n Parameters\n ----------\n deep : bool, default False\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption.\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n \"\"\"\n if hasattr(self.array, \"memory_usage\"):\n # https://github.com/python/mypy/issues/1424\n # error: \"ExtensionArray\" has no attribute \"memory_usage\"\n return self.array.memory_usage(deep=deep) # type: ignore[attr-defined]\n\n v = self.array.nbytes\n if deep and is_object_dtype(self) and not PYPY:\n values = cast(np.ndarray, self._values)\n v += lib.memory_usage_of_objects(values)\n return v\n\n @doc(\n algorithms.factorize,\n values=\"\",\n order=\"\",\n size_hint=\"\",\n sort=textwrap.dedent(\n \"\"\"\\\n sort : bool, default False\n Sort `uniques` and shuffle `codes` to maintain the\n relationship.\n \"\"\"\n ),\n )\n def factorize(self, sort: bool = False, na_sentinel: int | None = -1):\n return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)\n\n _shared_docs[\n \"searchsorted\"\n ] = \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted {klass} `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n .. 
note::\n\n The {klass} *must* be monotonically sorted, otherwise\n wrong locations will likely be returned. Pandas does *not*\n check this for you.\n\n Parameters\n ----------\n value : array-like or scalar\n Values to insert into `self`.\n side : {{'left', 'right'}}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array-like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n int or array of int\n A scalar or array of insertion points with the\n same shape as `value`.\n\n See Also\n --------\n sort_values : Sort by the values along either axis.\n numpy.searchsorted : Similar method from NumPy.\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2, 3])\n >>> ser\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> ser.searchsorted(4)\n 3\n\n >>> ser.searchsorted([0, 4])\n array([0, 3])\n\n >>> ser.searchsorted([1, 3], side='left')\n array([0, 2])\n\n >>> ser.searchsorted([1, 3], side='right')\n array([1, 3])\n\n >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))\n >>> ser\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n dtype: datetime64[ns]\n\n >>> ser.searchsorted('3/14/2000')\n 3\n\n >>> ser = pd.Categorical(\n ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True\n ... )\n >>> ser\n ['apple', 'bread', 'bread', 'cheese', 'milk']\n Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']\n\n >>> ser.searchsorted('bread')\n 1\n\n >>> ser.searchsorted(['bread'], side='right')\n array([3])\n\n If the values are not monotonically sorted, wrong locations\n may be returned:\n\n >>> ser = pd.Series([2, 1, 3])\n >>> ser\n 0 2\n 1 1\n 2 3\n dtype: int64\n\n >>> ser.searchsorted(1) # doctest: +SKIP\n 0 # wrong result, correct would be 1\n \"\"\"\n\n @doc(_shared_docs[\"searchsorted\"], klass=\"Index\")\n def searchsorted(\n self,\n value: NumpyValueArrayLike,\n side: Literal[\"left\", \"right\"] = \"left\",\n sorter: NumpySorter = None,\n ) -> npt.NDArray[np.intp] | np.intp:\n return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)\n\n def drop_duplicates(self, keep=\"first\"):\n duplicated = self._duplicated(keep=keep)\n # error: Value of type \"IndexOpsMixin\" is not indexable\n return self[~duplicated] # type: ignore[index]\n\n @final\n def _duplicated(\n self, keep: Literal[\"first\", \"last\", False] = \"first\"\n ) -> npt.NDArray[np.bool_]:\n return duplicated(self._values, keep=keep)\n\n def _arith_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n\n lvalues = self._values\n rvalues = extract_array(other, extract_numpy=True, extract_range=True)\n rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)\n rvalues = ensure_wrapped_if_datetimelike(rvalues)\n\n with np.errstate(all=\"ignore\"):\n result = ops.arithmetic_op(lvalues, rvalues, op)\n\n return self._construct_result(result, name=res_name)\n\n def _construct_result(self, result, name):\n \"\"\"\n Construct an appropriately-wrapped result from the ArrayLike result\n of an arithmetic-like operation.\n \"\"\"\n raise AbstractMethodError(self)\n" ]
[ [ "pandas.core.algorithms.value_counts", "pandas.core.algorithms.factorize", "pandas._libs.lib.memory_usage_of_objects", "pandas.compat.numpy.function.validate_max", "pandas.core.construction.extract_array", "pandas.core.ops.get_op_result_name", "pandas.core.ops.maybe_prepare_scalar_for_op", "pandas.compat.numpy.function.validate_argmin_with_skipna", "pandas.core.dtypes.missing.isna", "pandas.core.algorithms.searchsorted", "pandas.core.nanops.nanmin", "pandas.core.construction.create_series_with_explicit_dtype", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.nanops.nanmax", "pandas.compat.numpy.function.validate_minmax_axis", "pandas.core.dtypes.common.is_dict_like", "pandas.core.nanops.nanargmax", "pandas.core.dtypes.common.is_object_dtype", "pandas.errors.AbstractMethodError", "pandas.core.ops.arithmetic_op", "pandas.core.dtypes.common.is_scalar", "pandas.util._decorators.doc", "pandas.core.algorithms.unique1d", "pandas.core.construction.ensure_wrapped_if_datetimelike", "pandas.compat.numpy.function.validate_min", "pandas.Index", "pandas.compat.numpy.function.validate_transpose", "numpy.asarray", "numpy.errstate", "pandas.core.dtypes.missing.remove_na_arraylike", "pandas.core.nanops.nanargmin", "pandas.compat.numpy.function.validate_argmax_with_skipna", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.algorithms.take_nd" ] ]
lukamaletin/multi-gan
[ "53b37c840d74ed0a9db888a03a5bed59ad33bc8e" ]
[ "src/util.py" ]
[ "import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ndef make_trainable(net, val):\n net.trainable = val\n for layer in net.layers:\n layer.trainable = val\n\n\ndef plot_loss(losses):\n plt.figure(figsize=(10, 8))\n plt.plot(losses['g'], label='generative loss')\n plt.plot(losses['d'], label='discriminitive loss')\n plt.legend()\n plt.show()\n\n\ndef render_bboxes(bboxes_batch, labels_batch, shape):\n renders = []\n\n for i in range(len(bboxes_batch)):\n bboxes = bboxes_batch[i]\n labels = labels_batch[i]\n canvas = np.zeros(shape, dtype=np.float32)\n canvas += 255\n\n for j in range(len(bboxes)):\n bbox = bboxes[j]\n top, left, bottom, right = bbox\n label = labels[j]\n color = (np.where(label==1)[0][0] + 1) * 10\n canvas[top:bottom, left:right, 0] = color\n\n canvas /= 255\n renders.append(canvas)\n\n return np.array(renders)\n\n\ndef save_batch(images, epoch, path, suffix=''):\n samples_path = os.path.join(path, 'samples')\n if not os.path.exists(samples_path):\n os.makedirs(samples_path)\n\n num_images = images.shape[0]\n num_rows = images.shape[1]\n num_cols = images.shape[2]\n\n canvas = np.zeros((num_rows, num_images * num_cols, 1), dtype=images.dtype)\n for i in range(num_images):\n canvas[0:num_rows, i * num_cols:(i + 1) * num_cols] = images[i]\n\n img = canvas\n img *= 255\n img = Image.fromarray(np.squeeze(img))\n img = img.convert('L')\n img.save(samples_path + f'/{epoch}_{suffix}.png')\n\n\ndef load_model(model, path, name):\n model_path = os.path.join(path, name + '.h5')\n model.load_weights(model_path)\n\n\ndef save_model(model, path, name):\n model_path = os.path.join(path, name + '.h5')\n model.save_weights(model_path)\n" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.where", "matplotlib.pyplot.show", "numpy.squeeze" ] ]
GBR-613/pyod
[ "dfafc57f74dc3d49d0166f21ab2ddb97e3d1d898", "dfafc57f74dc3d49d0166f21ab2ddb97e3d1d898", "bfbb297ac067c47488bcade77669c99de5a4838a" ]
[ "pyod/models/sod.py", "examples/generate_data_categorical_example.py", "pyod/test/test_cof.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Subspace Outlier Detection (SOD)\n\"\"\"\n# Author: Yahya Almardeny <almardeny@gmail.com>\n# License: BSD 2 clause\n\nimport numpy as np\nimport numba as nb\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.utils import check_array\n\nfrom ..utils.utility import check_parameter\nfrom .base import BaseDetector\n\n\n@nb.njit(parallel=True)\ndef _snn_imp(ind, ref_set_):\n \"\"\"Internal function for fast snn calculation\n\n Parameters\n ----------\n ind : int\n Indices return by kNN.\n\n ref_set_ : int, optional (default=10)\n specifies the number of shared nearest neighbors to create the\n reference set. Note that ref_set must be smaller than n_neighbors.\n\n \"\"\"\n n = ind.shape[0]\n _count = np.zeros(shape=(n, ref_set_), dtype=np.uint32)\n for i in nb.prange(n):\n temp = np.empty(n, dtype=np.uint32)\n test_element_set = set(ind[i])\n for j in nb.prange(n):\n temp[j] = len(set(ind[j]).intersection(test_element_set))\n temp[i] = np.iinfo(np.uint32).max\n _count[i] = np.argsort(temp)[::-1][1:ref_set_ + 1]\n\n return _count\n\n\nclass SOD(BaseDetector):\n \"\"\"Subspace outlier detection (SOD) schema aims to detect outlier in\n varying subspaces of a high dimensional feature space. For each data\n object, SOD explores the axis-parallel subspace spanned by the data\n object's neighbors and determines how much the object deviates from the\n neighbors in this subspace.\n\n See :cite:`kriegel2009outlier` for details.\n\n Parameters\n ----------\n n_neighbors : int, optional (default=20)\n Number of neighbors to use by default for k neighbors queries.\n\n ref_set: int, optional (default=10)\n specifies the number of shared nearest neighbors to create the\n reference set. Note that ref_set must be smaller than n_neighbors.\n\n alpha: float in (0., 1.), optional (default=0.8)\n specifies the lower limit for selecting subspace.\n 0.8 is set as default as suggested in the original paper.\n\n contamination : float in (0., 0.5), optional (default=0.1)\n The amount of contamination of the data set, i.e.\n the proportion of outliers in the data set. Used when fitting to\n define the threshold on the decision function.\n\n Attributes\n ----------\n decision_scores_ : numpy array of shape (n_samples,)\n The outlier scores of the training data.\n The higher, the more abnormal. Outliers tend to have higher\n scores. This value is available once the detector is\n fitted.\n\n threshold_ : float\n The threshold is based on ``contamination``. It is the\n ``n_samples * contamination`` most abnormal samples in\n ``decision_scores_``. The threshold is calculated for generating\n binary outlier labels.\n\n labels_ : int, either 0 or 1\n The binary labels of the training data. 0 stands for inliers\n and 1 for outliers/anomalies. It is generated by applying\n ``threshold_`` on ``decision_scores_``.\n \"\"\"\n\n def __init__(self, contamination=0.1, n_neighbors=20, ref_set=10,\n alpha=0.8):\n super(SOD, self).__init__(contamination=contamination)\n if isinstance(n_neighbors, int):\n check_parameter(n_neighbors, low=1, param_name='n_neighbors')\n else:\n raise ValueError(\n \"n_neighbors should be int. Got %s\" % type(n_neighbors))\n\n if isinstance(ref_set, int):\n check_parameter(ref_set, low=1, high=n_neighbors,\n param_name='ref_set')\n else:\n raise ValueError(\"ref_set should be int. Got %s\" % type(ref_set))\n\n if isinstance(alpha, float):\n check_parameter(alpha, low=0.0, high=1.0, param_name='alpha')\n else:\n raise ValueError(\"alpha should be float. 
Got %s\" % type(alpha))\n\n self.n_neighbors_ = n_neighbors\n self.ref_set_ = ref_set\n self.alpha_ = alpha\n self.decision_scores_ = None\n\n def fit(self, X, y=None):\n \"\"\"Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n\n # validate inputs X and y (optional)\n X = check_array(X)\n self._set_n_classes(y)\n self.decision_scores_ = self.decision_function(X)\n self._process_decision_scores()\n\n return self\n\n def decision_function(self, X):\n \"\"\"Predict raw anomaly score of X using the fitted detector.\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n \"\"\"\n return self._sod(X)\n\n def _snn(self, X):\n \"\"\"This function is called internally to calculate the shared nearest\n neighbors (SNN). SNN is reported to be more robust than k nearest\n neighbors.\n\n Returns\n -------\n snn_indices : numpy array of shape (n_shared_nearest_neighbors,)\n The indices of top k shared nearest neighbors for each observation.\n \"\"\"\n knn = NearestNeighbors(n_neighbors=self.n_neighbors_)\n knn.fit(X)\n # Get the knn index\n ind = knn.kneighbors(return_distance=False)\n return _snn_imp(ind, self.ref_set_)\n\n def _sod(self, X):\n \"\"\"This function is called internally to perform subspace outlier \n detection algorithm.\n \n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n \"\"\"\n ref_inds = self._snn(X)\n anomaly_scores = np.zeros(shape=(X.shape[0],))\n for i in range(X.shape[0]):\n obs = X[i]\n ref = X[ref_inds[i,],]\n means = np.mean(ref, axis=0) # mean of each column\n # average squared distance of the reference to the mean\n var_total = np.sum(np.sum(np.square(ref - means))) / self.ref_set_\n var_expect = self.alpha_ * var_total / X.shape[1]\n var_actual = np.var(ref, axis=0) # variance of each attribute\n var_inds = [1 if (j < var_expect) else 0 for j in var_actual]\n rel_dim = np.sum(var_inds)\n if rel_dim != 0:\n anomaly_scores[i] = np.sqrt(\n np.dot(var_inds, np.square(obs - means)) / rel_dim)\n\n return anomaly_scores\n", "# -*- coding: utf-8 -*-\n\"\"\"Example of using and visualizing ``generate_data_categorical`` function.\n\"\"\"\n# Author: Yahya Almardeny <almardeny@gmail.com>\n# License: BSD 2 clause\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# temporary solution for relative imports in case pyod is not installed\n# if pyod is installed, no need to use the following line\n\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), '..')))\n\nfrom pyod.utils.data import generate_data_categorical\n\nif __name__ == \"__main__\":\n contamination = 0.1 # percentage of outliers\n\n # Generate sample data in clusters\n X_train, X_test, y_train, y_test = generate_data_categorical \\\n (n_train=200, n_test=50,\n n_category_in=8, 
n_category_out=5,\n n_informative=1, n_features=1,\n contamination=contamination,\n shuffle=True, random_state=42)\n\n # note that visalizing it can only be in 1 dimension!\n cats = list(np.ravel(X_train))\n labels = list(y_train)\n fig, axs = plt.subplots(1, 2)\n axs[0].bar(cats, labels)\n axs[1].plot(cats, labels)\n plt.title('Synthetic Categorical Train Data')\n plt.show()\n\n cats = list(np.ravel(X_test))\n labels = list(y_test)\n fig, axs = plt.subplots(1, 2)\n axs[0].bar(cats, labels)\n axs[1].plot(cats, labels)\n plt.title('Synthetic Categorical Test Data')\n plt.show()\n", "# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport unittest\n# noinspection PyProtectedMember\nfrom numpy.testing import assert_allclose\nfrom numpy.testing import assert_array_less\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom sklearn.metrics import roc_auc_score\nfrom scipy.stats import rankdata\n\n# temporary solution for relative imports in case pyod is not installed\n# if pyod is installed, no need to use the following line\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom pyod.models.cof import COF\nfrom pyod.utils.data import generate_data\n\n\nclass TestCOF(unittest.TestCase):\n def setUp(self):\n self.n_train = 100\n self.n_test = 50\n self.contamination = 0.1\n self.roc_floor = 0.8\n self.X_train, self.y_train, self.X_test, self.y_test = generate_data(\n n_train=self.n_train, n_test=self.n_test,\n contamination=self.contamination, random_state=42)\n\n self.clf = COF(contamination=self.contamination)\n self.clf.fit(self.X_train)\n\n def test_parameters(self):\n assert (hasattr(self.clf, 'decision_scores_') and\n self.clf.decision_scores_ is not None)\n assert (hasattr(self.clf, 'labels_') and\n self.clf.labels_ is not None)\n assert (hasattr(self.clf, 'threshold_') and\n self.clf.threshold_ is not None)\n assert (hasattr(self.clf, 'n_neighbors_') and\n self.clf.n_neighbors_ is not None)\n\n def test_train_scores(self):\n assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])\n\n def test_prediction_scores(self):\n pred_scores = self.clf.decision_function(self.X_test)\n\n # check score shapes\n assert_equal(pred_scores.shape[0], self.X_test.shape[0])\n\n # check performance\n assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)\n\n def test_prediction_labels(self):\n pred_labels = self.clf.predict(self.X_test)\n assert_equal(pred_labels.shape, self.y_test.shape)\n\n def test_prediction_proba(self):\n pred_proba = self.clf.predict_proba(self.X_test)\n assert (pred_proba.min() >= 0)\n assert (pred_proba.max() <= 1)\n\n def test_prediction_proba_linear(self):\n pred_proba = self.clf.predict_proba(self.X_test, method='linear')\n assert (pred_proba.min() >= 0)\n assert (pred_proba.max() <= 1)\n\n def test_prediction_proba_unify(self):\n pred_proba = self.clf.predict_proba(self.X_test, method='unify')\n assert (pred_proba.min() >= 0)\n assert (pred_proba.max() <= 1)\n\n def test_prediction_proba_parameter(self):\n with assert_raises(ValueError):\n self.clf.predict_proba(self.X_test, method='something')\n\n def test_fit_predict(self):\n pred_labels = self.clf.fit_predict(self.X_train)\n assert_equal(pred_labels.shape, self.y_train.shape)\n\n def test_fit_predict_score(self):\n self.clf.fit_predict_score(self.X_test, self.y_test)\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='roc_auc_score')\n 
self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='prc_n_score')\n with assert_raises(NotImplementedError):\n self.clf.fit_predict_score(self.X_test, self.y_test,\n scoring='something')\n\n def test_predict_rank(self):\n pred_scores = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test)\n print(pred_ranks)\n\n # assert the order is preserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)\n assert_array_less(pred_ranks, self.X_train.shape[0] + 1)\n assert_array_less(-0.1, pred_ranks)\n\n def test_predict_rank_normalized(self):\n pred_scores = self.clf.decision_function(self.X_test)\n pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)\n\n # assert the order is preserved\n assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)\n assert_array_less(pred_ranks, 1.01)\n assert_array_less(-0.1, pred_ranks)\n\n def test_check_parameters(self):\n with assert_raises(ValueError):\n COF(contamination=0.1, n_neighbors=-1)\n with assert_raises(ValueError):\n COF(contamination=10., n_neighbors=5)\n with assert_raises(TypeError):\n COF(contamination=0.1, n_neighbors='not int')\n with assert_raises(TypeError):\n COF(contamination='not float', n_neighbors=5)\n cof_ = COF(contamination=0.1, n_neighbors=10000)\n cof_.fit(self.X_train)\n assert self.X_train.shape[0] > cof_.n_neighbors_\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.square", "numpy.empty", "numpy.zeros", "numpy.sum", "numpy.mean", "numpy.argsort", "sklearn.utils.check_array", "numpy.iinfo", "sklearn.neighbors.NearestNeighbors", "numpy.var" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.title", "numpy.ravel", "matplotlib.pyplot.subplots" ], [ "numpy.testing.assert_equal", "numpy.testing.assert_array_less", "scipy.stats.rankdata", "numpy.testing.assert_raises", "sklearn.metrics.roc_auc_score" ] ]
matchms/old-iomega-spec2vec
[ "216b8f8b5e4ffd320b4575326a05fb6c7cd28223" ]
[ "matchms/old/ms_similarity_classical.py" ]
[ "#\n# Spec2Vec\n#\n# Copyright 2019 Netherlands eScience Center\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numba\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy import spatial\n\n# Add multi core parallelization\nfrom concurrent.futures import ThreadPoolExecutor #, as_completed\n# TODO better use joblib ? or dask?\n\n\ndef mol_sim_matrix(fingerprints1,\n fingerprints2,\n method='cosine',\n filename=None,\n max_size=1000,\n print_progress=True):\n \"\"\"Create Matrix of all molecular similarities (based on molecular fingerprints).\n\n If filename is not None, the result will be saved as npy.\n To create molecular fingerprints see mol_fingerprints() function from MS_functions.\n\n Args:\n ----\n fingerprints1: list\n List of molecular fingerprints (numpy arrays).\n fingerprints2: list\n List of molecular fingerprints (numpy arrays).\n method: str\n Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.\n (see scipy.spatial.distance.cdist).\n filename: str\n Filename to save results to. OR: If file already exists it will be\n loaded instead.\n max_size: int\n Maximum size of (sub) all-vs-all matrix to handle in one go. Will split\n up larger matrices into\n max_size x max_size matrices.\n print_progress: bool, optional\n If True, print phase of the run to indicate progress. 
Default = True.\n \"\"\"\n\n if filename is not None:\n try:\n molecular_similarities = np.load(filename)\n print(\"Molecular similarity scores found and loaded.\")\n collect_new_data = False\n\n except FileNotFoundError:\n print(\"Could not find file \", filename)\n print(\"Molecular scores will be calculated from scratch.\")\n collect_new_data = True\n else:\n collect_new_data = True\n\n if collect_new_data:\n # Create array of all finterprints\n fingerprints_arr1 = np.array(fingerprints1)\n fingerprints_arr2 = np.array(fingerprints2)\n\n # Calculate all-vs-all similarity matrix (similarity here= 1-distance )\n matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])\n\n molecular_similarities = np.zeros(matrix_size)\n\n # Split large matrices up into smaller ones to track progress\n splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))\n count_splits = 0\n\n for i in range(int(np.ceil(matrix_size[0]/max_size))):\n low1 = i * max_size\n high1 = min((i + 1) * max_size, matrix_size[0])\n for j in range(int(np.ceil(matrix_size[1]/max_size))):\n low2 = j * max_size\n high2 = min((j + 1) * max_size, matrix_size[1])\n\n molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(\n fingerprints_arr1[low1:high1],\n fingerprints_arr2[low2:high2],\n method\n )\n # Track progress:\n count_splits += 1\n if print_progress:\n print('\\r',\n \"Calculated submatrix {} out of {}\".format(count_splits, splits),\n end=\"\")\n\n if print_progress:\n print(20 * '--')\n print(\"Succesfully calculated matrix with all-vs-all molecular similarity values.\")\n if filename is not None:\n np.save(filename, molecular_similarities)\n print(\"Matrix was saved under:\", filename)\n\n return molecular_similarities\n\n\n# --------------------------------------------------------------------------------------------------\n# ---------------------------- classical spectra similarity measures -------------------------------\n# --------------------------------------------------------------------------------------------------\n\n\ndef cosine_score_greedy(spec1,\n spec2,\n mass_shift,\n tol,\n min_intens=0,\n use_numba=True):\n \"\"\"Calculate cosine score between spectrum1 and spectrum2.\n\n If mass_shifted = True it will shift the spectra with respect to each other\n by difference in their parentmasses.\n\n Args:\n ----\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). 
Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n \"\"\"\n\n if spec1.shape[0] == 0 or spec2.shape[0] == 0:\n return 0.0, []\n\n # normalize intensities:\n spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])\n spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])\n\n # filter, if wanted:\n spec1 = spec1[spec1[:, 1] > min_intens, :]\n spec2 = spec2[spec2[:, 1] > min_intens, :]\n\n if use_numba:\n zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)\n else:\n zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)\n if mass_shift is not None \\\n and mass_shift != 0.0:\n if use_numba:\n nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)\n else:\n nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)\n matching_pairs = zero_pairs + nonzero_pairs\n else:\n matching_pairs = zero_pairs\n matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)\n\n used1 = set()\n used2 = set()\n score = 0.0\n used_matches = []\n for m in matching_pairs:\n if not m[0] in used1 and not m[1] in used2:\n score += m[2]\n used1.add(m[0])\n used2.add(m[1])\n used_matches.append(m)\n\n # Normalize score:\n score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))\n\n return score, used_matches\n\n\ndef cosine_score_hungarian(spec1,\n spec2,\n mass_shift,\n tol,\n min_intens=0):\n \"\"\"Taking full care of weighted bipartite matching problem.\n\n Use Hungarian algorithm (slow...)\n\n Args:\n --------\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n mass_shift: float\n Difference in parent mass of both spectra to account for. Set to 'None'\n when no shifting is desired --> back to normal cosine score.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). 
Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n \"\"\"\n\n if spec1.shape[0] == 0 or spec2.shape[0] == 0:\n return 0.0, []\n\n # Normalize intensities:\n spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])\n spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])\n\n # Filter, if wanted:\n spec1 = spec1[spec1[:, 1] > min_intens, :]\n spec2 = spec2[spec2[:, 1] > min_intens, :]\n\n zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)\n if mass_shift is not None \\\n and mass_shift != 0.0:\n nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)\n matching_pairs = zero_pairs + nonzero_pairs\n else:\n matching_pairs = zero_pairs\n matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)\n\n # Use Hungarian_algorithm:\n used_matches = []\n list1 = list(set([x[0] for x in matching_pairs]))\n list2 = list(set([x[1] for x in matching_pairs]))\n matrix_size = (len(list1), len(list2))\n matrix = np.ones(matrix_size)\n\n if len(matching_pairs) > 0:\n for m in matching_pairs:\n matrix[list1.index(m[0]), list2.index(m[1])] = 1 - m[2]\n\n # Use hungarian agorithm to solve the linear sum assignment problem\n row_ind, col_ind = linear_sum_assignment(matrix)\n score = len(row_ind) - matrix[row_ind, col_ind].sum()\n used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]\n # Normalize score:\n score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))\n else:\n score = 0.0\n\n return score, used_matches\n\n\ndef cosine_matrix_fast(spectra,\n tol,\n max_mz,\n min_mz=0):\n \"\"\"Calculates cosine similarity matrix.\n\n Be careful! Binning is here done by creating one-hot vectors.\n It is hence really actual \"bining\" and different from the tolerance-based\n approach used for the cosine_matrix or molnet_matrix!\n\n Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...\n \"\"\"\n\n for i, spectrum in enumerate(spectra):\n spec = np.array(spectrum.peaks.copy(), dtype=float)\n\n # Normalize intensities:\n spec[:, 1] = spec[:, 1]/np.max(spec[:, 1])\n\n if i == 0:\n vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')\n spec_vectors = np.zeros((len(spectra), vector.shape[0]))\n spec_vectors[0, :] = vector\n else:\n spec_vectors[i, :] = one_hot_spectrum(spec, tol,\n max_mz, shift=0,\n min_mz=min_mz,\n method='max')\n\n Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')\n return 1 - Cdist\n\n\ndef cosine_score_matrix(spectra,\n tol,\n max_mz=1000.0,\n # min_mz=0,\n min_intens=0,\n mass_shifting=False,\n method='hungarian',\n num_workers=4,\n filename=None,\n safety_points=None):\n \"\"\"Create Matrix of all modified cosine similarities.\n\n Takes some time to calculate, so better only do it once and save as npy.\n\n Now implemented: parallelization of code using concurrent.futures and numba options.\n\n spectra: list\n List of spectra (of Spectrum class)\n tol: float\n Tolerance to still count peaks a match (mz +- tolerance).\n max_mz: float\n Maxium m-z mass to take into account\n #min_mz: float\n # Minimum m-z mass to take into account\n min_intens: float\n Sets the minimum relative intensity peaks must have to be looked at for\n potential matches.\n mass_shifting: bool\n Set to 'True' if mass difference between spectra should be accounted for\n --> \"modified cosine\" score\n Set to 'False' for --> \"normal cosine\" score\n method: 'greedy', 'greedy-numba', 'hungarian'\n \"greedy\" will use Simon's molnet scoring which is faster than hungarian,\n 
but not 100% accurate\n regarding the weighted bipartite matching problem.\n \"hungarian\" will use the Hungarian algorithm, which is more accurate.\n Since its slower, numba is used here to compile in time.\n \"greedy-numba\" will use a (partly) numba compiled version of greedy.\n Much faster, but needs numba.\n num_workers: int\n Number of threads to use for calculation.\n filename: str/ None\n Filename to look for existing npy-file with molent matrix. Or, if not\n found, to use to save the newly calculated matrix.\n safety_points: int\n Number of safety points, i.e. number of times the modcos-matrix is saved\n during process. Set to 'None' to avoid saving matrix on the way.\n \"\"\"\n if filename is not None:\n if filename[-4:] != '.npy':\n filename = filename + '.npy'\n\n # Try loading saved data\n try:\n print(\"Loading similarity scores from\", filename)\n modcos_sim = np.load(filename)\n print(\"Loading min_match values from\", filename[:-4]+ \"_matches.npy\")\n modcos_matches = np.load(filename[:-4] + \"_matches.npy\")\n\n # Check if matrix was calculated to the end:\n diagonal = modcos_sim.diagonal()\n if np.min(diagonal) == 0:\n print(\"Uncomplete cosine similarity scores found and loaded.\")\n missing_scores = np.where(diagonal == 0)[0].astype(int)\n print(\"Missing cosine scores will be calculated.\")\n counter_total = int((len(spectra)**2)/2)\n counter_init = counter_total - np.sum(len(spectra) - missing_scores)\n print(\"About \", 100*(counter_init/counter_total),\n \"% of the values already completed.\")\n collect_new_data = True\n else:\n print(\"Complete cosine similarity scores found and loaded.\")\n missing_scores = []\n counter_init = 0\n collect_new_data = False\n\n except FileNotFoundError:\n print(\"Could not find file \", filename, \"or file\",\n filename[:-4] + \"_matches.npy\")\n if mass_shifting:\n print(\"Modified cosine scores will be calculated from scratch.\")\n else:\n print(\"Cosine scores will be calculated from scratch.\")\n collect_new_data = True\n missing_scores = np.arange(0, len(spectra))\n counter_init = 0\n else:\n collect_new_data = True\n missing_scores = np.arange(0, len(spectra))\n counter_init = 0\n\n if collect_new_data:\n if counter_init == 0:\n modcos_sim = np.zeros((len(spectra), len(spectra)))\n modcos_matches = np.zeros((len(spectra), len(spectra)))\n\n counter = counter_init\n if safety_points is not None:\n # Save modcos-matrix along process\n safety_save = int(((len(spectra)**2)/2)/safety_points)\n\n print(\"Calculate pairwise scores by\", num_workers, \"number of workers.\")\n for i in missing_scores: #range(n_start, len(spectra)):\n spec1 = np.array(spectra[i].peaks, dtype=float)\n spec1 = spec1[spec1[:, 0] < max_mz, :]\n parameter_collection = []\n for j in range(i, len(spectra)):\n spec2 = np.array(spectra[j].peaks, dtype=float)\n spec2 = spec2[spec2[:, 0] < max_mz, :]\n if mass_shifting:\n mass_shift = spectra[i].parent_mz - spectra[j].parent_mz\n else:\n mass_shift = None\n parameter_collection.append([spec1, spec2, i, j,\n mass_shift, tol, min_intens,\n method, counter])\n counter += 1\n\n # Create a pool of processes. 
For instance one for each CPU in your machine.\n modcos_pairs = []\n with ThreadPoolExecutor(max_workers=num_workers) as executor:\n futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]\n modcos_pairs.append(futures)\n\n for m, future in enumerate(modcos_pairs[0]):\n _, _, ind_i, ind_j, _, _, _, _, counting = parameter_collection[m]\n modcos_sim[ind_i, ind_j] = future.result()[0]\n modcos_matches[ind_i, ind_j] = future.result()[1]\n if filename is not None \\\n and safety_points is not None:\n if (counting+1) % safety_save == 0:\n np.save(filename, modcos_sim)\n np.save(filename[:-4] + \"_matches.npy\", modcos_matches)\n\n # Symmetric matrix --> fill\n for i in range(1, len(spectra)):\n for j in range(i):\n modcos_sim[i, j] = modcos_sim[j, i]\n modcos_matches[i, j] = modcos_matches[j, i]\n\n # Save final results\n if filename is not None:\n np.save(filename, modcos_sim)\n np.save(filename[:-4]+ \"_matches.npy\", modcos_matches)\n\n return modcos_sim, modcos_matches\n\n\n\ndef modcos_pair(X, len_spectra):\n \"\"\"Single molnet pair calculation\n \"\"\"\n spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter = X\n if method == 'greedy':\n molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,\n mass_shift, tol,\n min_intens=min_intens,\n use_numba=False)\n elif method == 'greedy-numba':\n molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,\n mass_shift, tol,\n min_intens=min_intens,\n use_numba=True)\n elif method == 'hungarian':\n molnet_pair, used_matches = cosine_score_hungarian(spectra_i, spectra_j,\n mass_shift, tol,\n min_intens=min_intens)\n else:\n print(\"Given method does not exist...\")\n\n if (counter+1) % 1000 == 0 or counter == len_spectra-1:\n print('\\r',\n ' Calculated MolNet for pair {} -- {}'.format(i, j),\n '. ( ', np.round(200*(counter+1)/len_spectra**2, 2), ' % done).',\n end=\"\")\n\n return molnet_pair, len(used_matches)\n\n\ndef one_hot_spectrum(spec,\n tol,\n max_mz,\n shift=0,\n min_mz=0,\n method='max'):\n \"\"\"Convert spectrum peaks into on-hot-vector\n\n method: str\n 'max' take highest intensity peak within every bin.\n 'sum' take sum of all peaks within every bin.\n \"\"\"\n dim_vector = int((max_mz - min_mz)/tol)\n one_hot_spec = np.zeros((dim_vector))\n idx = ((spec[:, 0] + shift)*1/tol).astype(int)\n idx[idx >= dim_vector] = 0\n idx[idx < 0] = 0\n if method == 'max':\n for id1 in set(idx):\n one_hot_spec[id1] = np.max(spec[(idx == id1), 1])\n elif method == 'sum':\n for id1 in set(idx):\n one_hot_spec[id1] = np.sum(spec[(idx == id1), 1])\n else:\n print(\"Method not known...\")\n return one_hot_spec\n\n\n@numba.njit\ndef find_pairs_numba(spec1, spec2, tol, shift=0):\n \"\"\"Find matching pairs between two spectra.\n\n Args\n ----\n spec1 : list of tuples\n List of (mz, intensity) tuples.\n spec2 : list of tuples\n List of (mz, intensity) tuples.\n tol : float\n Tolerance. Peaks will be considered a match when < tol appart.\n shift : float, optional\n Shift spectra peaks by shift. 
The default is 0.\n\n Returns\n -------\n matching_pairs : list\n List of found matching peaks.\n\n \"\"\"\n matching_pairs = []\n\n for idx in range(len(spec1)):\n intensity = spec1[idx, 1]\n matches = np.where((np.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tol))[0]\n for match in matches:\n matching_pairs.append((idx, match, intensity*spec2[match][1]))\n\n return matching_pairs\n\n\ndef find_pairs(spec1, spec2, tol, shift=0):\n \"\"\"Find matching pairs between two spectra.\n\n Args\n ----\n spec1 : list of tuples\n List of (mz, intensity) tuples.\n spec2 : list of tuples\n List of (mz, intensity) tuples.\n tol : float\n Tolerance. Peaks will be considered a match when < tol appart.\n shift : float, optional\n Shift spectra peaks by shift. The default is 0.\n\n Returns\n -------\n matching_pairs : list\n List of found matching peaks.\n\n \"\"\"\n # Sort peaks and losses by m/z\n spec1 = spec1[np.lexsort((spec1[:, 1], spec1[:, 0])), :]\n spec2 = spec2[np.lexsort((spec2[:, 1], spec2[:, 0])), :]\n\n matching_pairs = []\n spec2lowpos = 0\n spec2length = len(spec2)\n\n for idx in range(len(spec1)):\n mz = spec1[idx, 0]\n intensity = spec1[idx, 1]\n # Do we need to increase the lower idx?\n while spec2lowpos < spec2length and spec2[spec2lowpos][0] + shift < mz - tol:\n spec2lowpos += 1\n if spec2lowpos == spec2length:\n break\n spec2pos = spec2lowpos\n while(spec2pos < spec2length and spec2[spec2pos][0] + shift < mz + tol):\n matching_pairs.append((idx, spec2pos, intensity * spec2[spec2pos][1]))\n spec2pos += 1\n\n return matching_pairs" ]
[ [ "numpy.max", "numpy.array", "numpy.ceil", "numpy.zeros", "numpy.lexsort", "numpy.round", "numpy.sum", "numpy.ones", "numpy.load", "numpy.min", "numpy.save", "scipy.optimize.linear_sum_assignment", "numpy.where", "numpy.abs", "scipy.spatial.distance.cdist" ] ]
rsumner31/pymc3-2
[ "e824294ddfb45610536cad07394b8c290904c38d", "e824294ddfb45610536cad07394b8c290904c38d", "fde52a4a69be1b0887a2f7861801fb48c941bbe6" ]
[ "pymc3/distributions/mixture.py", "pymc3/plots/artists.py", "pymc3/gp/util.py" ]
[ "import numpy as np\nimport theano.tensor as tt\n\nfrom pymc3.util import get_variable_name\nfrom ..math import logsumexp\nfrom .dist_math import bound\nfrom .distribution import Discrete, Distribution, draw_values, generate_samples\nfrom .continuous import get_tau_sd, Normal\n\n\ndef all_discrete(comp_dists):\n \"\"\"\n Determine if all distributions in comp_dists are discrete\n \"\"\"\n if isinstance(comp_dists, Distribution):\n return isinstance(comp_dists, Discrete)\n else:\n return all(isinstance(comp_dist, Discrete) for comp_dist in comp_dists)\n\n\nclass Mixture(Distribution):\n R\"\"\"\n Mixture log-likelihood\n\n Often used to model subpopulation heterogeneity\n\n .. math:: f(x \\mid w, \\theta) = \\sum_{i = 1}^n w_i f_i(x \\mid \\theta_i)\n\n ======== ============================================\n Support :math:`\\cap_{i = 1}^n \\textrm{support}(f_i)`\n Mean :math:`\\sum_{i = 1}^n w_i \\mu_i`\n ======== ============================================\n\n Parameters\n ----------\n w : array of floats\n w >= 0 and w <= 1\n the mixture weights\n comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)\n or iterable of one-dimensional PyMC3 distributions the\n component distributions :math:`f_1, \\ldots, f_n`\n\n Example\n -------\n .. code-block:: python\n\n # 2-Mixture Poisson distribution\n with pm.Model() as model:\n lam = pm.Exponential('lam', lam=1, shape=(2,)) # `shape=(2,)` indicates two mixtures.\n\n # As we just need the logp, rather than add a RV to the model, we need to call .dist()\n components = pm.Poisson.dist(mu=lam, shape=(2,)) \n\n w = pm.Dirichlet('w', a=np.array([1, 1])) # two mixture component weights.\n\n like = pm.Mixture('like', w=w, comp_dists=components, observed=data)\n\n # 2-Mixture Poisson using iterable of distributions.\n with pm.Model() as model:\n lam1 = pm.Exponential('lam1', lam=1)\n lam2 = pm.Exponential('lam2', lam=1)\n\n pois1 = pm.Poisson.dist(mu=lam1)\n pois2 = pm.Poisson.dist(mu=lam2)\n\n w = pm.Dirichlet('w', a=np.array([1, 1]))\n\n like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)\n \"\"\"\n def __init__(self, w, comp_dists, *args, **kwargs):\n shape = kwargs.pop('shape', ())\n\n self.w = w = tt.as_tensor_variable(w)\n self.comp_dists = comp_dists\n\n defaults = kwargs.pop('defaults', [])\n\n if all_discrete(comp_dists):\n dtype = kwargs.pop('dtype', 'int64')\n else:\n dtype = kwargs.pop('dtype', 'float64')\n\n try:\n self.mean = (w * self._comp_means()).sum(axis=-1)\n\n if 'mean' not in defaults:\n defaults.append('mean')\n except AttributeError:\n pass\n\n try:\n comp_modes = self._comp_modes()\n comp_mode_logps = self.logp(comp_modes)\n self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]\n\n if 'mode' not in defaults:\n defaults.append('mode')\n except AttributeError:\n pass\n\n super(Mixture, self).__init__(shape, dtype, defaults=defaults,\n *args, **kwargs)\n\n def _comp_logp(self, value):\n comp_dists = self.comp_dists\n\n try:\n value_ = value if value.ndim > 1 else tt.shape_padright(value)\n\n return comp_dists.logp(value_)\n except AttributeError:\n return tt.stack([comp_dist.logp(value) for comp_dist in comp_dists],\n axis=1)\n\n def _comp_means(self):\n try:\n return tt.as_tensor_variable(self.comp_dists.mean)\n except AttributeError:\n return tt.stack([comp_dist.mean for comp_dist in self.comp_dists],\n axis=1)\n\n def _comp_modes(self):\n try:\n return tt.as_tensor_variable(self.comp_dists.mode)\n except AttributeError:\n return tt.stack([comp_dist.mode for comp_dist in 
self.comp_dists],\n axis=1)\n\n def _comp_samples(self, point=None, size=None, repeat=None):\n try:\n samples = self.comp_dists.random(point=point, size=size, repeat=repeat)\n except AttributeError:\n samples = np.column_stack([comp_dist.random(point=point, size=size, repeat=repeat)\n for comp_dist in self.comp_dists])\n\n return np.squeeze(samples)\n\n def logp(self, value):\n w = self.w\n\n return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),\n w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),\n broadcast_conditions=False)\n\n def random(self, point=None, size=None, repeat=None):\n def random_choice(*args, **kwargs):\n w = kwargs.pop('w')\n w /= w.sum(axis=-1, keepdims=True)\n k = w.shape[-1]\n\n if w.ndim > 1:\n return np.row_stack([np.random.choice(k, p=w_) for w_ in w])\n else:\n return np.random.choice(k, p=w, *args, **kwargs)\n\n w = draw_values([self.w], point=point)[0]\n\n w_samples = generate_samples(random_choice,\n w=w,\n broadcast_shape=w.shape[:-1] or (1,),\n dist_shape=self.shape,\n size=size).squeeze()\n comp_samples = self._comp_samples(point=point, size=size, repeat=repeat)\n\n if comp_samples.ndim > 1:\n return np.squeeze(comp_samples[np.arange(w_samples.size), w_samples])\n else:\n return np.squeeze(comp_samples[w_samples])\n\n\nclass NormalMixture(Mixture):\n R\"\"\"\n Normal mixture log-likelihood\n\n .. math::\n\n f(x \\mid w, \\mu, \\sigma^2) = \\sum_{i = 1}^n w_i N(x \\mid \\mu_i, \\sigma^2_i)\n\n ======== =======================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\sum_{i = 1}^n w_i \\mu_i`\n Variance :math:`\\sum_{i = 1}^n w_i^2 \\sigma^2_i`\n ======== =======================================\n\n Parameters\n ----------\n w : array of floats\n w >= 0 and w <= 1\n the mixture weights\n mu : array of floats\n the component means\n sd : array of floats\n the component standard deviations\n tau : array of floats\n the component precisions\n\n Note: You only have to pass in sd or tau, but not both.\n \"\"\"\n def __init__(self, w, mu, *args, **kwargs):\n _, sd = get_tau_sd(tau=kwargs.pop('tau', None),\n sd=kwargs.pop('sd', None))\n \n distshape = np.broadcast(mu, sd).shape\n self.mu = mu = tt.as_tensor_variable(mu)\n self.sd = sd = tt.as_tensor_variable(sd)\n\n if not distshape: \n distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape\n\n super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),\n *args, **kwargs)\n\n def _repr_latex_(self, name=None, dist=None):\n if dist is None:\n dist = self\n mu = dist.mu\n w = dist.w\n sd = dist.sd\n name = r'\\text{%s}' % name\n return r'${} \\sim \\text{{NormalMixture}}(\\mathit{{w}}={},~\\mathit{{mu}}={},~\\mathit{{sigma}}={})$'.format(name,\n get_variable_name(w),\n get_variable_name(mu),\n get_variable_name(sd))\n", "import numpy as np\nfrom scipy.stats import mode\n\nfrom pymc3.stats import hpd\nfrom .kdeplot import fast_kde, kdeplot\n\n\ndef _histplot_bins(column, bins=100):\n \"\"\"Helper to get bins for histplot.\"\"\"\n col_min = np.min(column)\n col_max = np.max(column)\n return range(col_min, col_max + 2, max((col_max - col_min) // bins, 1))\n\n\ndef histplot_op(ax, data, alpha=.35):\n \"\"\"Add a histogram for each column of the data to the provided axes.\"\"\"\n hs = []\n for column in data.T:\n hs.append(ax.hist(column, bins=_histplot_bins(\n column), alpha=alpha, align='left'))\n ax.set_xlim(np.min(data) - 0.5, np.max(data) + 0.5)\n return hs\n\n\ndef kdeplot_op(ax, data, bw, prior=None, prior_alpha=1, prior_style='--'):\n 
\"\"\"Get a list of density and likelihood plots, if a prior is provided.\"\"\"\n ls = []\n pls = []\n errored = []\n for i, d in enumerate(data.T):\n try:\n density, l, u = fast_kde(d, bw)\n x = np.linspace(l, u, len(density))\n if prior is not None:\n p = prior.logp(x).eval()\n pls.append(ax.plot(x, np.exp(p),\n alpha=prior_alpha, ls=prior_style))\n\n ls.append(ax.plot(x, density))\n except ValueError:\n errored.append(str(i))\n\n if errored:\n ax.text(.27, .47, 'WARNING: KDE plot failed for: ' + ','.join(errored),\n bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10},\n style='italic')\n\n return ls, pls\n\n\ndef plot_posterior_op(trace_values, ax, bw, kde_plot, point_estimate, round_to,\n alpha_level, ref_val, rope, text_size=16, **kwargs):\n \"\"\"Artist to draw posterior.\"\"\"\n def format_as_percent(x, round_to=0):\n return '{0:.{1:d}f}%'.format(100 * x, round_to)\n\n def display_ref_val(ref_val):\n less_than_ref_probability = (trace_values < ref_val).mean()\n greater_than_ref_probability = (trace_values >= ref_val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n ref_val,\n format_as_percent(greater_than_ref_probability, 1))\n ax.axvline(ref_val, ymin=0.02, ymax=.75, color='g',\n linewidth=4, alpha=0.65)\n ax.text(trace_values.mean(), plot_height * 0.6, ref_in_posterior,\n size=text_size, horizontalalignment='center')\n\n def display_rope(rope):\n ax.plot(rope, (plot_height * 0.02, plot_height * 0.02),\n linewidth=20, color='r', alpha=0.75)\n text_props = dict(size=text_size, horizontalalignment='center', color='r')\n ax.text(rope[0], plot_height * 0.14, rope[0], **text_props)\n ax.text(rope[1], plot_height * 0.14, rope[1], **text_props)\n\n def display_point_estimate():\n if not point_estimate:\n return\n if point_estimate not in ('mode', 'mean', 'median'):\n raise ValueError(\n \"Point Estimate should be in ('mode','mean','median')\")\n if point_estimate == 'mean':\n point_value = trace_values.mean()\n elif point_estimate == 'mode':\n if isinstance(trace_values[0], float):\n density, l, u = fast_kde(trace_values, bw)\n x = np.linspace(l, u, len(density))\n point_value = x[np.argmax(density)]\n else:\n point_value = mode(trace_values.round(round_to))[0][0]\n elif point_estimate == 'median':\n point_value = np.median(trace_values)\n point_text = '{point_estimate}={point_value:.{round_to}f}'.format(point_estimate=point_estimate,\n point_value=point_value, round_to=round_to)\n\n ax.text(point_value, plot_height * 0.8, point_text,\n size=text_size, horizontalalignment='center')\n\n def display_hpd():\n hpd_intervals = hpd(trace_values, alpha=alpha_level)\n ax.plot(hpd_intervals, (plot_height * 0.02,\n plot_height * 0.02), linewidth=4, color='k')\n ax.text(hpd_intervals[0], plot_height * 0.07,\n hpd_intervals[0].round(round_to),\n size=text_size, horizontalalignment='right')\n ax.text(hpd_intervals[1], plot_height * 0.07,\n hpd_intervals[1].round(round_to),\n size=text_size, horizontalalignment='left')\n ax.text((hpd_intervals[0] + hpd_intervals[1]) / 2, plot_height * 0.2,\n format_as_percent(1 - alpha_level) + ' HPD',\n size=text_size, horizontalalignment='center')\n\n def format_axes():\n ax.yaxis.set_ticklabels([])\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(True)\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.tick_params(axis='x', direction='out', width=1, length=3,\n color='0.5', 
labelsize=text_size)\n ax.spines['bottom'].set_color('0.5')\n\n def set_key_if_doesnt_exist(d, key, value):\n if key not in d:\n d[key] = value\n\n if kde_plot and isinstance(trace_values[0], float):\n kdeplot(trace_values, alpha=kwargs.pop('alpha', 0.35), bw=bw, ax=ax, **kwargs)\n\n else:\n set_key_if_doesnt_exist(kwargs, 'bins', 30)\n set_key_if_doesnt_exist(kwargs, 'edgecolor', 'w')\n set_key_if_doesnt_exist(kwargs, 'align', 'right')\n ax.hist(trace_values, **kwargs)\n\n plot_height = ax.get_ylim()[1]\n\n format_axes()\n display_hpd()\n display_point_estimate()\n if ref_val is not None:\n display_ref_val(ref_val)\n if rope is not None:\n display_rope(rope)\n", "from scipy.cluster.vq import kmeans\nimport numpy as np\nimport pymc3 as pm\nimport theano.tensor as tt\n\n\ncholesky = pm.distributions.dist_math.Cholesky(nofail=True, lower=True)\nsolve_lower = tt.slinalg.Solve(A_structure='lower_triangular')\nsolve_upper = tt.slinalg.Solve(A_structure='upper_triangular')\nsolve = tt.slinalg.Solve(A_structure='general')\n\n\ndef infer_shape(X, n_points=None):\n if n_points is None:\n try:\n n_points = np.int(X.shape[0])\n except TypeError:\n raise TypeError(\"Cannot infer 'shape', provide as an argument\")\n return n_points\n\n\ndef stabilize(K):\n \"\"\" adds small diagonal to a covariance matrix \"\"\"\n return K + 1e-6 * tt.identity_like(K)\n\n\ndef kmeans_inducing_points(n_inducing, X):\n # first whiten X\n if isinstance(X, tt.TensorConstant):\n X = X.value\n elif isinstance(X, (np.ndarray, tuple, list)):\n X = np.asarray(X)\n else:\n raise TypeError((\"To use K-means initialization, \"\n \"please provide X as a type that \"\n \"can be cast to np.ndarray, instead \"\n \"of {}\".format(type(X))))\n scaling = np.std(X, 0)\n # if std of a column is very small (zero), don't normalize that column\n scaling[scaling <= 1e-6] = 1.0\n Xw = X / scaling\n Xu, distortion = kmeans(Xw, n_inducing)\n return Xu * scaling\n\n\ndef conditioned_vars(varnames):\n \"\"\" Decorator for validating attrs that are conditioned on. \"\"\"\n def gp_wrapper(cls):\n def make_getter(name):\n def getter(self):\n value = getattr(self, name, None)\n if value is None:\n raise AttributeError((\"'{}' not set. Provide as argument \"\n \"to condition, or call 'prior' \"\n \"first\".format(name.lstrip(\"_\"))))\n else:\n return value\n return getattr(self, name)\n return getter\n\n def make_setter(name):\n def setter(self, val):\n setattr(self, name, val)\n return setter\n\n for name in varnames:\n getter = make_getter('_' + name)\n setter = make_setter('_' + name)\n setattr(cls, name, property(getter, setter))\n return cls\n return gp_wrapper\n\n\ndef plot_gp_dist(ax, samples, x, plot_samples=True, palette=\"Reds\"):\n \"\"\" A helper function for plotting 1D GP posteriors from trace \"\"\"\n import matplotlib.pyplot as plt\n\n cmap = plt.get_cmap(palette)\n percs = np.linspace(51, 99, 40)\n colors = (percs - np.min(percs)) / (np.max(percs) - np.min(percs))\n samples = samples.T\n x = x.flatten()\n for i, p in enumerate(percs[::-1]):\n upper = np.percentile(samples, p, axis=1)\n lower = np.percentile(samples, 100-p, axis=1)\n color_val = colors[i]\n ax.fill_between(x, upper, lower, color=cmap(color_val), alpha=0.8)\n if plot_samples:\n # plot a few samples\n idx = np.random.randint(0, samples.shape[1], 30)\n ax.plot(x, samples[:,idx], color=cmap(0.9), lw=1, alpha=0.1)\n\n\n\n" ]
[ [ "numpy.arange", "numpy.squeeze", "numpy.random.choice", "numpy.broadcast" ], [ "numpy.max", "numpy.median", "numpy.min", "numpy.exp", "numpy.argmax" ], [ "numpy.max", "numpy.int", "numpy.asarray", "scipy.cluster.vq.kmeans", "numpy.percentile", "matplotlib.pyplot.get_cmap", "numpy.min", "numpy.std", "numpy.random.randint", "numpy.linspace" ] ]
zonemercy/Kaggle
[ "35ecb08272b6491f5e6756c97c7dec9c46a13a43" ]
[ "quora/pyfm/generate_interaction.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler\nfrom sklearn.decomposition import TruncatedSVD,PCA\nfrom sklearn.metrics.pairwise import cosine_similarity,pairwise_distances\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nSEED = 2048\nnp.random.seed(SEED)\nPATH = os.path.expanduser(\"~\") + \"/data/quora/\"\n\ntrain = pd.read_csv(PATH + \"train_porter.csv\")#, nrows=5000)\ntest = pd.read_csv(PATH + \"test_porter.csv\")#, nrows=5000)\ntest['is_duplicated'] = [-1]*test.shape[0]\n\nlen_train = train.shape[0]\n\ndata_all = pd.concat([train,test])\n\ndef calc_set_intersection(obj,target):\n\ta = set(obj.split())\n\tb = set(target.split())\n\treturn (len(a.intersection(b))*1.0) / (len(a)*1.0)\n\nprint('Generate intersection')\ntrain_interaction = train.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)\ntest_interaction = test.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)\npd.to_pickle(train_interaction,PATH+\"train_interaction.pkl\")\npd.to_pickle(test_interaction,PATH+\"test_interaction.pkl\")\n\nprint('Generate porter intersection')\ntrain_porter_interaction = train.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)\ntest_porter_interaction = test.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)\npd.to_pickle(train_porter_interaction, PATH+\"train_porter_interaction.pkl\")\npd.to_pickle(test_porter_interaction, PATH+\"test_porter_interaction.pkl\")" ]
[ [ "numpy.random.seed", "pandas.read_csv", "pandas.to_pickle", "pandas.concat" ] ]
menchelab/UMAPanalysis
[ "09f9b4a7823f6eceb6b40e25ee21412f3bf1c7fe" ]
[ "src/classification/predict_with_umap.py" ]
[ "import sys\nimport re\nimport pandas as pd\n\nnetwork_filename = sys.argv[1]\nm = re.match(\"networks/(?P<dataset>.*?)_similarity\", network_filename)\ndataset = m.groupdict()['dataset']\n\n\nG=nx.read_gml(network_filename)\nlabels=pd.read_csv(f\"munged_data/{dataset}/labels.csv\", index_col=0)\nmetadata = pd.read_csv(f\"data/intermediate/{dataset}/metadata.csv\", index_col=0)\nfeatures = pd.read_csv(f\"data/intermediate/{dataset}/features.csv\", index_col=0)\n\ntrain = pd.read_csv(f\"data/intermediate/{dataset}/train.csv\", header = None)[0].values\ntesting = pd.Series({i:(i in test) for i in labels.index})\nlabels = labels.mask(testing, other=0)\n\npropagator,nodes=make_propagator(G)\ndf,df_time=propagate(propagator, nodes, moas)\ndf.to_csv(f\"predictions/{dataset}/predicted_by_propagation.csv\")\n\n" ]
[ [ "pandas.read_csv", "pandas.Series" ] ]
SimoneDeGasperis/telluric
[ "2fe4388f4a69a5a939078a876943c5f4620693ca" ]
[ "tests/test_georaster_tiling.py" ]
[ "import os\nimport rasterio\nimport mercantile\nimport numpy as np\n\nimport pytest\nfrom tempfile import NamedTemporaryFile, TemporaryDirectory\n\nfrom affine import Affine\n\nfrom unittest import TestCase\nfrom unittest.mock import patch\nfrom datetime import datetime\nfrom shapely.geometry import Polygon\n\nfrom rasterio.enums import Resampling\nfrom rasterio.windows import Window\nfrom rasterio.crs import CRS\n\nfrom telluric import GeoRaster2, GeoVector\nfrom telluric.constants import WEB_MERCATOR_CRS, WGS84_CRS\nfrom telluric.georaster import MERCATOR_RESOLUTION_MAPPING, GeoRaster2Error, GeoRaster2IOError\nfrom telluric.util.general import convert_resolution_from_meters_to_deg\n\nimport sys\nimport logging\nimport tempfile\n\n\nlog = logging.getLogger('rasterio._gdal')\nlog.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(message)s')\nch.setFormatter(formatter)\nlog.addHandler(ch)\n\n\nmanualtest = pytest.mark.skipif(\"TEST_MANUAL\" not in os.environ, reason=\"skip on auto testing\")\nwindow_data = pytest.mark.skip('pending decission of consistency in results between rasterio read and reproject')\nframing = pytest.mark.skip('witing for framing and get_window with boundless false')\n\ntiles = {\n 10: (579, 394, 10),\n 11: (1159, 789, 11),\n 12: (2319, 1578, 12),\n 14: (9277, 6312, 14),\n 15: (18554, 12624, 15),\n 17: (74216, 50496, 17),\n 18: (148433, 100994, 18)\n}\n\n\nclass GeoRaster2TilesTestGeneral(TestCase):\n \"\"\"GeoRaster2 Tiles general tests.\"\"\"\n\n def test_raise_exception_on_bad_file_path(self):\n vr = GeoRaster2.open('stam')\n with self.assertRaises(GeoRaster2IOError):\n vr.get_tile(1, 2, 3)\n\n def test_raise_exception_on_bad_raster_url(self):\n vr = GeoRaster2.open('http://stam')\n with self.assertRaises(GeoRaster2IOError):\n vr.get_tile(1, 2, 3)\n\n def test_raise_exception_on_bad_file_path_save_cog(self):\n vr = GeoRaster2.open('stam')\n with self.assertRaises(GeoRaster2IOError):\n vr.save_cloud_optimized('dest_file')\n\n def test_raise_exception_on_bad_raster_url_save_cog(self):\n vr = GeoRaster2.open('http://stam')\n with self.assertRaises(GeoRaster2IOError):\n vr.save_cloud_optimized('dest_file')\n\n\nclass BaseGeoRasterTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n path = \"./tests/data/raster/raster_for_test.tif\"\n cls.read_only_vgr = GeoRaster2.open(path)\n path = \"./tests/data/raster/raster_wgs84.tif\"\n cls.read_only_vgr_wgs84 = GeoRaster2.open(path)\n\n def read_only_virtual_geo_raster(self):\n return self.read_only_vgr\n\n def read_only_virtual_geo_raster_wgs84(self):\n return self.read_only_vgr_wgs84\n\n\nclass GeoRaster2TestGetTile(BaseGeoRasterTestCase):\n \"\"\"GeoRaster2 get tile tests.\"\"\"\n\n def test_geo_bounding_tile(self):\n gr = self.read_only_virtual_geo_raster()\n gv = gr.footprint().reproject(WGS84_CRS)\n bounding_tile = mercantile.bounding_tile(*gv.get_shape(gv.crs).bounds)\n self.assertEqual(bounding_tile, (37108, 25248, 16))\n\n @patch.object(GeoRaster2, 'crop')\n def test_fails_with_empty_raster_for_tile_out_of_raster_area(self, mock__crop):\n for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:\n r = raster.get_tile(16384, 16383, 15)\n self.assertTrue((r.image.data == 0).all())\n self.assertTrue((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 256, 256))\n self.assertEqual(r.crs, WEB_MERCATOR_CRS)\n mock__crop.assert_not_called()\n\n def 
test_get_all_raster_in_a_single_tile(self):\n for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:\n p = raster.footprint().reproject(WGS84_CRS).centroid\n r = raster.get_tile(*mercantile.tile(lng=p.x, lat=p.y, zoom=11))\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 256, 256))\n self.assertEqual(r.crs, WEB_MERCATOR_CRS)\n\n def test_get_tile_for_different_zoom_levels(self):\n for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:\n for zoom in tiles:\n r = raster.get_tile(*tiles[zoom])\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 256, 256))\n\n def test_get_tile_from_different_crs_tile_is_not_tilted(self):\n raster = self.read_only_virtual_geo_raster_wgs84()\n r = raster.get_tile(*tiles[18])\n self.assertEqual(1, len(np.unique(r.image.mask)))\n\n def test_get_tile_from_different_crs_tile_is_not_tilted_with_different_buffer(self):\n raster = self.read_only_virtual_geo_raster_wgs84()\n os.environ[\"TELLURIC_GET_TILE_BUFFER\"] = \"0\"\n try:\n r = raster.get_tile(*tiles[18])\n except Exception:\n del os.environ[\"TELLURIC_GET_TILE_BUFFER\"]\n self.assertEqual(2, len(np.unique(r.image.mask)))\n\n def test_get_entire_all_raster(self):\n vr = self.read_only_virtual_geo_raster()\n roi = GeoVector.from_xyz(37108, 25248, 16)\n r = vr.crop(roi)\n\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.shape, (3, 612, 612))\n\n def test_fails_with_empty_raster_for_tile_out_of_raster_area_with_no_tile_size(self):\n vr = self.read_only_virtual_geo_raster()\n roi = GeoVector.from_xyz(16384, 16383, 15)\n r = vr.crop(roi)\n self.assertTrue((r.image.data == 0).all())\n self.assertTrue((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 1223, 1223))\n\n def test_get_window_of_full_resolution(self):\n vr = self.read_only_virtual_geo_raster()\n win = Window(0, 0, 300, 300)\n r = vr.get_window(win)\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 300, 300))\n\n def test_get_window_resize_to_256(self):\n vr = self.read_only_virtual_geo_raster()\n win = Window(0, 0, 300, 300)\n r = vr.get_window(win, xsize=256, ysize=256)\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 256, 256))\n\n def test_get_window_of_non_square_resize_to_256(self):\n vr = self.read_only_virtual_geo_raster()\n win = Window(0, 0, 300, 400)\n r = vr.get_window(win, xsize=256, ysize=256)\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 256, 256))\n\n def test_get_window_of_non_square_keeps_size_proportions_for_give_xsize(self):\n vr = self.read_only_virtual_geo_raster()\n win = Window(0, 0, 300, 400)\n r = vr.get_window(win, xsize=150)\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 200, 150))\n\n def test_get_window_of_non_square_keeps_size_proportions_for_give_ysize(self):\n vr = self.read_only_virtual_geo_raster()\n win = Window(0, 0, 300, 400)\n r = vr.get_window(win, ysize=200)\n self.assertFalse((r.image.data == 0).all())\n self.assertFalse((r.image.mask).all())\n self.assertEqual(r.image.shape, (3, 200, 150))\n\n def 
test_get_window_width_height_correctness(self):\n # See https://publicgitlab.satellogic.com/telluric/telluric/issues/58\n vr = self.read_only_virtual_geo_raster()\n expected_height = 200\n win = Window(0, vr.height - expected_height, 1, expected_height)\n r = vr.get_window(win)\n self.assertEqual(r.image.shape, (3, expected_height, 1))\n\n\nclass GeoRasterCropTest(BaseGeoRasterTestCase):\n metric_affine = Affine(1, 0.0, 2653750, 0.0, -1, 4594461)\n\n def test_crop_in_memory_and_off_memory_without_resizing_are_the_same(self):\n coords = mercantile.xy_bounds(*tiles[18])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster2 = GeoRaster2.open(rf.name)\n off_memory_crop = raster2.crop(shape)\n # load the image data\n raster2.image\n in_memory_crop = raster2.crop(shape)\n self.assertEqual(off_memory_crop, in_memory_crop)\n\n @window_data\n def test_crop_and_get_tile_do_the_same(self):\n coords = mercantile.xy_bounds(*tiles[15])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster2 = GeoRaster2.open(rf.name)\n tile15 = raster2.get_tile(*tiles[15])\n # load the image data\n raster2.image\n cropped15 = raster2.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])\n self.assertEqual(tile15, cropped15)\n\n @window_data\n def test_crop_and_get_tile_do_the_same_when_image_is_populated(self):\n coords = mercantile.xy_bounds(*tiles[15])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster = GeoRaster2.open(rf.name)\n tile15 = raster.get_tile(*tiles[15])\n raster._populate_from_rasterio_object(read_image=True)\n cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])\n self.assertEqual(tile15, cropped_15)\n\n @window_data\n def test_crop_image_from_and_get_win_do_the_same_with_resize(self):\n bounds = (2, 3, 4, 5)\n win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])\n xsize = round((bounds[2] - bounds[0]) / 2)\n ysize = round((bounds[3] - bounds[1]) / 2)\n raster = self.read_only_virtual_geo_raster()\n\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster.save('area.tif', tags={'AREA_OR_POINT': 'Area'})\n raster.save('point.tif', tags={'AREA_OR_POINT': 'Point'})\n saved_raster = GeoRaster2.open(rf.name)\n cropped_win = saved_raster.get_window(win, xsize=xsize, ysize=ysize)\n saved_raster_area = GeoRaster2.open('area.tif')\n cropped_win_area = saved_raster_area.get_window(win, xsize=xsize, ysize=ysize)\n saved_raster_point = GeoRaster2.open('point.tif')\n cropped_win_point = saved_raster_point.get_window(win, xsize=xsize, ysize=ysize)\n\n cropped_image = raster._crop(bounds, xsize=xsize, ysize=ysize)\n\n print('cropped_win_area pixels\\n', cropped_win_area.image)\n print('cropped_win_point pixels\\n', cropped_win_point.image)\n print('cropped_win pixels\\n', cropped_win.image)\n print('cropped_image pixels\\n', cropped_image.image)\n if (cropped_win_point == cropped_win_area):\n print('point == area')\n if (cropped_image == cropped_win_area):\n print('image == area')\n if (cropped_image == cropped_win_point):\n print('image == point')\n if (cropped_win == 
cropped_win_area):\n print('win == area')\n if (cropped_win == cropped_win_point):\n print('win == point')\n\n self.assertEqual(cropped_image, cropped_win)\n\n @framing\n def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_high_zoom(self):\n coords = mercantile.xy_bounds(*tiles[17])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster = GeoRaster2.open(rf.name)\n raster._populate_from_rasterio_object(read_image=True)\n tile17 = raster.get_tile(*tiles[17])\n cropped_17 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[17])\n self.assertEqual(tile17, cropped_17)\n\n @framing\n def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_mid_zoom(self):\n coords = mercantile.xy_bounds(*tiles[15])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster = GeoRaster2.open(rf.name)\n raster._populate_from_rasterio_object(read_image=True)\n tile15 = raster.get_tile(*tiles[15])\n cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])\n self.assertEqual(tile15, cropped_15)\n\n @framing\n def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_for_low_zoom(self):\n coords = mercantile.xy_bounds(*tiles[11])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster = GeoRaster2.open(rf.name)\n raster._populate_from_rasterio_object(read_image=True)\n tile11 = raster.get_tile(*tiles[11])\n cropped_11 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[11])\n self.assertEqual(tile11, cropped_11)\n\n def test_crop_image_from_and_get_win_do_the_same_full_resolution(self):\n bounds = (20, 13, 40, 15)\n win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n saved_raster = GeoRaster2.open(rf.name)\n cropped_win = saved_raster.get_window(win)\n cropped_image = raster._crop(bounds)\n self.assertEqual(cropped_image, cropped_win)\n\n @patch.object(GeoRaster2, '_crop')\n def test_crop_use_crop_image_for_a_loaded_image(self, mock__crop):\n coords = mercantile.xy_bounds(*tiles[15])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])\n # a bare `mock.called_once` attribute is auto-created and always truthy; use the real assertion\n mock__crop.assert_called_once()\n\n @patch.object(GeoRaster2, 'get_window')\n def test_crop_use_get_window_for_a_not_loaded_image(self, mock_get_window):\n coords = mercantile.xy_bounds(*tiles[15])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n with NamedTemporaryFile(mode='w+b', suffix=\".tif\") as rf:\n raster.save(rf.name)\n raster = GeoRaster2.open(rf.name)\n raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])\n mock_get_window.assert_called_once()\n\n def test_crop_returns_full_resolution_as_default(self):\n coords = mercantile.xy_bounds(*tiles[17])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n _, win = raster._vector_to_raster_bounds(shape)\n cropped = 
raster.crop(shape)\n self.assertEqual(cropped.shape, (raster.num_bands, round(win.height), round(win.width)))\n self.assertEqual(cropped.affine[0], raster.affine[0])\n\n def test_memory_crop_returns_resized_resolution(self):\n coords = mercantile.xy_bounds(*tiles[18])\n shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)\n raster = self.read_only_virtual_geo_raster()\n cropped = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[18])\n self.assertEqual(cropped.shape, (raster.num_bands, 256, 256))\n self.assertAlmostEqual(cropped.affine[0], MERCATOR_RESOLUTION_MAPPING[18], 2)\n\n def test_geographic_crop(self):\n raster = self.read_only_virtual_geo_raster_wgs84()\n rhombus_on_image = Polygon([[0, 2], [1, 1], [2, 2], [1, 3]]) # in pixels\n rhombus_world = raster.to_world(rhombus_on_image)\n cropped = raster.crop(rhombus_world)\n r = raster[0:2, 1:3]\n assert cropped == r\n\n def test_geographic_crop_with_resize(self):\n coords = mercantile.xy_bounds(*tiles[17])\n raster = self.read_only_virtual_geo_raster_wgs84()\n vector = GeoVector(Polygon.from_bounds(*coords), crs=WEB_MERCATOR_CRS)\n x_ex_res, y_ex_res = convert_resolution_from_meters_to_deg(\n self.metric_affine[6], MERCATOR_RESOLUTION_MAPPING[17])\n cropped = raster.crop(vector, (x_ex_res, y_ex_res))\n self.assertAlmostEqual(cropped.affine[0], x_ex_res)\n self.assertAlmostEqual(abs(cropped.affine[4]), y_ex_res, 6)\n\n def test_crop_raises_error_for_impossible_transformation(self):\n raster = self.read_only_virtual_geo_raster()\n vector = GeoVector(Polygon.from_bounds(-180, -90, 180, 90), crs=WGS84_CRS)\n with self.assertRaises(GeoRaster2Error):\n raster.crop(vector)\n\n def test_crop_of_rasters_with_opposite_affine_and_data_return_the_same(self):\n array = np.arange(0, 20).reshape(1, 4, 5)\n array2 = np.arange(19, -1, -1).reshape(1, 4, 5)\n array2.sort()\n\n image1 = np.ma.array(array, mask=False)\n image2 = np.ma.array(array2, mask=False)\n\n aff2 = Affine.translation(0, -8) * Affine.scale(2, 2)\n aff = Affine.scale(2, -2)\n\n r1 = GeoRaster2(image=image1, affine=aff, crs=WEB_MERCATOR_CRS)\n r2 = GeoRaster2(image=image2, affine=aff2, crs=WEB_MERCATOR_CRS)\n\n # r1 == r2 # doesn't work, see https://github.com/satellogic/telluric/issues/79\n roi = GeoVector(Polygon.from_bounds(0, 0, 3, -3), crs=WEB_MERCATOR_CRS)\n\n r1c = r1.crop(roi)\n r2c = r2.crop(roi)\n\n # r1c == r2c # doesn't work, see https://github.com/satellogic/telluric/issues/79\n # currently this is the only way to test the result is the same\n assert np.all(np.flip(r1c.image, axis=1) == r2c.image)\n\n\nclass GeoRasterMaskedTest(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.dir = TemporaryDirectory()\n path = os.path.join(cls.dir.name, 'test_masked_raster.tif')\n cls.masked_raster().save(path)\n cls.read_only_vgr = GeoRaster2.open(path)\n\n @classmethod\n def tearDownClass(cls):\n cls.dir.cleanup()\n\n @classmethod\n def masked_raster(cls):\n data = np.array([\n [0, 1, 1, 1],\n [0, 2, 0, 2],\n [0, 3, 3, 3],\n ], dtype=np.uint8)\n\n mask = np.array([\n [True, False, False, False],\n [True, False, False, False],\n [True, False, False, False],\n ], dtype=bool)\n\n image = np.ma.array(\n np.repeat(data[np.newaxis, :, :], 3, 0),\n mask=np.repeat(mask[np.newaxis, :, :], 3, 0)\n )\n\n # Don't use exactly -1.0 for the affine for rasterio < 1.0a13, see\n # https://github.com/mapbox/rasterio/issues/1272\n affine = Affine.scale(1, -1.0001) * Affine.translation(0, -3)\n crs = WGS84_CRS\n\n return GeoRaster2(\n image, affine=affine, crs=crs,\n )\n\n def 
read_only_virtual_geo_raster(self):\n return self.read_only_vgr\n\n def test_get_smaller_window_respects_mask(self):\n window = Window(1, 0, 3, 3)\n raster = self.read_only_virtual_geo_raster()\n\n cropped = raster.get_window(window, masked=True)\n\n assert (~cropped.image.mask).all()\n\n def test_get_bigger_window_respects_mask(self):\n window = Window(1, 0, 4, 3)\n raster = self.read_only_virtual_geo_raster()\n\n cropped = raster.get_window(window, masked=True)\n\n assert cropped.image[:, :, -1].mask.all() # This line of pixels is masked\n assert (~cropped.image[:, :, :-1].mask).all() # The rest is not masked\n\n\ndef test_small_read_only_virtual_geo_raster_wgs84_crop():\n # See https://github.com/satellogic/telluric/issues/61\n roi = GeoVector.from_bounds(xmin=0, ymin=0, xmax=2, ymax=2, crs=WGS84_CRS)\n resolution = 1.0 # deg / px\n\n raster = GeoRaster2.empty_from_roi(roi, resolution)\n\n assert raster.crop(roi) == raster.crop(roi, raster.resolution())\n\n\n@manualtest\nclass GeoRaster2ManualTest(TestCase):\n \"\"\"Manual testing. To be run manually only.\"\"\"\n\n files = {\n 'original': 'original2.tif',\n 'cloudoptimized aligned': 'original2_aligned_cloudoptimized-2.tif',\n 'mrf aligned': 'original2_aligned.mrf',\n 'cloudoptimized': 'original2_cloudoptimized-2.tif',\n 'mrf': 'original2.mrf',\n 'not aligned cloudoptimized': 'not_aligned_cloudoptimized_2.tif',\n 'not aligned mrf': 'not_aligned.mrf',\n 'not aligned mrf split': 'not_aligned_split.mrf',\n 'aligned mrf split': 'original2_aligned_split.mrf',\n 'original mrf split': 'original2_split.mrf',\n }\n\n resamplings = {\n # 'average': Resampling.average,\n # 'nearest': Resampling.nearest,\n # 'bilinear': Resampling.bilinear,\n 'cubic': Resampling.cubic\n }\n\n def random_string(self):\n import hashlib\n now = '%s' % datetime.now()\n return hashlib.md5(now.encode('utf-8')).hexdigest()\n\n def run_test_on_real_rasters(self, zoom, resampling, local):\n results_arr = np.empty(shape=(len(self.files)), dtype=object)\n # with rasterio.Env(CPL_DEBUG=True, GDAL_CACHEMAX=0):\n # with rasterio.Env(CPL_DEBUG=False):\n print('*' * 80)\n print(zoom)\n print('*' * 80)\n print('#' * 80)\n print(resampling.name)\n print('#' * 80)\n for i, (file_type, file_url) in enumerate(self.files.items()):\n if local or 'split' in file_type:\n base_url = './notebooks/'\n else:\n base_url = 'https://ariel.blob.core.windows.net/rastersfortest/'\n file_url = base_url + file_url\n if local and 'mrf' not in file_type:\n new_file = file_url + self.random_string()\n os.system(\"cp %s %s\" % (file_url, new_file))\n else:\n new_file = file_url\n\n print('file type: %s' % file_type)\n print('-' * 80)\n print('file_url: %s' % file_url)\n print('new_file: %s' % new_file)\n print('-' * 80)\n vr = GeoRaster2.open(new_file)\n start = datetime.now()\n rasterio_ops = {\n 'CPL_DEBUG': True,\n 'GDAL_DISABLE_READDIR_ON_OPEN': 'YES'\n }\n if 'mrf' not in file_type:\n rasterio_ops['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = '.tif'\n with rasterio.Env(**rasterio_ops):\n vr.get_tile(*tiles[zoom], resampling=resampling)\n end = datetime.now()\n tt = (end - start).total_seconds() * 1000\n print(\"start time: %s end time: %s total: %s ms\" % (start, end, tt))\n results_arr[i] = \"type: %s, zoom: %i, resampling: %s time: %s msec\" % (file_type, zoom,\n resampling.name, tt)\n if local and 'mrf' not in file_type:\n os.system(\"rm -f %s\" % (new_file))\n\n print('=' * 80)\n print(results_arr)\n\n def test_zoom_remote_11_resampling_cubic(self):\n self.run_test_on_real_rasters(11, Resampling.cubic, 
False)\n\n def test_zoom_remote_12_resampling_cubic(self):\n self.run_test_on_real_rasters(12, Resampling.cubic, False)\n\n def test_zoom_remote_14_resampling_cubic(self):\n self.run_test_on_real_rasters(14, Resampling.cubic, False)\n\n def test_zoom_remote_15_resampling_cubic(self):\n self.run_test_on_real_rasters(15, Resampling.cubic, False)\n\n def test_zoom_remote_17_resampling_cubic(self):\n self.run_test_on_real_rasters(17, Resampling.cubic, False)\n\n def test_zoom_remote_18_resampling_cubic(self):\n self.run_test_on_real_rasters(18, Resampling.cubic, False)\n" ]
[ [ "numpy.array", "numpy.ma.array", "numpy.arange", "numpy.flip", "numpy.repeat", "numpy.unique" ] ]
vietnamican/Deep-Image-Matting
[ "436487e680027f07387700fb8ee1486635b82335", "436487e680027f07387700fb8ee1486635b82335" ]
[ "segnet_v7.py", "train_segnet_v4_google_colab.py" ]
[ "import tensorflow.keras.backend as K\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, ZeroPadding2D, MaxPooling2D, Reshape, \\\n Concatenate, Lambda\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import multi_gpu_model\nfrom tensorflow.keras.utils import plot_model\n\nfrom custom_layers.unpooling_layer import Unpooling\n\nATROUS_RATES = [6, 12, 18]\n# Conv-MaxPool SPP 24M\ndef build_encoder_decoder():\n # Encoder\n input_tensor = Input(shape=(320, 320, 4))\n x = ZeroPadding2D((1, 1))(input_tensor)\n x = Conv2D(64, (3, 3), activation='relu', name='conv1_1')(x)\n x = BatchNormalization()(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(64, (3, 3), activation='relu', name='conv1_2')(x)\n x = BatchNormalization()(x)\n orig_1 = x\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(128, (3, 3), activation='relu', name='conv2_1')(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(128, (3, 3), activation='relu', name='conv2_2')(x)\n orig_2 = x\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(256, (3, 3), activation='relu', name='conv3_1')(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(256, (3, 3), activation='relu', name='conv3_2')(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(256, (3, 3), activation='relu', name='conv3_3')(x)\n orig_3 = x\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n inputs_size = x.get_shape()[1:3]\n\n conv_4_1x1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv4_1x1')(x)\n conv_4_3x3_1 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[0], name='conv4_3x3_1')(x)\n conv_4_3x3_2 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[1], name='conv4_3x3_2')(x)\n conv_4_3x3_3 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[2], name='conv4_3x3_3')(x) \n # Image average pooling\n image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)\n image_level_features = Conv2D(512, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)\n image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)\n # Concat\n x = Concatenate(axis=3)([conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3, image_level_features])\n x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_1_concat')(x)\n x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_2_concat')(x)\n orig_4 = x\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(512, (3, 3), activation='relu', name='conv5_1')(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(512, (3, 3), activation='relu', name='conv5_2')(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(512, (3, 3), activation='relu', name='conv5_3')(x)\n orig_5 = x\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n # Decoder\n #\n x = UpSampling2D(size=(2, 2))(x)\n the_shape = K.int_shape(orig_5) \n shape = (1, the_shape[1], the_shape[2], the_shape[3])\n origReshaped = Reshape(shape)(orig_5)\n xReshaped = Reshape(shape)(x)\n together = Concatenate(axis=1)([origReshaped, xReshaped])\n x = Unpooling()(together)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_1',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = 
Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_2',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_3',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n\n x = UpSampling2D(size=(2, 2))(x)\n the_shape = K.int_shape(orig_4)\n shape = (1, the_shape[1], the_shape[2], the_shape[3])\n origReshaped = Reshape(shape)(orig_4)\n xReshaped = Reshape(shape)(x)\n together = Concatenate(axis=1)([origReshaped, xReshaped])\n x = Unpooling()(together)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_1',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_2',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_3',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n\n x = UpSampling2D(size=(2, 2))(x)\n the_shape = K.int_shape(orig_3)\n shape = (1, the_shape[1], the_shape[2], the_shape[3])\n origReshaped = Reshape(shape)(orig_3)\n xReshaped = Reshape(shape)(x)\n together = Concatenate(axis=1)([origReshaped, xReshaped])\n x = Unpooling()(together)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_1',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_2',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_3',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n\n x = UpSampling2D(size=(2, 2))(x)\n the_shape = K.int_shape(orig_2)\n shape = (1, the_shape[1], the_shape[2], the_shape[3])\n origReshaped = Reshape(shape)(orig_2)\n xReshaped = Reshape(shape)(x)\n together = Concatenate(axis=1)([origReshaped, xReshaped])\n x = Unpooling()(together)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_1',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_2',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n\n x = UpSampling2D(size=(2, 2))(x)\n the_shape = K.int_shape(orig_1)\n shape = (1, the_shape[1], the_shape[2], the_shape[3])\n origReshaped = Reshape(shape)(orig_1)\n xReshaped = Reshape(shape)(x)\n together = Concatenate(axis=1)([origReshaped, xReshaped])\n x = Unpooling()(together)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_1',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_2',\n kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='pred', kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n\n model = Model(inputs=input_tensor, outputs=x)\n return model\n\n\ndef build_refinement(encoder_decoder):\n input_tensor = encoder_decoder.input\n\n input = Lambda(lambda i: i[:, :, :, 
0:3])(input_tensor)\n\n x = Concatenate(axis=3)([input, encoder_decoder.output])\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n x = BatchNormalization()(x)\n x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='refinement_pred', kernel_initializer='he_normal',\n bias_initializer='zeros')(x)\n\n model = Model(inputs=input_tensor, outputs=x)\n return model\n\n\nif __name__ == '__main__':\n with tf.device(\"/cpu:0\"):\n encoder_decoder = build_encoder_decoder()\n print(encoder_decoder.summary())\n plot_model(encoder_decoder, to_file='encoder_decoder.svg', show_layer_names=True, show_shapes=True)\n\n with tf.device(\"/cpu:0\"):\n refinement = build_refinement(encoder_decoder)\n print(refinement.summary())\n plot_model(refinement, to_file='refinement.svg', show_layer_names=True, show_shapes=True)\n\n parallel_model = multi_gpu_model(refinement, gpus=None)\n print(parallel_model.summary())\n plot_model(parallel_model, to_file='parallel_model.svg', show_layer_names=True, show_shapes=True)\n\n K.clear_session()\n", "import argparse\nimport os\n\nimport tensorflow.keras as keras\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.utils import multi_gpu_model\n\nfrom config import patience, batch_size, epochs, num_train_samples, num_valid_samples\nfrom data_generator import train_gen, valid_gen\nfrom migrate import migrate_model\nfrom segnet_v4 import build_encoder_decoder, build_refinement\nfrom utils import overall_loss, get_available_cpus, get_available_gpus\n\nlog_dir = '/content/drive/Shared drives/DNN/Deep-Image-Matting/logs_4'\ncheckpoint_models_path = '/content/drive/Shared drives/DNN/Deep-Image-Matting/checkpoints_4/'\n\nif __name__ == '__main__':\n # Parse arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--pretrained\", help=\"path to pretrained model file to load\")\n args = vars(ap.parse_args())\n pretrained_path = args[\"pretrained\"]\n\n # Callbacks\n tensor_board = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)\n model_names = checkpoint_models_path + 'final.{epoch:02d}-{val_loss:.4f}.hdf5'\n model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)\n early_stop = EarlyStopping('val_loss', patience=patience)\n reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)\n\n\n class MyCbk(keras.callbacks.Callback):\n def __init__(self, model):\n keras.callbacks.Callback.__init__(self)\n self.model_to_save = model\n\n def on_epoch_end(self, epoch, logs=None):\n fmt = checkpoint_models_path + 'final.%02d-%.4f.hdf5'\n self.model_to_save.save(fmt % (epoch, logs['val_loss']))\n\n\n # Load our model, added support for Multi-GPUs\n num_gpu = len(get_available_gpus())\n if num_gpu >= 2:\n with tf.device(\"/cpu:0\"):\n model = build_encoder_decoder()\n model = build_refinement(model)\n if pretrained_path is not None:\n model.load_weights(pretrained_path)\n\n final = multi_gpu_model(model, gpus=num_gpu)\n # rewrite the callback: saving through the original model and not the 
multi-gpu model.\n model_checkpoint = MyCbk(model)\n else:\n model = build_encoder_decoder()\n final = build_refinement(model)\n if pretrained_path is not None:\n final.load_weights(pretrained_path)\n final.compile(optimizer='nadam', loss=overall_loss)\n\n print(final.summary())\n\n # Final callbacks\n callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]\n\n # Start Fine-tuning\n final.fit_generator(train_gen(),\n steps_per_epoch=num_train_samples // batch_size,\n validation_data=valid_gen(),\n validation_steps=num_valid_samples // batch_size,\n epochs=epochs,\n verbose=1,\n callbacks=callbacks,\n use_multiprocessing=True,\n workers=2\n )\n" ]
[ [ "tensorflow.keras.backend.int_shape", "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Reshape", "tensorflow.keras.utils.multi_gpu_model", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.backend.clear_session", "tensorflow.device", "tensorflow.image.resize", "tensorflow.reduce_mean", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Concatenate", "tensorflow.keras.utils.plot_model" ], [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.utils.multi_gpu_model", "tensorflow.device", "tensorflow.keras.callbacks.Callback.__init__", "tensorflow.keras.callbacks.EarlyStopping" ] ]
gar-syn/congo-lab
[ "dc50af4e35903556bc8bc34dc23a7a708c1f5422" ]
[ "src/octopus/image/source.py" ]
[ "# System Imports\nimport cv2\nimport json\nfrom typing import Optional\n\n# Library imports\nimport numpy\n\n# Twisted Import\nfrom twisted.internet import reactor, defer, threads, protocol\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.internet.interfaces import IAddress\n\n# Package Imports\nfrom .data import Image, ColorSpace\n\nclass cv_webcam (object):\n def __init__ (self, device, img_width, img_height):\n self.device_index = device\n self.img_width = img_width\n self.img_height = img_height\n self.name = \"cv_webcam(%s)\" % device\n self.camera = None\n\n @defer.inlineCallbacks\n def connect (self, _protocolFactory):\n if self.camera is None:\n\n self.camera = yield threads.deferToThread(cv2.VideoCapture, self.device_index)\n\n # Set picture capture dimensions\n self.camera.set(3, self.img_width)\n self.camera.set(4, self.img_height)\n\n defer.returnValue(self)\n\n @defer.inlineCallbacks\n def image (self):\n \"\"\"\n Get an image from the camera.\n \n Returns an Image object.\n \"\"\"\n\n try:\n flag, img_array = yield threads.deferToThread(self.camera.read)\n except SystemError:\n return\n\n if flag is False:\n print (\"No image\")\n return\n\n defer.returnValue(Image(img_array, ColorSpace.BGR))\n\n def disconnect (self):\n threads.deferToThread(self.camera.release)\n\n\nclass _camera_proxy_protocol (protocol.Protocol):\n _state: str\n _buffer: bytes = b''\n _image_callback: Optional[defer.Deferred] = None\n _camera_id: Optional[bytes] = None\n\n def setCameraId(self, camera_id: int):\n self._camera_id = str(camera_id).encode()\n self.requestFormat()\n\n # def connectionMade(self):\n # if self._camera_id is not None:\n # self.requestFormat()\n\n def dataReceived(self, data: bytes):\n \"\"\"\n Byte 1: command\n Byte 2-5: length\n Byte 6+: data\n \"\"\"\n\n self._buffer += data\n\n if len(self._buffer) > 5:\n command = chr(self._buffer[0])\n length = int.from_bytes(self._buffer[1:5], byteorder = 'big')\n\n if len(self._buffer) >= length + 5:\n\n data = self._buffer[5 : 5 + length]\n self._buffer = self._buffer[5 + length : ]\n\n if command == 'F':\n self.formatReceived(data)\n elif command == 'I':\n self.imageReceived(data)\n \n def formatReceived (self, data: bytes):\n image_format = json.loads(data.decode())\n\n if image_format['channels'] == 1:\n self._image_shape = (image_format['height'], image_format['width'])\n else:\n self._image_shape = (\n image_format['height'], \n image_format['width'],\n image_format['channels']\n )\n\n self._image_colorspace = image_format['colorspace']\n \n def imageReceived (self, data: bytes):\n try:\n img_data = numpy.reshape(\n numpy.frombuffer(data, dtype = numpy.uint8), \n newshape = self._image_shape\n )\n self._image_callback.callback(img_data)\n except (AttributeError, defer.AlreadyCalledError) as e:\n # No callback, or callback already done. 
(Unexpected image data).\n pass\n except Exception as e:\n try:\n self._image_callback.errback(e)\n except defer.AlreadyCalledError:\n pass\n\n def requestFormat (self):\n self.transport.write(b'F' + self._camera_id + b'\\n')\n\n def requestImage (self):\n self._image_callback = defer.Deferred()\n self.transport.write(b'I' + self._camera_id + b'\\n')\n return self._image_callback\n\n\nclass camera_proxy (object):\n def __init__ (self, host, port, camera_id):\n self.point = TCP4ClientEndpoint(reactor, host, port)\n self.name = f\"camera_proxy({host!s}, {port!s})\"\n self.camera_id = camera_id\n\n @defer.inlineCallbacks\n def connect (self, _protocolFactory):\n self._protocol = yield self.point.connect(\n protocol.Factory.forProtocol(_camera_proxy_protocol)\n )\n self._protocol.setCameraId(self.camera_id)\n # yield self._protocol._get_format_information()\n\n defer.returnValue(self)\n\n @defer.inlineCallbacks\n def image (self):\n \"\"\"\n Get an image from the camera.\n \n Returns an Image object.\n \"\"\"\n\n try:\n img_array = yield self._protocol.requestImage()\n except Exception as e:\n print('Exception fetching image', e)\n return\n\n defer.returnValue(Image(img_array, ColorSpace.BGR))\n\n def disconnect (self):\n # unlike cv_webcam there is no local capture handle to release here;\n # just drop the TCP connection to the proxy\n self._protocol.transport.loseConnection()" ]
[ [ "numpy.frombuffer" ] ]
Oneflow-Inc/libai
[ "e473bd3962f07b1e37232d2be39c8257df0ec0f3" ]
[ "libai/data/datasets/bert_dataset.py" ]
[ "# coding=utf-8\n# Copyright 2021 The OneFlow Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"dataset for bert.\"\"\"\n\nimport collections\nimport math\n\nimport numpy as np\nimport oneflow as flow\n\nfrom libai.data.data_utils import SentenceIndexedDataset\nfrom libai.data.structures import DistTensorData, Instance\n\nMaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\", [\"index\", \"label\"])\n\n\ndef is_start_piece(piece):\n \"\"\"Check if the current word piece is the starting piece (BERT).\"\"\"\n # When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n return not piece.startswith(\"##\")\n\n\nclass BertDataset(flow.utils.data.Dataset):\n \"\"\"Dataset containing sentence pairs for BERT training.\n Each index corresponds to a randomly generated sentence pair.\n\n Args:\n tokenizer: Tokenizer to use.\n data_prefix: Path to the training dataset.\n indexed_dataset: Indexed dataset to use.\n max_seq_length: Maximum length of the sequence. All values are padded to\n this length. Defaults to 512.\n mask_lm_prob: Probability to mask tokens. Defaults to 0.15.\n short_seq_prob: Probability of producing a short sequence. Defaults to 0.0.\n max_preds_per_seq: Maximum number of mask tokens in each sentence. Defaults to None.\n seed: Seed for random number generator for reproducibility. Defaults to 1234.\n binary_head: Specifies whether the underlying dataset\n generates a pair of blocks along with a sentence_target or not.\n Setting it to True assumes that the underlying dataset generates a\n label for the pair of sentences which is surfaced as\n sentence_target. 
Defaults to True.\n \"\"\"\n\n def __init__(\n self,\n tokenizer,\n data_prefix,\n indexed_dataset,\n max_seq_length=512,\n mask_lm_prob=0.15,\n short_seq_prob=0.0,\n max_preds_per_seq=None,\n seed=1234,\n binary_head=True,\n ):\n self.seed = seed\n self.mask_lm_prob = mask_lm_prob\n self.max_seq_length = max_seq_length\n self.short_seq_prob = short_seq_prob\n self.binary_head = binary_head\n if max_preds_per_seq is None:\n max_preds_per_seq = math.ceil(max_seq_length * mask_lm_prob / 10) * 10\n self.max_preds_per_seq = max_preds_per_seq\n\n self.dataset = SentenceIndexedDataset(\n data_prefix,\n indexed_dataset,\n max_seq_length=self.max_seq_length - 3,\n short_seq_prob=self.short_seq_prob,\n binary_head=self.binary_head,\n )\n\n self.tokenizer = tokenizer\n self.vocab_id_list = list(tokenizer.get_vocab().values())\n self.cls_id = tokenizer.cls_token_id\n self.sep_id = tokenizer.sep_token_id\n self.mask_id = tokenizer.mask_token_id\n self.pad_id = tokenizer.pad_token_id\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n # Note that this rng state should be numpy and not python since\n # python randint is inclusive whereas the numpy one is exclusive.\n np_rng = np.random.RandomState(seed=(self.seed + idx))\n\n sents = self.dataset[idx]\n\n if self.binary_head:\n tokens_a, tokens_b, is_next_random = self.create_random_sentence_pair(sents, np_rng)\n else:\n tokens_a = []\n for j in range(len(sents)):\n tokens_a.extend(sents[j])\n tokens_b = []\n is_next_random = False\n\n tokens_a, tokens_b = self.truncate_seq_pair(\n tokens_a, tokens_b, self.max_seq_length - 3, np_rng\n )\n\n tokens, token_types = self.create_tokens_and_token_types(tokens_a, tokens_b)\n\n tokens, masked_positions, masked_labels = self.create_masked_lm_predictions(tokens, np_rng)\n\n (\n tokens,\n token_types,\n labels,\n padding_mask,\n loss_mask,\n ) = self.pad_and_convert_to_tensor(tokens, token_types, masked_positions, masked_labels)\n\n sample = Instance(\n input_ids=DistTensorData(tokens),\n attention_mask=DistTensorData(padding_mask),\n tokentype_ids=DistTensorData(token_types),\n ns_labels=DistTensorData(\n flow.tensor(int(is_next_random), dtype=flow.long), placement_idx=-1\n ),\n lm_labels=DistTensorData(labels, placement_idx=-1),\n loss_mask=DistTensorData(loss_mask, placement_idx=-1),\n )\n return sample\n\n def create_random_sentence_pair(self, sample, np_rng):\n num_sentences = len(sample)\n assert num_sentences > 1, \"make sure each sample has at least two sentences.\"\n\n a_end = 1\n if num_sentences >= 3:\n a_end = np_rng.randint(1, num_sentences)\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(sample[j])\n\n tokens_b = []\n\n for j in range(a_end, num_sentences):\n tokens_b.extend(sample[j])\n\n is_next_random = False\n if np_rng.random() < 0.5:\n is_next_random = True\n tokens_a, tokens_b = tokens_b, tokens_a\n\n return tokens_a, tokens_b, is_next_random\n\n def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):\n \"\"\"Truncate sequence pair to a maximum sequence length.\"\"\"\n\n len_a, len_b = len(tokens_a), len(tokens_b)\n while True:\n total_length = len_a + len_b\n if total_length <= max_num_tokens:\n break\n if len_a > len_b:\n trunc_tokens = tokens_a\n len_a -= 1\n else:\n trunc_tokens = tokens_b\n len_b -= 1\n\n if np_rng.random() < 0.5:\n trunc_tokens.pop(0) # remove the first element\n else:\n trunc_tokens.pop() # remove the last element\n\n return tokens_a, tokens_b\n\n def create_tokens_and_token_types(self, tokens_a, tokens_b):\n 
\"\"\"Merge segments A and B, add [CLS] and [SEP] and build token types.\"\"\"\n tokens = [self.cls_id] + tokens_a + [self.sep_id]\n token_types = [0] * (len(tokens_a) + 2)\n if len(tokens_b) > 0:\n tokens = tokens + tokens_b + [self.sep_id]\n token_types = token_types + [1] * (len(tokens_b) + 1)\n\n return tokens, token_types\n\n def mask_token(self, idx, tokens, np_rng):\n \"\"\"\n Helper function to mask `idx` token from `tokens` according to\n section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf\n \"\"\"\n label = tokens[idx]\n if np_rng.random() < 0.8:\n new_label = self.mask_id\n else:\n if np_rng.random() < 0.5:\n new_label = label\n else:\n new_label = np_rng.choice(self.vocab_id_list)\n\n tokens[idx] = new_label\n\n return label\n\n def create_masked_lm_predictions(\n self,\n tokens,\n np_rng,\n max_ngrams=3,\n do_whole_word_mask=True,\n favor_longer_ngram=False,\n geometric_dist=False,\n ):\n \"\"\"Creates the predictions for the masked LM objective.\n Note: Tokens here are vocab ids and not text tokens.\"\"\"\n\n cand_indexes = []\n token_boundary = [0] * len(tokens)\n new_tokens = []\n\n for (i, token) in enumerate(tokens):\n new_tokens.append(token % len(self.tokenizer))\n\n if token == self.cls_id or token == self.sep_id:\n token_boundary[i] = 1\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (\n do_whole_word_mask\n and len(cand_indexes) >= 1\n and not is_start_piece(self.tokenizer._convert_id_to_token(token))\n ):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n if is_start_piece(self.tokenizer._convert_id_to_token(token)):\n token_boundary[i] = 1\n\n tokens = new_tokens\n\n masked_positions = []\n masked_labels = []\n\n output_tokens = list(tokens)\n\n if self.mask_lm_prob == 0:\n return output_tokens, masked_positions, masked_labels\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == self.cls_id or token == self.sep_id:\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if do_whole_word_mask and len(cand_indexes) >= 1 and token_boundary[i] == 0:\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n num_to_predict = min(\n self.max_preds_per_seq, max(1, int(round(len(tokens) * self.mask_lm_prob)))\n )\n\n ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)\n if not geometric_dist:\n # By default, we set the probilities to favor shorter ngram sequences.\n pvals = 1.0 / np.arange(1, max_ngrams + 1)\n pvals /= pvals.sum(keepdims=True)\n if favor_longer_ngram:\n pvals = pvals[::-1]\n\n ngram_indexes = []\n for idx in range(len(cand_indexes)):\n ngram_index = []\n for n in ngrams:\n ngram_index.append(cand_indexes[idx : idx + n])\n ngram_indexes.append(ngram_index)\n\n np_rng.shuffle(ngram_indexes)\n\n masked_lms = []\n covered_indexes = set()\n for cand_index_set in ngram_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n if not cand_index_set:\n continue\n # Skip current piece if they are covered in lm masking or previous ngrams.\n for index_set in cand_index_set[0]:\n for index in index_set:\n if index in covered_indexes:\n 
continue\n\n if not geometric_dist:\n n = np_rng.choice(\n ngrams[: len(cand_index_set)],\n p=pvals[: len(cand_index_set)]\n / pvals[: len(cand_index_set)].sum(keepdims=True),\n )\n else:\n # Sampling \"n\" from the geometric distribution and clipping it to\n # the max_ngrams. Using p=0.2 default from the SpanBERT paper\n # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)\n n = min(np_rng.geometric(0.2), max_ngrams)\n\n index_set = sum(cand_index_set[n - 1], [])\n n -= 1\n # Repeatedly looking for a candidate that does not exceed the\n # maximum number of predictions by trying shorter ngrams.\n while len(masked_lms) + len(index_set) > num_to_predict:\n if n == 0:\n break\n index_set = sum(cand_index_set[n - 1], [])\n n -= 1\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n label = self.mask_token(index, output_tokens, np_rng)\n masked_lms.append(MaskedLmInstance(index=index, label=label))\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n for p in masked_lms:\n masked_positions.append(p.index)\n masked_labels.append(p.label)\n\n return output_tokens, masked_positions, masked_labels\n\n def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):\n \"\"\"Pad sequences and convert them to tensor.\"\"\"\n\n # check\n num_tokens = len(tokens)\n num_pad = self.max_seq_length - num_tokens\n assert num_pad >= 0\n assert len(token_types) == num_tokens\n assert len(masked_positions) == len(masked_labels)\n\n # tokens and token types\n filler = [self.pad_id] * num_pad\n tokens = flow.tensor(tokens + filler, dtype=flow.long)\n token_types = flow.tensor(token_types + filler, dtype=flow.long)\n\n # padding mask\n padding_mask = flow.tensor([1] * num_tokens + [0] * num_pad, dtype=flow.long)\n\n # labels and loss mask\n labels = [-1] * self.max_seq_length\n loss_mask = [0] * self.max_seq_length\n for idx, label in zip(masked_positions, masked_labels):\n assert idx < num_tokens\n labels[idx] = label\n loss_mask[idx] = 1\n\n labels = flow.tensor(labels, dtype=flow.long)\n loss_mask = flow.tensor(loss_mask, dtype=flow.long)\n\n return tokens, token_types, labels, padding_mask, loss_mask\n\n @property\n def supports_prefetch(self):\n return self.dataset.supports_prefetch\n\n def prefetch(self, indices):\n self.dataset.prefetch(indices)\n" ]
[ [ "numpy.arange", "numpy.random.RandomState" ] ]
neggert/pytorch-lightning
[ "8208c330eb1a4e8cca243ee525882854dd366921" ]
[ "pytorch_lightning/trainer/training_loop.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\nfrom typing import Any, Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import TrainerState\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType\nfrom pytorch_lightning.utilities.distributed import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.finite_checks import detect_nan_parameters\nfrom pytorch_lightning.utilities.grads import grad_norm\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode: str):\n self.trainer = trainer\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n self._optimizer_freq_cumsum = None\n\n def on_trainer_init(\n self,\n max_epochs: Optional[int],\n min_epochs: Optional[int],\n max_steps: Optional[int],\n min_steps: Optional[int],\n num_sanity_val_steps: int,\n ) -> None:\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.should_stop = False\n self.trainer.state = TrainerState()\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n @property\n def optimizer_freq_cumsum(self):\n if self._optimizer_freq_cumsum is None:\n 
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n return self._optimizer_freq_cumsum\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might apply to all loggers.\n # It might be related to xla tensors being blocked when moving to the cpu\n # kill loggers\n if self.trainer.logger is not None:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n # reset bookkeeping\n self.trainer.state.stage = None\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last and cb.verbose for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.lightning_module\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.lightning_module\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # change gradient accumulation according to the accumulation_scheduler\n self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]\n\n processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)\n\n # hook\n self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, 
model) -> None:\n \"\"\"\n Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before the training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n \"\"\"\n if self.trainer.train_dataloader is None:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n hook_overridden = self._should_add_batch_output_to_epoch_output()\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def _should_add_batch_output_to_epoch_output(self) -> bool:\n # We add to the epoch outputs if\n # 1. The model defines training_epoch_end OR\n # 2. The model overrides on_train_epoch_end which has `outputs` in the signature\n # TODO: in v1.5 this only needs to check if training_epoch_end is overridden\n lightning_module = self.trainer.lightning_module\n if is_overridden(\"training_epoch_end\", model=lightning_module):\n return True\n\n if is_overridden(\"on_train_epoch_end\", model=lightning_module):\n model_hook_fx = getattr(lightning_module, \"on_train_epoch_end\")\n if is_param_in_hook_signature(model_hook_fx, \"outputs\"):\n return True\n\n return False\n\n def get_optimizers_iterable(self, batch_idx=None):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n if batch_idx is None:\n batch_idx = self.trainer.total_batch_idx\n\n optimizers_loop_length = self.optimizer_freq_cumsum[-1]\n current_place_in_loop = batch_idx % optimizers_loop_length\n\n # find optimizer index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n training_step_output.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for 
logging\n model_ref = self.trainer.lightning_module\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator.training_step(args)\n self.trainer.accelerator.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n if training_step_output_for_epoch_end is None:\n return\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.lightning_module.automatic_optimization:\n # accumulate loss. if accumulate_grad_batches==1, no effect\n closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n result = self.trainer.lightning_module._results\n\n loss = None\n hiddens = None\n result[\"extra\"] = {}\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n if hiddens is not None:\n hiddens = hiddens.detach()\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n # map to results under the hood\n result.minimize = loss\n self.trainer.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()\n\n return training_step_output_for_epoch_end, result\n\n @staticmethod\n def _prepare_outputs(\n outputs: List[List[List[Result]]],\n batch_mode: bool,\n ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:\n \"\"\"\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``Result`` objects with dimensions:\n [optimizer outs][batch outs][tbptt steps].\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``Result`` objects converted to dictionaries. 
All list dimensions of size one will\n be collapsed.\n \"\"\"\n processed_outputs = []\n for opt_outputs in outputs:\n # handle an edge case where an optimizer output is the empty list\n if len(opt_outputs) == 0:\n continue\n\n processed_batch_outputs = []\n\n if batch_mode:\n opt_outputs = [opt_outputs]\n\n for batch_outputs in opt_outputs:\n processed_tbptt_outputs = []\n\n for tbptt_output in batch_outputs:\n out = tbptt_output.extra\n out['loss'] = tbptt_output.minimize\n processed_tbptt_outputs.append(out)\n\n # if there was only one tbptt step then we can collapse that dimension\n if len(processed_tbptt_outputs) == 1:\n processed_tbptt_outputs = processed_tbptt_outputs[0]\n processed_batch_outputs.append(processed_tbptt_outputs)\n\n # batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer\n if batch_mode:\n processed_batch_outputs = processed_batch_outputs[0]\n processed_outputs.append(processed_batch_outputs)\n\n # if there is only one optimiser then we collapse that dimension\n if len(processed_outputs) == 1:\n processed_outputs = processed_outputs[0]\n return processed_outputs\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.lightning_module\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator.clip_gradients(\n optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm\n )\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.lightning_module\n grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def _tbptt_split_batch(self, batch: Any) -> List[Any]:\n splits = [batch]\n truncated_bptt_enabled = self._truncated_bptt_enabled()\n if truncated_bptt_enabled:\n model_ref = self.trainer.lightning_module\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch 
output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n val_loop_called = False\n\n batch_idx = None\n is_last_batch = None\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n self.trainer.batch_idx = batch_idx\n self.trainer.is_last_batch = is_last_batch\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(\n epoch_output,\n batch_output.training_step_output_for_epoch_end,\n batch,\n batch_idx,\n dataloader_idx,\n )\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED\n # -----------------------------------------\n should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.validating = True\n self.trainer.run_evaluation()\n self.trainer.training = True\n val_loop_called = True\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps <= self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n if batch_idx is None:\n # dataloader/iterator did not produce a batch\n return\n\n # handle epoch_output on epoch end\n self.on_train_epoch_end(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)\n\n should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n # update epoch level lr_schedulers if no val loop outside train loop is triggered\n if (val_loop_called and not should_check_val) or should_train_only:\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n\n if should_train_only:\n self.check_checkpoint_callback(True)\n\n if should_check_val:\n self.trainer.validating = True\n self.trainer.run_evaluation(on_epoch=True)\n self.trainer.training = True\n\n # increment the global step once\n # progress global step according to grads progress\n 
self.increment_accumulated_grad_global_step()\n\n def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n # prepare epoch output\n processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)\n\n # get the model and call model.training_epoch_end\n model = self.trainer.lightning_module\n\n if is_overridden('training_epoch_end', model=model):\n # run training_epoch_end\n # refresh the result for custom logging at the epoch level\n model._current_fx_name = 'training_epoch_end'\n\n # lightningmodule hook\n training_epoch_end_output = model.training_epoch_end(processed_epoch_output)\n\n if training_epoch_end_output is not None:\n raise MisconfigurationException(\n 'training_epoch_end expects a return of None. '\n 'HINT: remove the return statement in training_epoch_end'\n )\n\n # capture logging\n self.trainer.logger_connector.cache_logged_metrics()\n\n # call train epoch end hooks\n self._on_train_epoch_end_hook(processed_epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:\n # We cannot rely on Trainer.call_hook because the signatures might be different across\n # lightning module and callback\n # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`\n\n # This implementation is copied from Trainer.call_hook\n hook_name = \"on_train_epoch_end\"\n\n # set hook_name to model + reset Result obj\n skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)\n\n # always profile hooks\n with self.trainer.profiler.profile(hook_name):\n\n # first call trainer hook\n if hasattr(self.trainer, hook_name):\n trainer_hook = getattr(self.trainer, hook_name)\n trainer_hook(processed_epoch_output)\n\n # next call hook in lightningModule\n model_ref = self.trainer.lightning_module\n if is_overridden(hook_name, model_ref):\n hook_fx = getattr(model_ref, hook_name)\n if is_param_in_hook_signature(hook_fx, \"outputs\"):\n self.warning_cache.warn(\n \"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3.\"\n \" `outputs` parameter has been deprecated.\"\n \" Support for the old signature will be removed in v1.5\", DeprecationWarning\n )\n model_ref.on_train_epoch_end(processed_epoch_output)\n else:\n model_ref.on_train_epoch_end()\n\n # if the PL module doesn't have the hook then call the accelerator\n # used to auto-reduce things for the user with Results obj\n elif hasattr(self.trainer.accelerator, hook_name):\n accelerator_hook = getattr(self.trainer.accelerator, hook_name)\n accelerator_hook()\n\n if not skip:\n self.trainer._cache_logged_metrics()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # bookkeeping\n self.trainer.hiddens = None\n\n optimizers = self.prepare_optimizers()\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(optimizers))]\n\n if batch is None:\n self.warning_cache.warn(\"train_dataloader yielded None. 
If this was on purpose, ignore this warning...\")\n return AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self._tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in optimizers:\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform ddp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.trainer.lightning_module.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm across opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.trainer.lightning_module.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.trainer.lightning_module.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"Wrap forward, zero_grad and backward in a closure so second order methods work\"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:\n is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0\n\n if is_first_batch_to_accumulate:\n self.on_before_zero_grad(optimizer)\n self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n\n # backward pass\n if result is not None:\n with self.trainer.profiler.profile(\"backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished accumulating\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(result.loss)\n\n else:\n self.warning_cache.warn(\n \"training_step returned None. 
If this was on purpose, ignore this warning...\"\n )\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.lightning_module.untoggle_optimizer(opt_idx)\n\n return result\n\n def _check_finite(self, loss: torch.Tensor) -> None:\n if not torch.isfinite(loss).all():\n raise ValueError(f'The loss returned in `training_step` is {loss}.')\n model = self.trainer.lightning_module\n detect_nan_parameters(model)\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(\n interval=\"step\",\n monitor_metrics=monitor_metrics,\n opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],\n )\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.trainer.global_step = self.trainer.accelerator.update_global_step(\n self.trainer.total_batch_idx, self.trainer.global_step\n )\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:\n \"\"\" Decide if we should run validation. 
\"\"\"\n\n if not self.trainer.enable_validation:\n return False\n\n # check if this epoch is eligible to run validation\n if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:\n return False\n\n # val_check_batch is inf for iterable datasets with no length defined\n # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch\n is_val_check_batch = False\n if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):\n is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0\n elif self.trainer.val_check_batch != float('inf'):\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n\n # Note: num_training_batches is also inf for iterable datasets with no length defined\n epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n\n if on_epoch:\n return (\n is_val_check_batch and epoch_end_val_check\n ) or self.trainer.should_stop or is_last_batch_for_infinite_dataset\n else:\n return is_val_check_batch and not epoch_end_val_check\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n if not self.trainer.lightning_module.automatic_optimization:\n self.warning_cache.warn(\n \"`training_step` hook signature has changed in v1.3.\"\n \" `optimizer_idx` argument has been removed in case of manual optimization. Support for\"\n \" the old signature will be removed in v1.5\", DeprecationWarning\n )\n args.append(opt_idx)\n elif not self.trainer.has_arg(\n \"training_step\", \"optimizer_idx\"\n ) and self.trainer.lightning_module.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but\"\n ' `training_step` is missing the `optimizer_idx` argument.'\n )\n\n # pass hiddens if using tbptt\n if self._truncated_bptt_enabled():\n args.append(hiddens)\n\n return args\n\n def _truncated_bptt_enabled(self) -> bool:\n \"\"\" Temporary tbptt utilities until this flag is fully migrated to the lightning module. 
\"\"\"\n return self._truncated_bptt_steps() > 0\n\n def _truncated_bptt_steps(self) -> int:\n lightning_module = self.trainer.lightning_module\n # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5\n if lightning_module.truncated_bptt_steps > 0:\n return lightning_module.truncated_bptt_steps\n return self.trainer.truncated_bptt_steps or 0\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.trainer.lightning_module.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.lightning_module\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n" ]
[ [ "numpy.argmax", "torch.isfinite", "numpy.cumsum" ] ]
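
The training-loop record above decides, per batch, whether to only accumulate gradients or to also run the optimizer step, via `_accumulated_batches_reached`, `_num_training_batches_reached` and `should_accumulate`. Below is a minimal, self-contained sketch of that decision logic; the window size of 3 and epoch length of 8 are illustrative assumptions, not values from the record.

def accumulated_batches_reached(batch_idx, accumulate_grad_batches):
    # mirrors the record: the accumulation window closes every `accumulate_grad_batches` batches
    return (batch_idx + 1) % accumulate_grad_batches == 0

def num_training_batches_reached(batch_idx, num_training_batches, is_last_batch=False):
    # mirrors the record: the epoch ends on the last batch
    return (batch_idx + 1) == num_training_batches or is_last_batch

def should_accumulate(batch_idx, accumulate_grad_batches, num_training_batches):
    # keep accumulating (skip the optimizer step) unless the window closes or the epoch ends
    done = accumulated_batches_reached(batch_idx, accumulate_grad_batches)
    final = num_training_batches_reached(batch_idx, num_training_batches)
    return not (done or final)

for idx in range(8):  # 8 batches per epoch, step every 3rd batch (illustrative)
    action = "accumulate" if should_accumulate(idx, 3, 8) else "optimizer step"
    print(f"batch {idx}: {action}")
# optimizer steps fire on batches 2, 5 and 7; the final batch always steps
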
yeong35/MusicTransformer-Pytorch
[ "5cd5e1bab8dfa0ed605089d7f41430e6e0596dc8" ]
[ "train.py" ]
[ "import os\nimport csv\nimport shutil\nfrom datetime import datetime\nfrom numpy import logspace\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\n\nfrom dataset.e_piano import create_epiano_datasets, create_pop909_datasets\n\nfrom model.music_transformer import MusicTransformer\n\nfrom model.discriminator import MusicDiscriminator\nfrom model.classifier import CNNDiscriminator\n\nfrom model.loss import SmoothCrossEntropyLoss\n\nfrom utilities.constants import *\nfrom utilities.WGAN_GP import WassersteinLoss\nfrom utilities.device import get_device, use_cuda\nfrom utilities.lr_scheduling import LrStepTracker, get_lr\nfrom utilities.argument_funcs import parse_train_args, print_train_args, write_model_params\nfrom utilities.run_model import train_epoch, eval_model\n\nCSV_HEADER = [\"Epoch\", \"Learn rate\", \"Avg Train loss\", \"Train Accuracy\", \"Avg Eval loss\", \"Eval accuracy\"]\n\ndis_filter_sizes = [2, 3, 4, 5]\ndis_num_filters = [300, 300, 300, 300]\n\n# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy\nBASELINE_EPOCH = -1\n\n# main\ndef main():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Entry point. Trains a model specified by command line arguments\n ----------\n \"\"\"\n\n args = parse_train_args()\n print_train_args(args)\n\n if(args.force_cpu):\n use_cuda(False)\n print(\"WARNING: Forced CPU usage, expect model to perform slower\")\n print(\"\")\n\n eventid = f\"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}\"\n\n args.output_dir = args.output_dir + \"/\" + eventid\n\n os.makedirs(args.output_dir, exist_ok=True)\n\n ##### Output prep #####\n params_file = os.path.join(args.output_dir, \"model_params.txt\")\n write_model_params(args, params_file)\n\n weights_folder = os.path.join(args.output_dir, \"weights\")\n os.makedirs(weights_folder, exist_ok=True)\n\n results_folder = os.path.join(args.output_dir, \"results\")\n os.makedirs(results_folder, exist_ok=True)\n\n results_file = os.path.join(results_folder, \"results.csv\")\n best_loss_file = os.path.join(results_folder, \"best_loss_weights.pickle\")\n best_acc_file = os.path.join(results_folder, \"best_acc_weights.pickle\")\n best_loss_critic_file = os.path.join(results_folder, \"best_loss_critic_weights.pickle\")\n best_acc_critic_file = os.path.join(results_folder, \"best_acc_critic_weights.pickle\")\n\n best_loss_classifier_file = os.path.join(\n results_folder, \"best_loss_classifier_weights.pickle\")\n best_acc_classifier_file = os.path.join(\n results_folder, \"best_acc_classifier_weights.pickle\")\n\n best_text = os.path.join(results_folder, \"best_epochs.txt\")\n\n ##### Tensorboard #####\n if(args.no_tensorboard):\n tensorboard_summary = None\n else:\n from torch.utils.tensorboard import SummaryWriter \n\n tensorboad_dir = os.path.join(args.output_dir, \"tensorboard/\" + eventid)\n tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)\n\n ##### Datasets #####\n # The dataset changes depending on the encoding options, so it has to be set up as below\n if args.interval and args.octave:\n print(\"octave interval dataset!!\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, 
condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n elif args.octave and args.fusion_encoding and args.absolute:\n print(\"absolute dataset!!\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)\n pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n elif args.interval and not args.octave:\n print(\"interval dataset!!\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n elif args.octave and args.fusion_encoding:\n print(\"Octave_fusion dataset!!\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)\n pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n elif not args.interval and args.octave and not args.fusion_encoding:\n print(\"Octave dataset!!\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n 
generator=torch.Generator().manual_seed(42))\n elif args.logscale:\n print(\"logscale dataset\")\n classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,\n condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)\n pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),\n len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n else:\n classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,\n condition_token = args.condition_token, octave = args.octave)\n pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)\n pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,\n [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],\n generator=torch.Generator().manual_seed(42))\n\n if args.data == 'both':\n print(\"Dataset: both\")\n train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])\n val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])\n elif args.data == 'classic':\n print(\"Dataset: classic\")\n train_dataset = torch.utils.data.ConcatDataset([classic_train])\n val_dataset = torch.utils.data.ConcatDataset([classic_val])\n else:\n print(\"Dataset: pop\")\n train_dataset = torch.utils.data.ConcatDataset([pop_train])\n val_dataset = torch.utils.data.ConcatDataset([pop_valid])\n\n test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])\n\n\n train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)\n val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)\n\n model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,\n d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,\n max_sequence=args.max_sequence, rpr=args.rpr, \n condition_token = args.condition_token, interval = args.interval, octave = args.octave, \n fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())\n\n # EY critic\n # num_prime = args.num_prime\n critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,\n d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,\n max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())\n\n classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,\n d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,\n max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())\n\n\n if args.creative:\n classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))\n\n ##### Continuing from previous training 
session #####\n start_epoch = BASELINE_EPOCH\n if(args.continue_weights is not None):\n if(args.continue_epoch is None):\n print(\"ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights\")\n return\n else:\n model.load_state_dict(torch.load(args.continue_weights))\n start_epoch = args.continue_epoch\n elif(args.continue_epoch is not None):\n print(\"ERROR: Need continue weights (-continue_weights) when using continue_epoch\")\n return\n\n ##### Lr Scheduler vs static lr #####\n if(args.lr is None):\n if(args.continue_epoch is None):\n init_step = 0\n else:\n init_step = args.continue_epoch * len(train_loader)\n\n lr = LR_DEFAULT_START\n lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)\n else:\n lr = args.lr\n\n ##### Not smoothing evaluation loss #####\n if args.interval and args.octave:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)\n elif args.interval and not args.octave:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)\n elif args.octave and args.fusion_encoding and args.absolute:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)\n elif args.octave and args.fusion_encoding:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)\n elif not args.interval and args.octave and not args.fusion_encoding:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)\n elif args.logscale:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)\n else:\n eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)\n\n\n ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####\n if(args.ce_smoothing is None):\n train_loss_func = eval_loss_func\n else:\n if args.interval and args.octave:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)\n elif args.interval and not args.octave:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)\n elif not args.interval and args.octave and args.fusion_encoding and args.absolute:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)\n elif not args.interval and args.octave and args.fusion_encoding:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)\n elif not args.interval and args.octave and not args.fusion_encoding:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)\n elif args.logscale:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)\n else:\n train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)\n\n ##### EY - WGAN Loss #####\n classifier_loss_func = nn.MSELoss()\n\n ##### Optimizer #####\n opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)\n critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)\n classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)\n\n if(args.lr is None):\n lr_scheduler = LambdaLR(opt, lr_stepper.step)\n critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)\n classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)\n else:\n 
lr_scheduler = None\n\n ##### Tracking best evaluation accuracy #####\n best_eval_acc = 0.0\n best_eval_acc_epoch = -1\n best_eval_loss = float(\"inf\")\n best_eval_loss_epoch = -1\n\n ##### Results reporting #####\n if(not os.path.isfile(results_file)):\n with open(results_file, \"w\", newline=\"\") as o_stream:\n writer = csv.writer(o_stream)\n writer.writerow(CSV_HEADER)\n\n\n ##### TRAIN LOOP #####\n for epoch in range(start_epoch, args.epochs):\n # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)\n if(epoch >= BASELINE_EPOCH):\n print(SEPERATOR)\n print(\"NEW EPOCH:\", epoch+1)\n print(SEPERATOR)\n print(\"\")\n\n # Train\n # EY: start of the part that needs to be fixed\n train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)\n\n print(SEPERATOR)\n print(\"Evaluating:\")\n else:\n print(SEPERATOR)\n print(\"Baseline model evaluation (Epoch 0):\")\n\n # Eval\n # train_loss, train_acc = eval_model(model, train_loader, train_loss_func)\n eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)\n\n # Learn rate\n lr = get_lr(opt)\n\n print(\"Epoch:\", epoch+1)\n print(\"Avg train loss:\", train_loss)\n print(\"Avg train acc:\", train_acc)\n print(\"Avg eval loss:\", eval_loss)\n print(\"Avg eval acc:\", eval_acc)\n print(SEPERATOR)\n print(\"\")\n\n new_best = False\n\n if(eval_acc > best_eval_acc):\n best_eval_acc = eval_acc\n best_eval_acc_epoch = epoch+1\n torch.save(model.state_dict(), best_acc_file)\n torch.save(critic.state_dict(), best_acc_critic_file)\n torch.save(classifier.state_dict(), best_acc_classifier_file)\n new_best = True\n\n if(eval_loss < best_eval_loss):\n best_eval_loss = eval_loss\n best_eval_loss_epoch = epoch+1\n torch.save(model.state_dict(), best_loss_file)\n torch.save(critic.state_dict(), best_loss_critic_file)\n torch.save(classifier.state_dict(), best_loss_classifier_file)\n new_best = True\n\n # Writing out new bests\n if(new_best):\n with open(best_text, \"w\") as o_stream:\n print(\"Best eval acc epoch:\", best_eval_acc_epoch, file=o_stream)\n print(\"Best eval acc:\", best_eval_acc, file=o_stream)\n print(\"\")\n print(\"Best eval loss epoch:\", best_eval_loss_epoch, file=o_stream)\n print(\"Best eval loss:\", best_eval_loss, file=o_stream)\n\n\n if(not args.no_tensorboard):\n tensorboard_summary.add_scalar(\"Avg_CE_loss/train\", train_loss, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Avg_CE_loss/eval\", eval_loss, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Accuracy/train\", train_acc, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Accuracy/eval\", eval_acc, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Learn_rate/train\", lr, global_step=epoch+1)\n\n tensorboard_summary.add_scalar(\"Critic_loss/train\", dis_loss, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Gen_loss/train\", gen_loss, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Creativity_loss/train\", cre_loss, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"GAN_accuracy/train\", gan_accuracy, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Class_accuracy/train\", class_accuracy, global_step=epoch+1)\n tensorboard_summary.add_scalar(\"Creativity/train\", creativity, global_step=epoch+1)\n\n tensorboard_summary.flush()\n\n if((epoch+1) % args.weight_modulus == 
0):\n epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)\n path = os.path.join(weights_folder, \"epoch_\" + epoch_str + \".pickle\")\n torch.save(model.state_dict(), path)\n\n with open(results_file, \"a\", newline=\"\") as o_stream:\n writer = csv.writer(o_stream)\n writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])\n\n # Sanity check just to make sure everything is gone\n if(not args.no_tensorboard):\n tensorboard_summary.flush()\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.ConcatDataset", "torch.nn.MSELoss", "torch.nn.CrossEntropyLoss", "torch.Generator", "torch.utils.data.DataLoader", "torch.load", "torch.optim.lr_scheduler.LambdaLR", "torch.utils.tensorboard.SummaryWriter" ] ]
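
Every dataset branch in the train.py record above repeats the same seeded 80/10/10 `torch.utils.data.random_split`. Below is a minimal sketch of that split pattern in isolation, with a dummy `TensorDataset` standing in for the POP909 dataset; the 100-item size is an assumption for illustration.

import torch
from torch.utils.data import TensorDataset, random_split

dataset = TensorDataset(torch.arange(100))  # stand-in for the real dataset

n_train = int(len(dataset) * 0.8)
n_valid = int(len(dataset) * 0.1)
n_test = len(dataset) - n_train - n_valid  # remainder absorbs any rounding loss

# the fixed-seed generator makes the split reproducible across runs
train_set, valid_set, test_set = random_split(
    dataset, [n_train, n_valid, n_test],
    generator=torch.Generator().manual_seed(42)
)
print(len(train_set), len(valid_set), len(test_set))  # 80 10 10
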
littlewatkins/nepc
[ "3e16e3a9622ca0ebb4484c9e4af253046367773a" ]
[ "tests/test_mysql_build.py" ]
[ "from nepc import nepc\nfrom nepc.util import util\nimport pandas as pd\nimport os\nimport pytest\nimport platform\n# TODO: remove dependence on csv; put function in scraper that uses built-in\n# readlines function\nimport csv\n\n# TODO: test that all values in [nepc]/tests/data are in the nepc database\n\n@pytest.mark.usefixtures(\"data_config\", \"nepc_connect\")\ndef test_states_table_has_species_metadata(data_config, nepc_connect):\n \"\"\"\n check that the states table has a species_id column\n \"\"\"\n NEPC_DATA = data_config[0]\n number_of_states = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1\n df_states = nepc.table_as_df(nepc_connect[1], 'states')\n assert len(df_states) == number_of_states\n assert 'species_id' in list(df_states.columns)\n\n\n@pytest.mark.usefixtures(\"data_config\", \"nepc_connect\")\ndef test_csdata_lines(data_config, nepc_connect):\n DIR_NAMES = data_config[1]\n cs_lines = 0\n for directoryname in DIR_NAMES:\n directory = os.fsencode(directoryname)\n\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".met\") or filename.endswith(\".mod\"):\n continue\n else:\n # subtract 1 to account for header\n cs_lines += util.wc_fxn(directoryname + filename) - 1\n\n assert cs_lines == nepc.count_table_rows(nepc_connect[1], \"csdata\")\n\n\n@pytest.mark.usefixtures(\"data_config\", \"nepc_connect\")\ndef test_data_entered(data_config, nepc_connect, local):\n NEPC_DATA = data_config[0]\n if local is False or platform.node() == 'ppdadamsonlinux':\n cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',\n delimiter='\\t')\n else:\n cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',\n delimiter='\\t')\n\n for index, row in cs_dat_files.iterrows():\n cs_id = row['cs_id']\n dat_file = row['filename']\n df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\\t',\n usecols=['e_energy', 'sigma'])\n e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)\n # assert e_energy == pytest.approx(df['e_energy'].tolist())\n assert sigma == pytest.approx(df['sigma'].tolist())\n\n\n@pytest.mark.usefixtures(\"data_config\", \"nepc_connect\")\ndef test_meta_entered(data_config, nepc_connect, local, dbug):\n NEPC_DATA = data_config[0]\n if local is False or platform.node() == 'ppdadamsonlinux':\n cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',\n delimiter='\\t')\n else:\n cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',\n delimiter='\\t')\n\n for index, row in cs_dat_files.iterrows():\n cs_id = row['cs_id']\n met_file = row['filename']\n if dbug:\n print(cs_id, met_file)\n e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)\n\n meta_cols = ['cs_id', 'process', 'units_e',\n 'units_sigma', 'ref', 'lhsA',\n 'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',\n 'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',\n 'background', 'lpu', 'upu']\n\n with open(NEPC_DATA + met_file + \".met\", 'r', newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n next(reader)\n meta_disk = list(reader)[0]\n\n meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]\n\n for i in [0, 11, 12, 13, 14]:\n meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\\\N'\n else meta_disk[i])\n for i in [2, 3, 9, 10, 16, 17]:\n meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\\\N'\n else meta_disk[i])\n\n meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]\n for i in list(range(0, len(meta_cols)))]\n if dbug:\n print('meta_db: {}\\t from {}'.format(meta_db, met_file))\n for i in range(len(meta_cols)):\n if dbug:\n 
print('meta_db[{}]: {}\\t from {}'.format(str(i), str(meta_db[i]), met_file))\n if (type(meta_db[i]) is float):\n assert (pytest.approx(meta_disk[i]) ==\n pytest.approx(meta_db[i]))\n elif meta_db[i] is None:\n assert meta_disk[i] == '\\\\N'\n else:\n assert meta_disk[i] == meta_db[i]\n" ]
[ [ "pandas.read_csv" ] ]
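
The test record above repeatedly compares database values against `.dat`/`.met` files read with `pandas.read_csv(..., delimiter='\t')`, using `pytest.approx` for float tolerance. Here is a self-contained sketch of that comparison pattern; the inline TSV and the fake database values are invented for illustration.

import io
import pandas as pd
import pytest

TSV = "e_energy\tsigma\n1.0\t0.100\n2.0\t0.200\n"
df = pd.read_csv(io.StringIO(TSV), delimiter='\t', usecols=['e_energy', 'sigma'])

# pretend these values came back from the database query (tiny float noise included)
sigma_from_db = [0.1000000001, 0.2]

def test_sigma_matches_disk():
    # pytest.approx applies a relative tolerance element-wise across the list
    assert sigma_from_db == pytest.approx(df['sigma'].tolist())
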
hanhejia/SSD
[ "0c5684ad786768b46b119fb503f4f7174e2c78ed" ]
[ "yassd/testing_utils/videotest.py" ]
[ "\"\"\" A class for testing a SSD model on a video file or webcam \"\"\"\n\nimport cv2\nimport keras\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import Model\nfrom keras.preprocessing import image \nimport pickle\nimport numpy as np\nfrom random import shuffle\nfrom scipy.misc import imread, imresize\nfrom timeit import default_timer as timer\n\nimport sys\nsys.path.append(\"..\")\nfrom ssd_utils import BBoxUtility\n\n\nclass VideoTest(object):\n \"\"\" Class for testing a trained SSD model on a video file and show the\n result in a window. Class is designed so that one VideoTest object \n can be created for a model, and the same object can then be used on \n multiple videos and webcams.\n \n Arguments:\n class_names: A list of strings, each containing the name of a class.\n The first name should be that of the background class\n which is not used.\n \n model: An SSD model. It should already be trained for \n images similar to the video to test on.\n \n input_shape: The shape that the model expects for its input, \n as a tuple, for example (300, 300, 3) \n \n bbox_util: An instance of the BBoxUtility class in ssd_utils.py\n The BBoxUtility needs to be instantiated with \n the same number of classes as the length of \n class_names.\n \n \"\"\"\n \n def __init__(self, class_names, model, input_shape):\n self.class_names = class_names\n self.num_classes = len(class_names)\n self.model = model\n self.input_shape = input_shape\n self.bbox_util = BBoxUtility(self.num_classes)\n \n # Create unique and somewhat visually distinguishable bright\n # colors for the different classes.\n self.class_colors = []\n for i in range(0, self.num_classes):\n # This can probably be written in a more elegant manner\n hue = 255*i/self.num_classes\n col = np.zeros((1,1,3)).astype(\"uint8\")\n col[0][0][0] = hue\n col[0][0][1] = 128 # Saturation\n col[0][0][2] = 255 # Value\n cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)\n col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))\n self.class_colors.append(col) \n \n def run(self, video_path = 0, start_frame = 0, conf_thresh = 0.6):\n \"\"\" Runs the test on a video (or webcam)\n \n # Arguments\n video_path: A file path to a video to be tested on. Can also be a number, \n in which case the webcam with the same number (i.e. 0) is \n used instead\n \n start_frame: The number of the first frame of the video to be processed\n by the network. \n \n conf_thresh: Threshold of confidence. Any boxes with lower confidence \n are not visualized.\n \n \"\"\"\n \n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError((\"Couldn't open video file or webcam. 
If you're \"\n \"trying to open a webcam, make sure your video_path is an integer!\"))\n \n # Compute aspect ratio of video \n vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n vidar = vidw/vidh\n \n # Skip frames until reaching start_frame\n if start_frame > 0:\n vid.set(cv2.CAP_PROP_POS_MSEC, start_frame)\n \n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n \n while True:\n retval, orig_image = vid.read()\n if not retval:\n print(\"Done!\")\n return\n \n im_size = (self.input_shape[0], self.input_shape[1]) \n resized = cv2.resize(orig_image, im_size)\n rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)\n \n # Reshape to original aspect ratio for later visualization\n # The resized version is used to visualize what kind of resolution\n # the network has to work with.\n to_draw = cv2.resize(resized, (int(self.input_shape[0]*vidar), self.input_shape[1]))\n \n # Use model to predict \n inputs = [image.img_to_array(rgb)]\n tmp_inp = np.array(inputs)\n x = preprocess_input(tmp_inp)\n \n y = self.model.predict(x)\n \n \n # This line creates a new TensorFlow device every time. Is there a \n # way to avoid that?\n results = self.bbox_util.detection_out(y)\n \n if len(results) > 0 and len(results[0]) > 0:\n # Interpret output, only one frame is used \n det_label = results[0][:, 0]\n det_conf = results[0][:, 1]\n det_xmin = results[0][:, 2]\n det_ymin = results[0][:, 3]\n det_xmax = results[0][:, 4]\n det_ymax = results[0][:, 5]\n\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]\n\n top_conf = det_conf[top_indices]\n top_label_indices = det_label[top_indices].tolist()\n top_xmin = det_xmin[top_indices]\n top_ymin = det_ymin[top_indices]\n top_xmax = det_xmax[top_indices]\n top_ymax = det_ymax[top_indices]\n\n for i in range(top_conf.shape[0]):\n xmin = int(round(top_xmin[i] * to_draw.shape[1]))\n ymin = int(round(top_ymin[i] * to_draw.shape[0]))\n xmax = int(round(top_xmax[i] * to_draw.shape[1]))\n ymax = int(round(top_ymax[i] * to_draw.shape[0]))\n\n # Draw the box on top of the to_draw image\n class_num = int(top_label_indices[i])\n cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax), \n self.class_colors[class_num], 2)\n text = self.class_names[class_num] + \" \" + ('%.2f' % top_conf[i])\n\n text_top = (xmin, ymin-10)\n text_bot = (xmin + 80, ymin + 5)\n text_pos = (xmin + 5, ymin)\n cv2.rectangle(to_draw, text_top, text_bot, self.class_colors[class_num], -1)\n cv2.putText(to_draw, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)\n \n # Calculate FPS\n # This computes FPS for everything, not just the model's execution \n # which may or may not be what you want\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n \n # Draw FPS in top left corner\n cv2.rectangle(to_draw, (0,0), (50, 17), (255,255,255), -1)\n cv2.putText(to_draw, fps, (3,10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)\n \n cv2.imshow(\"SSD result\", to_draw)\n cv2.waitKey(10)\n \n \n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
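
The `VideoTest` constructor in the record above generates distinct per-class colours by spacing hues evenly and converting a 1x1 HSV pixel to BGR. The same trick in isolation is sketched below; note that OpenCV's 8-bit hue channel nominally spans 0-179, so this sketch scales by 180 where the original spreads hues over 0-255, and the class count of 5 is an illustrative assumption.

import cv2
import numpy as np

num_classes = 5
class_colors = []
for i in range(num_classes):
    # evenly spaced hue, fixed saturation and value, converted via a 1x1 image
    hsv_pixel = np.uint8([[[180 * i // num_classes, 128, 255]]])
    bgr = cv2.cvtColor(hsv_pixel, cv2.COLOR_HSV2BGR)[0][0]
    class_colors.append(tuple(int(c) for c in bgr))

print(class_colors)  # visually distinct bright BGR tuples, one per class
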
WZX1998/facial-recognition
[ "3284445abde438c0ae77807eeaf53bb5d1e06308" ]
[ "src/dataset_creator.py" ]
[ "import logging\nimport pickle\nimport os\nimport sys\nimport json\nimport cv2\nimport numpy as np\nimport glob\nimport tqdm\n\nsys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))\nimport src\nfrom src.__init__ import *\n\n\ndef image_reader(image_path_list):\n \n image = cv2.imread(image_path_list[0], 0)\n image = cv2.resize(image, (48, 48))\n image = np.expand_dims(image, axis=0)\n \n for img_path in image_path_list[1:]:\n image = np.concatenate(\n (\n image, \n np.expand_dims(\n cv2.resize(cv2.imread(img_path, 0), (48, 48)), \n axis=0\n )\n ), \n axis=0\n )\n \n return image\n\n\ndef image_label_generator(emotion_map):\n labels = []\n \n _i = 0\n\n image_lists = []\n for k, v in tqdm.tqdm(emotion_map.items()):\n\n path = os.path.join(FACE_IMAGES_PATH, k)\n logger.debug('reading images at path: {}'.format(path))\n image_list = glob.glob(path+'/*.png')\n logger.debug('length images list: {}'.format(len(image_list)))\n image_lists.append(image_list)\n labels.extend([v]*len(image_list))\n \n images = np.vstack((image_reader(image_list) for image_list in image_lists))\n\n return images, labels\n\n\ndef train_test_splitter(images, labels):\n dataset = [(image, label) for image, label in zip(images, labels)]\n\n dataset_size = len(dataset)\n trainset_size = int(.8 * dataset_size)\n testset_size = dataset_size - trainset_size\n logger.debug('Dataset size: {}'.format(dataset_size))\n \n np.random.shuffle(dataset)\n \n # PAY ATTENTION HERE: YOU CAN ALSO ADD DEV-SET :)\n trainset, testset = dataset[:trainset_size], dataset[trainset_size:]\n \n logger.debug('Trainset size: {}, Testset size: {}'.format(\n len(trainset), len(testset)\n ))\n \n logger.debug('concatenating the train images on axis 0')\n train_image = np.vstack((tr[0] for tr in tqdm.tqdm(trainset[:])))\n logger.debug('concatenating the train labels on axis 0')\n train_label = [tr[1] for tr in tqdm.tqdm(trainset[:])]\n\n logger.info('concatenating the test images on axis 0')\n test_image = np.vstack((te[0] for te in tqdm.tqdm(testset[:])))\n logger.debug('concatenating the test labels on axis 0')\n test_label = [te[1] for te in tqdm.tqdm(testset[:])]\n \n logger.debug('train-images-shape: {}, test-images-shape: {}'.format(\n train_image.shape, test_image.shape\n ))\n \n return (train_image, train_label), (test_image, test_label)\n\n\ndef create_dataset(images, labels):\n \n images = np.reshape(images, (-1, 48*48))\n logger.debug('images-shape: {}, length-labels: {}'.format(\n images.shape, len(labels)\n ))\n \n train, test = train_test_splitter(images, labels)\n \n \n train_dict = {\n 'data': train[0],\n 'labels': train[1]\n }\n test_dict = {\n 'data': test[0],\n 'labels': test[1]\n }\n \n with open(os.path.join(DATASET_SAVE_PATH, 'train_batch_0'), 'wb') as file:\n pickle.dump(train_dict, file)\n logger.info('dataset: trainset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))\n \n with open(os.path.join(DATASET_SAVE_PATH, 'test_batch_0'), 'wb') as file:\n pickle.dump(test_dict, file)\n logger.info('dataset: testset-dict pickled and saved at {}'.format(DATASET_SAVE_PATH))\n \n logger.info('dataset created :)')\n\n\ndef condition_satisfied(emotion_map):\n for emotion_class in emotion_map.keys():\n path = os.path.join(FACE_IMAGES_PATH, emotion_class)\n\n if not os.path.exists(path):\n logger.error('Please capture images for \"{}\" emotion-class as well'.format(\n emotion_class\n ))\n logger.error('FAIL.')\n return False\n\n return True\n\n\nif __name__ == '__main__':\n\n logger = 
logging.getLogger('emojifier.dataset_creator')\n FACE_IMAGES_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'images')\n DATASET_SAVE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'dataset')\n\n if not os.path.exists(DATASET_SAVE_PATH):\n os.makedirs(DATASET_SAVE_PATH)\n\n if condition_satisfied(EMOTION_MAP):\n _images, _labels = image_label_generator(EMOTION_MAP)\n create_dataset(_images, _labels)\n" ]
[ [ "numpy.reshape", "numpy.random.shuffle", "numpy.expand_dims" ] ]
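
One portability note on the record above: `np.vstack` is fed generator expressions (e.g. `np.vstack((tr[0] for tr in trainset[:]))`), a usage NumPy deprecated and recent releases reject outright; materialising a list first keeps the same one-pass behaviour on every version. A minimal sketch with dummy flattened 48x48 images:

import numpy as np

# dummy stand-ins for flattened 48x48 grayscale faces
rows = [np.zeros(48 * 48, dtype=np.uint8) for _ in range(10)]

# a list (not a generator) works on all NumPy versions
stacked = np.vstack([row for row in rows])
print(stacked.shape)  # (10, 2304)
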
RomeroBarata/upt
[ "6f953e7a61c31cf608aef9a77b0af3ae8e1f6594" ]
[ "main.py" ]
[ "\"\"\"\nUtilities for training, testing and caching results\nfor HICO-DET and V-COCO evaluations.\n\nFred Zhang <frederic.zhang@anu.edu.au>\n\nThe Australian National University\nAustralian Centre for Robotic Vision\n\"\"\"\n\nimport os\nimport sys\nimport torch\nimport random\nimport warnings\nimport argparse\nimport numpy as np\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nfrom upt import build_detector\nfrom utils import custom_collate, CustomisedDLE, DataFactory\n\nwarnings.filterwarnings(\"ignore\")\n\ndef main(rank, args):\n\n dist.init_process_group(\n backend=\"nccl\",\n init_method=\"env://\",\n world_size=args.world_size,\n rank=rank\n )\n\n # Fix seed\n seed = args.seed + dist.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n torch.cuda.set_device(rank)\n\n trainset = DataFactory(name=args.dataset, partition=args.partitions[0], data_root=args.data_root)\n testset = DataFactory(name=args.dataset, partition=args.partitions[1], data_root=args.data_root)\n\n train_loader = DataLoader(\n dataset=trainset,\n collate_fn=custom_collate, batch_size=args.batch_size,\n num_workers=args.num_workers, pin_memory=True, drop_last=True,\n sampler=DistributedSampler(\n trainset, \n num_replicas=args.world_size, \n rank=rank)\n )\n test_loader = DataLoader(\n dataset=testset,\n collate_fn=custom_collate, batch_size=1,\n num_workers=args.num_workers, pin_memory=True, drop_last=False,\n sampler=torch.utils.data.SequentialSampler(testset)\n )\n\n args.human_idx = 0\n if args.dataset == 'hicodet':\n object_to_target = train_loader.dataset.dataset.object_to_verb\n args.num_classes = 117\n elif args.dataset == 'vcoco':\n object_to_target = list(train_loader.dataset.dataset.object_to_action.values())\n args.num_classes = 24\n \n upt = build_detector(args, object_to_target)\n\n if os.path.exists(args.resume):\n print(f\"=> Rank {rank}: continue from saved checkpoint {args.resume}\")\n checkpoint = torch.load(args.resume, map_location='cpu')\n upt.load_state_dict(checkpoint['model_state_dict'])\n else:\n print(f\"=> Rank {rank}: start from a randomly initialised model\")\n\n engine = CustomisedDLE(\n upt, train_loader,\n max_norm=args.clip_max_norm,\n num_classes=args.num_classes,\n print_interval=args.print_interval,\n find_unused_parameters=True,\n cache_dir=args.output_dir\n )\n\n if args.cache:\n if args.dataset == 'hicodet':\n engine.cache_hico(test_loader, args.output_dir)\n elif args.dataset == 'vcoco':\n engine.cache_vcoco(test_loader, args.output_dir)\n return\n\n if args.eval:\n if args.dataset == 'vcoco':\n raise NotImplementedError(f\"Evaluation on V-COCO has not been implemented.\")\n ap = engine.test_hico(test_loader)\n # Fetch indices for rare and non-rare classes\n num_anno = torch.as_tensor(trainset.dataset.anno_interaction)\n rare = torch.nonzero(num_anno < 10).squeeze(1)\n non_rare = torch.nonzero(num_anno >= 10).squeeze(1)\n print(\n f\"The mAP is {ap.mean():.4f},\"\n f\" rare: {ap[rare].mean():.4f},\"\n f\" non-rare: {ap[non_rare].mean():.4f}\"\n )\n return\n\n for p in upt.detector.parameters():\n p.requires_grad = False\n param_dicts = [{\n \"params\": [p for n, p in upt.named_parameters()\n if \"interaction_head\" in n and p.requires_grad]\n }]\n optim = torch.optim.AdamW(\n param_dicts, lr=args.lr_head,\n weight_decay=args.weight_decay\n )\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, args.lr_drop)\n # Override optimiser and learning rate 
scheduler\n engine.update_state_key(optimizer=optim, lr_scheduler=lr_scheduler)\n\n engine(args.epochs)\n\n@torch.no_grad()\ndef sanity_check(args):\n dataset = DataFactory(name='hicodet', partition=args.partitions[0], data_root=args.data_root)\n args.human_idx = 0; args.num_classes = 117\n object_to_target = dataset.dataset.object_to_verb\n upt = build_detector(args, object_to_target)\n if args.eval:\n upt.eval()\n\n image, target = dataset[0]\n outputs = upt([image], [target])\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--lr-head', default=1e-4, type=float)\n parser.add_argument('--batch-size', default=2, type=int)\n parser.add_argument('--weight-decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=20, type=int)\n parser.add_argument('--lr-drop', default=10, type=int)\n parser.add_argument('--clip-max-norm', default=0.1, type=float)\n\n parser.add_argument('--backbone', default='resnet50', type=str)\n parser.add_argument('--dilation', action='store_true')\n parser.add_argument('--position-embedding', default='sine', type=str, choices=('sine', 'learned'))\n\n parser.add_argument('--repr-dim', default=512, type=int)\n parser.add_argument('--hidden-dim', default=256, type=int)\n parser.add_argument('--enc-layers', default=6, type=int)\n parser.add_argument('--dec-layers', default=6, type=int)\n parser.add_argument('--dim-feedforward', default=2048, type=int)\n parser.add_argument('--dropout', default=0.1, type=float)\n parser.add_argument('--nheads', default=8, type=int)\n parser.add_argument('--num-queries', default=100, type=int)\n parser.add_argument('--pre-norm', action='store_true')\n\n parser.add_argument('--no-aux-loss', dest='aux_loss', action='store_false')\n parser.add_argument('--set-cost-class', default=1, type=float)\n parser.add_argument('--set-cost-bbox', default=5, type=float)\n parser.add_argument('--set-cost-giou', default=2, type=float)\n parser.add_argument('--bbox-loss-coef', default=5, type=float)\n parser.add_argument('--giou-loss-coef', default=2, type=float)\n parser.add_argument('--eos-coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n parser.add_argument('--alpha', default=0.5, type=float)\n parser.add_argument('--gamma', default=0.2, type=float)\n\n parser.add_argument('--dataset', default='hicodet', type=str)\n parser.add_argument('--partitions', nargs='+', default=['train2015', 'test2015'], type=str)\n parser.add_argument('--num-workers', default=2, type=int)\n parser.add_argument('--data-root', default='./hicodet')\n\n # training parameters\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--port', default='1234', type=str)\n parser.add_argument('--seed', default=66, type=int)\n parser.add_argument('--pretrained', default='', help='Path to a pretrained detector')\n parser.add_argument('--resume', default='', help='Resume from a model')\n parser.add_argument('--output-dir', default='checkpoints')\n parser.add_argument('--print-interval', default=500, type=int)\n parser.add_argument('--world-size', default=1, type=int)\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--cache', action='store_true')\n parser.add_argument('--sanity', action='store_true')\n parser.add_argument('--box-score-thresh', default=0.2, type=float)\n parser.add_argument('--fg-iou-thresh', default=0.5, type=float)\n parser.add_argument('--min-instances', default=3, type=int)\n 
parser.add_argument('--max-instances', default=15, type=int)\n\n args = parser.parse_args()\n print(args)\n\n if args.sanity:\n sanity_check(args)\n sys.exit()\n\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n os.environ[\"MASTER_PORT\"] = args.port\n\n mp.spawn(main, nprocs=args.world_size, args=(args,))\n" ]
[ [ "torch.nonzero", "torch.optim.AdamW", "torch.optim.lr_scheduler.StepLR", "torch.distributed.init_process_group", "numpy.random.seed", "torch.no_grad", "torch.utils.data.DistributedSampler", "torch.multiprocessing.spawn", "torch.utils.data.SequentialSampler", "torch.manual_seed", "torch.cuda.set_device", "torch.load", "torch.as_tensor", "torch.distributed.get_rank" ] ]
Tevien/NNDrone
[ "76dce457324ea03a8757d74f6403fbf60132294b" ]
[ "nndrone/converters.py" ]
[ "import numpy as np\nimport pickle\nimport math\n\ntry:\n from utilities import dot_loss, next_batch\nexcept ImportError:\n from utilities.utilities import dot_loss, next_batch\n\nclass DontCacheRef(Exception):\n pass\n\nclass BasicConverter(object):\n def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = 0):\n # training control\n self._learning_rate = learning_rate\n self._batchSize = batch_size\n self._num_epochs = num_epochs\n self._threshold = threshold\n self._add_layer_dynamic = add_layer_dynamic\n self._layer_to_expand = int(layer_to_expand)\n # training history\n self._updatedLoss = 1000.0\n self._diffs = []\n self._losses = []\n self._updates = []\n self._epoch = 0\n\n\n def losses(self):\n return self._losses\n\n\n def diffs(self):\n return self._diffs\n\n\n def updates(self):\n return self._updates\n\n\n def save_history(self, fname):\n f_train = open(fname, 'wb')\n training_data = [self._losses, self._diffs, self._updates]\n pickle.dump(training_data, f_train)\n f_train.close()\n\n\n def get_refs(self, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True):\n try:\n if not cache_data:\n raise DontCacheRef()\n # Return the cached list of reference outputs for the base model\n return (self.__datapoints, self.__refs)\n except (DontCacheRef, AttributeError) as e:\n # Create the list of reference outputs for the base model\n if conv_1d and conv_2d:\n print('ERROR: conv_1d and conv_2d are mutually exclusive')\n return None\n refs = []\n flattened = []\n for point in datapoints:\n spoint = point\n if scaler and not conv_2d:\n spoint = scaler.transform([point])\n prob = 0.0\n if conv_1d:\n prob = base_model.predict_proba(np.expand_dims(np.expand_dims(spoint, axis = 2), axis = 0))[0][0]\n elif conv_2d:\n # this will match if original model was trained with correct dimensionality\n prob = base_model.predict_proba(np.expand_dims(spoint, axis = 0))\n else:\n prob = base_model.predict_proba(spoint.reshape(1, -1))[0][0]\n refs.append(prob)\n flattened.append(spoint.flatten().tolist())\n self.__datapoints = np.asarray(flattened)\n self.__refs = np.asarray(refs)\n return (self.__datapoints, self.__refs)\n\n\n def convert_model(self, drone_model, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True, epoch_reset = False):\n # Get the list of reference outputs for the base model\n datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, conv_1d, conv_2d, cache_data)\n inflate = 0 # to inflate the learning without change iterations\n if epoch_reset:\n self._epoch = 0\n avloss = 0\n # convert until min epochs are passed and leave only if loss at minima\n while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):\n # initialize the total loss for the epoch\n epochloss = []\n # loop over our data in batches\n for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):\n batchY = np.array(batchY)\n if batchX.shape[0] != self._batchSize:\n print('Batch size insufficient (%s), continuing...' 
% batchY.shape[0])\n continue\n # Find current output and calculate loss for our graph\n preds = drone_model.evaluate_total(batchX, debug = False)\n loss, error = dot_loss(preds, batchY)\n epochloss.append(loss)\n # Update the model\n drone_model.update(batchX, batchY, self._learning_rate)\n avloss = np.average(epochloss)\n diff = 0.0\n if self._epoch > 0:\n # is the relative improvement of the loss too small, smaller than threshold\n diff = math.fabs(avloss - self._losses[-1]) / avloss\n self._diffs.append(diff)\n self._losses.append(avloss)\n update = 0\n modify = True if (diff < self._threshold) else False\n if modify:\n # If it is less than the threshold, is it below\n # where we last updated, has the drone learned enough\n #\n # - skip checks if we have never updated before\n # - do at least 6 learning iterations before attempting new update\n # - use asymptotic exponential to push model to learn\n # until its loss is far enough away from previous update,\n inflate += 1 # iterate inflating\n modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)\n if modify:\n update = 1\n inflate = 0\n print('Model conversion not sufficient, updating...')\n print('Last updated loss: %s' % self._updatedLoss)\n self._updatedLoss = avloss\n if self._add_layer_dynamic:\n drone_model.add_layer_dynamic()\n else:\n drone_model.expand_layer_dynamic(self._layer_to_expand)\n print('Model structure is now:')\n drone_model.print_layers()\n self._updates.append(update)\n print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))\n # update our loss history list by taking the average loss\n # across all batches\n if self._epoch == 0: # be consistent at the first epoch\n self._losses.append(avloss)\n self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)\n self._updates.append(0)\n self._epoch += 1\n return drone_model\n\n\n\nclass AdvancedConverter(object):\n def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = None):\n # training control\n self._learning_rate = learning_rate\n self._batchSize = batch_size\n self._num_epochs = num_epochs\n self._threshold = threshold\n self._add_layer_dynamic = add_layer_dynamic\n self.__round_robin = False\n if layer_to_expand is None:\n self.__round_robin = True\n self._layer_to_expand = int(layer_to_expand) if layer_to_expand is not None else None\n # training history\n self._updatedLoss = 1000.0\n self._diffs = []\n self._losses = []\n self._updates = []\n self._epoch = 0\n self.__rr_begin = 0\n self.__rr_last = 0\n\n def losses(self):\n return self._losses\n\n\n def diffs(self):\n return self._diffs\n\n\n def updates(self):\n return self._updates\n\n\n def round_robin(self, num_layers):\n self.__rr_last = self.__rr_begin\n self.__rr_begin = np.random.randint(0, num_layers - 1) # careful, expanding last layer will change output number\n return self.__rr_last\n\n\n def save_history(self, fname):\n f_train = open(fname, 'wb')\n training_data = [self._losses, self._diffs, self._updates]\n pickle.dump(training_data, f_train)\n f_train.close()\n\n\n def get_refs(self, base_model, datapoints, scaler = None, cache_data = True):\n try:\n if not cache_data:\n raise DontCacheRef()\n # Return the cached list of reference outputs for the base model\n return (self.__datapoints, self.__refs)\n except(DontCacheRef, AttributeError) as e:\n # 
Create the list of reference outputs for the base model\n refs = []\n datapoints_for_drone = datapoints\n if scaler:\n datapoints_for_drone = scaler.transform(datapoints)\n for point in datapoints_for_drone:\n prob = base_model.predict_proba(point)\n refs.append(prob)\n self.__datapoints = datapoints_for_drone\n self.__refs = refs\n return (self.__datapoints, self.__refs)\n\n def convert_model(self, drone_model, base_model, datapoints, scaler = None, cache_data = True, epoch_reset = False):\n # Get the list of reference outputs for the base model\n datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, cache_data)\n inflate = 0 # to inflate the learning without change iterations\n if epoch_reset:\n self._epoch = 0\n avloss = 0\n # convert until min epochs are passed and leave only if loss at minima\n while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):\n # initialize the total loss for the epoch\n epochloss = []\n # loop over our data in batches\n for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):\n batchY = np.array(batchY)\n if batchX.shape[0] != self._batchSize:\n print('Batch size insufficient ({}), continuing...'.format(batchY.shape[0]))\n continue\n # Find current output and calculate loss for our graph\n preds = drone_model.evaluate_total(batchX, debug = False)\n loss, error = dot_loss(preds, batchY)\n epochloss.append(loss)\n # Update the model\n drone_model.update(batchX, batchY, self._learning_rate)\n avloss = np.average(epochloss)\n diff = 0.0\n if self._epoch > 0:\n # is the relative improvement of the loss too small, smaller than threshold\n diff = math.fabs(avloss - self._losses[-1]) / avloss\n self._diffs.append(diff)\n self._losses.append(avloss)\n update = 0\n modify = True if (diff < self._threshold) else False\n if modify:\n # If it is less than the threshold, is it below\n # where we last updated, has the drone learned enough\n #\n # - skip checks if we have never updated before\n # - do at least 6 learning iterations before attempting new update\n # - use asymptotic exponential to push model to learn\n # until its loss is far enough away from previous update,\n inflate += 1 # iterate inflating\n modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)\n if modify:\n update = 1\n inflate = 0\n print('Model conversion not sufficient, updating...')\n print('Last updated loss: %s' % self._updatedLoss)\n self._updatedLoss = avloss\n if self._add_layer_dynamic:\n drone_model.add_layer_dynamic()\n elif self._layer_to_expand is not None:\n drone_model.expand_layer_dynamic(self._layer_to_expand)\n else:\n drone_model.expand_layer_dynamic(self.round_robin(drone_model.num_layers()))\n print('Model structure is now:')\n drone_model.print_layers()\n self._updates.append(update)\n print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))\n # update our loss history list by taking the average loss\n # across all batches\n if self._epoch == 0: # be consistent at the first epoch\n self._losses.append(avloss)\n self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)\n self._updates.append(0)\n self._epoch += 1\n return drone_model\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.exp", "numpy.average", "numpy.random.ranint", "numpy.expand_dims" ] ]
ucgmsim/gmhazard
[ "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e" ]
[ "calculation/gmhazard_calc/gmhazard_calc/nz_code/nzs1170p5/nzs_zfactor_2016/ll2z.py", "calculation/spatial_hazard/spatial_hazard/scripts/generate_realisations.py" ]
[ "#!/usr/bin/env python\n\nimport os\n\nfrom matplotlib.path import Path\nimport numpy as np\nimport pandas as pd\nfrom scipy.interpolate import griddata\n\nfrom qcore import geo\n\nDATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"zdata\")\n\n# constant regions and max bounds for faster processing\nPOLYGONS = [\n (os.path.join(DATA, \"AucklandPolgonOutline_Points_WGS84.txt\"), 0.13),\n (os.path.join(DATA, \"ChristchurchPolgonOutline_Points_WGS84.txt\"), 0.3),\n (os.path.join(DATA, \"NorthlandPolgonOutline_Points_WGS84.txt\"), 0.1),\n]\n\nCITY_RADIUS_SEARCH = 2\n\n# contours\nZ_VALS = [0.13, 0.15, 0.175, 0.188, 0.20, 0.25, 0.275, 0.30, 0.325, 0.35, 0.375, 0.40, 0.415, 0.425, 0.45, 0.475, 0.50, 0.525, 0.55, 0.575, 0.60]\nZ_FORMAT = os.path.join(DATA, \"Z_%.3f_points_WGS84.txt\")\n\n\ndef ll2z(locations, radius_search=CITY_RADIUS_SEARCH):\n \"\"\"Computes the z-value for the given lon, lat tuple or\n list of lon, lat tuples\n :param locations:\n :param radius_search: Checks to see if a city is within X km from the given location,\n removes the search if value is set to 0\n :return: Array of z-values, one for each location specified\n \"\"\"\n try:\n multi = bool(len(locations[0]))\n except TypeError:\n multi = False\n locations = [locations]\n out = np.zeros(len(locations))\n\n # check if in polygon\n for p in POLYGONS:\n c = Path(\n geo.path_from_corners(\n corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4\n )\n ).contains_points(locations)\n out = np.where(c, p[1], out)\n\n # check if within specified radius from city\n if radius_search > 0:\n cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])\n\n cities_ll = cities[['lon', 'lat']].values\n for i, location in enumerate(locations):\n dists = geo.get_distances(cities_ll, location[0], location[1])\n\n if np.any(dists < radius_search):\n cities['dist'] = dists\n city_idx = cities.dist.idxmin()\n out[i] = cities.loc[city_idx].z_value\n\n # interpolate contours\n nz = []\n points_all = []\n for z in Z_VALS:\n points = np.atleast_2d(np.loadtxt(Z_FORMAT % z))\n nz.append(len(points))\n points_all.append(points)\n points = np.concatenate(points_all)\n del points_all\n z = griddata(points, np.repeat(Z_VALS, nz), locations, method=\"linear\")\n\n return np.where(out == 0, np.where(np.isnan(z), 0.13, z), out)\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument(\"lon\", type=float)\n parser.add_argument(\"lat\", type=float)\n a = parser.parse_args()\n print(ll2z((a.lon, a.lat)))\n", "import argparse\nfrom pathlib import Path\nfrom typing import Sequence\n\nimport numpy as np\nimport pandas as pd\n\nimport gmhazard_calc as gc\nfrom spatial_hazard import correlate_ims\nfrom spatial_hazard import plots\n\n\ndef main(\n IM: gc.im.IM,\n fault: str,\n N: int,\n stations_ll_ffp: str,\n imdb_ffps: Sequence[str],\n output_dir: Path,\n n_procs: int,\n):\n # Load the station data\n stations_df = pd.read_csv(stations_ll_ffp, \" \", index_col=2)\n stations = stations_df.index.values\n\n # Get realisations\n print(\"Retrieving GMM parameters\")\n emp_df = correlate_ims.load_stations_fault_data(imdb_ffps, stations, IM, fault)\n\n print(\"Computing distance matrix\")\n dist_matrix = correlate_ims.calculate_distance_matrix(stations, stations_df)\n\n assert np.all(\n dist_matrix.index.values == emp_df.index.values\n ), \"Order of the stations has to be the same\"\n\n print(\"Computing correlation matrix\")\n 
R = correlate_ims.get_corr_matrix(stations, dist_matrix, IM)\n\n print(\"Generating realisation\")\n random_IMs, between_event, within_event = correlate_ims.generate_im_values(\n N, R, emp_df\n )\n\n ln_im_values, between, within = (\n pd.DataFrame(data=random_IMs.T, index=stations),\n pd.DataFrame(data=between_event.T, index=stations),\n pd.DataFrame(data=within_event.T, index=stations),\n )\n\n # Save the data\n ln_im_values.to_csv(output_dir / \"realisation_im_values.csv\", index_label=\"station\")\n between.to_csv(\n output_dir / \"realisation_between_residuals.csv\", index_label=\"station\"\n )\n within.to_csv(\n output_dir / \"realisation_within_residuals.csv\", index_label=\"station\"\n )\n emp_df.to_csv(output_dir / \"gmm_parameters.csv\", index_label=\"station\")\n\n # Generate the plots\n plot_dir = output_dir / \"plots\"\n plot_dir.mkdir(exist_ok=True)\n\n im_values = ln_im_values.apply(np.exp)\n plots.plot_realisations(\n im_values,\n stations_df,\n plot_dir,\n f\"{fault}_{IM.file_format()}_Realisation\",\n label=f\"{IM}\",\n n_procs=n_procs,\n cpt_max=0.125,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"IM\", type=str, help=\"IM of interest\")\n parser.add_argument(\n \"fault\", type=str, help=\"The fault for which to compute spatial hazard\"\n )\n parser.add_argument(\"N\", type=int, help=\"Number of realisations to generate\")\n parser.add_argument(\n \"stations_ll_ffp\",\n type=str,\n help=\"Path to the stations ll file. Has to contain the stations of interest.\",\n )\n parser.add_argument(\n \"imdb_ffps\", type=str, nargs=\"+\", help=\"Path of the different IMDBs to use\"\n )\n parser.add_argument(\"output_dir\", type=Path, help=\"Path of the output directory\")\n parser.add_argument(\n \"--n_procs\", type=int, help=\"Number of processes to use\", default=4\n )\n\n args = parser.parse_args()\n\n main(\n gc.im.IM.from_str(args.IM),\n args.fault,\n args.N,\n args.stations_ll_ffp,\n args.imdb_ffps,\n args.output_dir,\n args.n_procs,\n )\n" ]
[ [ "numpy.concatenate", "numpy.isnan", "numpy.where", "numpy.any", "numpy.loadtxt", "numpy.repeat" ], [ "numpy.all", "pandas.DataFrame", "pandas.read_csv" ] ]
Pavan-Samtani/CGP-CNN-v2
[ "2eede8297542b7551d5ef5bf11aeeaba34bf4f3f" ]
[ "cgp.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport time\nimport numpy as np\nimport math\nimport os\n\n\n# gene[f][c] f:function type, c:connection (nodeID)\nclass Individual(object):\n\n def __init__(self, net_info, init):\n self.net_info = net_info\n self.gene = np.zeros((self.net_info.node_num + self.net_info.out_num, self.net_info.max_in_num + 1)).astype(int)\n self.is_active = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)\n self.is_pool = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)\n self.eval = None\n self.size = None\n if init:\n print('init with specific architectures')\n self.init_gene_with_conv() # In the case of starting only convolution\n else:\n self.init_gene() # generate initial individual randomly\n\n def init_gene_with_conv(self):\n # initial architecture\n arch = ['S_ConvBlock_64_3']\n\n input_layer_num = int(self.net_info.input_num / self.net_info.rows) + 1\n output_layer_num = int(self.net_info.out_num / self.net_info.rows) + 1\n layer_ids = [((self.net_info.cols - 1 - input_layer_num - output_layer_num) + i) // (len(arch)) for i in\n range(len(arch))]\n prev_id = 0 # i.e. input layer\n current_layer = input_layer_num\n block_ids = [] # *do not connect with these ids\n\n # building convolution net\n for i, idx in enumerate(layer_ids):\n\n current_layer += idx\n n = current_layer * self.net_info.rows + np.random.randint(self.net_info.rows)\n block_ids.append(n)\n self.gene[n][0] = self.net_info.func_type.index(arch[i])\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n\n self.gene[n][1] = prev_id\n for j in range(1, self.net_info.max_in_num):\n self.gene[n][j + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)\n\n prev_id = n + self.net_info.input_num\n\n # output layer \n n = self.net_info.node_num\n type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num\n self.gene[n][0] = np.random.randint(type_num)\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n\n self.gene[n][1] = prev_id\n for i in range(1, self.net_info.max_in_num):\n self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)\n block_ids.append(n)\n\n # intermediate node\n for n in range(self.net_info.node_num + self.net_info.out_num):\n\n if n in block_ids:\n continue\n\n # type gene\n type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num\n self.gene[n][0] = np.random.randint(type_num)\n # connection gene\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n for i in range(self.net_info.max_in_num):\n self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)\n\n self.check_active()\n\n def init_gene(self):\n # intermediate node\n for n in range(self.net_info.node_num + 
self.net_info.out_num):\n # type gene\n type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num\n self.gene[n][0] = np.random.randint(type_num)\n # connection gene\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n for i in range(self.net_info.max_in_num):\n self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)\n\n self.check_active()\n\n def __check_course_to_out(self, n):\n if not self.is_active[n]:\n self.is_active[n] = True\n t = self.gene[n][0]\n if n >= self.net_info.node_num: # output node\n in_num = self.net_info.out_in_num[t]\n else: # intermediate node\n in_num = self.net_info.func_in_num[t]\n\n for i in range(in_num):\n if self.gene[n][i + 1] >= self.net_info.input_num:\n self.__check_course_to_out(self.gene[n][i + 1] - self.net_info.input_num)\n\n def check_active(self):\n # clear\n self.is_active[:] = False\n # start from output nodes\n for n in range(self.net_info.out_num):\n self.__check_course_to_out(self.net_info.node_num + n)\n\n def check_pool(self):\n is_pool = True\n pool_num = 0\n for n in range(self.net_info.node_num + self.net_info.out_num):\n if self.is_active[n]:\n if self.gene[n][0] > 19:\n is_pool = False\n pool_num += 1\n return is_pool, pool_num\n\n def __mutate(self, current, min_int, max_int):\n mutated_gene = current\n while current == mutated_gene:\n mutated_gene = min_int + np.random.randint(max_int - min_int)\n return mutated_gene\n\n def mutation(self, mutation_rate=0.01):\n active_check = False\n\n for n in range(self.net_info.node_num + self.net_info.out_num):\n t = self.gene[n][0]\n # mutation for type gene\n type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num\n if np.random.rand() < mutation_rate and type_num > 1:\n self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)\n if self.is_active[n]:\n active_check = True\n # mutation for connection gene\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]\n for i in range(self.net_info.max_in_num):\n if np.random.rand() < mutation_rate and max_connect_id - min_connect_id > 1:\n self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)\n if self.is_active[n] and i < in_num:\n active_check = True\n\n self.check_active()\n return active_check\n\n def neutral_mutation(self, mutation_rate=0.01):\n for n in range(self.net_info.node_num + self.net_info.out_num):\n t = self.gene[n][0]\n # mutation for type gene\n type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num\n if not self.is_active[n] and np.random.rand() < mutation_rate and type_num > 1:\n self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)\n # mutation for connection gene\n col = np.min((int(n / self.net_info.rows), self.net_info.cols))\n max_connect_id = col * self.net_info.rows + self.net_info.input_num\n min_connect_id = (col - self.net_info.level_back) * 
self.net_info.rows + self.net_info.input_num \\\n if col - self.net_info.level_back >= 0 else 0\n in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]\n for i in range(self.net_info.max_in_num):\n if (not self.is_active[n] or i >= in_num) and np.random.rand() < mutation_rate \\\n and max_connect_id - min_connect_id > 1:\n self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)\n\n self.check_active()\n return False\n\n def count_active_node(self):\n return self.is_active.sum()\n\n def copy(self, source):\n self.net_info = source.net_info\n self.gene = source.gene.copy()\n self.is_active = source.is_active.copy()\n self.eval = source.eval\n self.size = source.size\n\n def active_net_list(self):\n net_list = [[\"input\", 0, 0]]\n active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)\n active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)\n\n for n, is_a in enumerate(self.is_active):\n if is_a:\n t = self.gene[n][0]\n if n < self.net_info.node_num: # intermediate node\n type_str = self.net_info.func_type[t]\n else: # output node\n type_str = self.net_info.out_type[t]\n\n connections = [active_cnt[self.gene[n][i + 1]] for i in range(self.net_info.max_in_num)]\n net_list.append([type_str] + connections)\n return net_list\n\n\n# CGP with (1 + \\lambda)-ES\nclass CGP(object):\n def __init__(self, net_info, eval_func, lam=4, imgSize=32, init=False, bias=0):\n self.lam = lam\n self.pop = [Individual(net_info, init) for _ in range(1 + self.lam)]\n self.eval_func = eval_func\n self.num_gen = 0\n self.num_eval = 0\n self.max_pool_num = int(math.log2(imgSize) - 2)\n self.init = init\n self.bias = bias\n\n def _evaluation(self, pop, eval_flag):\n # create network list\n net_lists = []\n active_index = np.where(eval_flag)[0]\n for i in active_index:\n net_lists.append(pop[i].active_net_list())\n\n # evaluation\n fp = self.eval_func(net_lists)\n for i, j in enumerate(active_index):\n if isinstance(fp[i], tuple):\n pop[j].eval = fp[i][0]\n pop[j].size = fp[i][1]\n else:\n pop[j].eval = fp[i]\n pop[j].size = np.inf\n evaluations_acc = np.zeros(len(pop))\n evaluations_size = np.zeros(len(pop))\n for i in range(len(pop)):\n evaluations_acc[i] = pop[i].eval\n evaluations_size[i] = pop[i].size\n\n self.num_eval += len(net_lists)\n return evaluations_acc, evaluations_size\n\n def _log_data(self, net_info_type='active_only', start_time=0):\n log_list = [self.num_gen, self.num_eval, time.time() - start_time, self.pop[0].eval,\n self.pop[0].size, self.pop[0].count_active_node()]\n if net_info_type == 'active_only':\n log_list.append(self.pop[0].active_net_list())\n elif net_info_type == 'full':\n log_list += self.pop[0].gene.flatten().tolist()\n else:\n pass\n return log_list\n\n def _log_data_children(self, net_info_type='active_only', start_time=0, pop=None):\n log_list = [self.num_gen, self.num_eval, time.time() - start_time, pop.eval, pop.size, pop.count_active_node()]\n if net_info_type == 'active_only':\n log_list.append(pop.active_net_list())\n elif net_info_type == 'full':\n log_list += pop.gene.flatten().tolist()\n else:\n pass\n return log_list\n\n def load_log(self, log_data):\n self.num_gen = int(log_data[0])\n self.num_eval = int(log_data[1])\n net_info = self.pop[0].net_info\n self.pop[0].eval = log_data[3]\n self.pop[0].size = log_data[4]\n print(\"Loaded Accuracy:\", self.pop[0].eval)\n self.pop[0].gene = np.int64(np.array(log_data[6:])).reshape(\n (net_info.node_num 
+ net_info.out_num, net_info.max_in_num + 1))\n self.pop[0].check_active()\n\n # Evolution CGP:\n # At each iteration:\n # - Generate lambda individuals in which at least one active node changes (i.e., forced mutation)\n # - Mutate the best individual with neutral mutation (leaving the active nodes unchanged)\n # if the best individual is not updated.\n def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_path='./'):\n with open(os.path.join(log_path, 'child.txt'), 'a') as fw_c:\n writer_c = csv.writer(fw_c, lineterminator='\\n')\n start_time = time.time()\n eval_flag = np.empty(self.lam)\n active_num = self.pop[0].count_active_node()\n _, pool_num = self.pop[0].check_pool()\n if self.init:\n pass\n else: # in the case of not using an init individual\n while active_num < self.pop[0].net_info.min_active_num or pool_num > self.max_pool_num:\n self.pop[0].mutation(1.0)\n active_num = self.pop[0].count_active_node()\n _, pool_num = self.pop[0].check_pool()\n if self.pop[0].eval is None:\n self._evaluation([self.pop[0]], np.array([True]))\n print(self._log_data(net_info_type='active_only', start_time=start_time))\n\n while self.num_gen < max_eval:\n self.num_gen += 1\n # reproduction\n for i in range(self.lam):\n eval_flag[i] = False\n self.pop[i + 1].copy(self.pop[0]) # copy a parent\n active_num = self.pop[i + 1].count_active_node()\n _, pool_num = self.pop[i + 1].check_pool()\n # mutation (forced mutation)\n while not eval_flag[i] or active_num < self.pop[\n i + 1].net_info.min_active_num or pool_num > self.max_pool_num:\n self.pop[i + 1].copy(self.pop[0]) # copy a parent\n eval_flag[i] = self.pop[i + 1].mutation(mutation_rate) # mutation\n active_num = self.pop[i + 1].count_active_node()\n _, pool_num = self.pop[i + 1].check_pool()\n\n # evaluation and selection\n evaluations_acc, evaluations_size = self._evaluation(self.pop[1:], eval_flag=eval_flag)\n evaluations_argsort = np.argsort(-evaluations_acc)\n print(evaluations_acc, evaluations_argsort)\n best_arg = evaluations_argsort[0]\n # save\n f = open(os.path.join(log_path, 'arch_child.txt'), 'a')\n writer_f = csv.writer(f, lineterminator='\\n')\n for c in range(1 + self.lam):\n writer_c.writerow(\n self._log_data_children(net_info_type='full', start_time=start_time, pop=self.pop[c]))\n writer_f.writerow(\n self._log_data_children(net_info_type='active_only', start_time=start_time, pop=self.pop[c]))\n f.close()\n # replace the parent by the best individual\n print(\"Comparing children with parent...\")\n print(f\"Best Child's Accuracy {evaluations_acc[best_arg]}, Parent Accuracy: {self.pop[0].eval}\")\n if evaluations_acc[best_arg] > self.pop[0].eval:\n self.pop[0].copy(self.pop[best_arg + 1])\n print(\"Replacing parent with best child\")\n elif self.bias > 0:\n found = False\n print(f\"Parent: Accuracy: {self.pop[0].eval}, Size: {self.pop[0].size}\")\n for i, idx in enumerate(evaluations_argsort):\n print(f\"Child {i + 1}: Accuracy: {evaluations_acc[idx]}, Size: {evaluations_size[idx]}\")\n if evaluations_acc[idx] > (self.pop[0].eval - self.bias) and \\\n evaluations_size[idx] < self.pop[0].size:\n print(\"Replacing parent with child\")\n self.pop[0].copy(self.pop[idx + 1])\n found = True\n break\n if not found:\n self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation) \n else:\n self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)\n\n # display and save log\n print(self._log_data(net_info_type='active_only', start_time=start_time))\n fw = open(os.path.join(log_path, 
'log_cgp.txt'), 'a')\n writer = csv.writer(fw, lineterminator='\\n')\n writer.writerow(self._log_data(net_info_type='full', start_time=start_time))\n fa = open(os.path.join(log_path, 'arch.txt'), 'a')\n writer_a = csv.writer(fa, lineterminator='\\n')\n writer_a.writerow(self._log_data(net_info_type='active_only', start_time=start_time))\n fw.close()\n fa.close()\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.random.rand", "numpy.zeros", "numpy.where", "numpy.arange", "numpy.random.randint", "numpy.argsort", "numpy.cumsum" ] ]
oesst/HRTF_neural_model
[ "494d29c514eaad3aee575f77d08a59a9d011a415" ]
[ "src/data/generateData.py" ]
[ "# -*- coding: utf-8 -*-\nimport click\nimport logging\nfrom pathlib import Path\n\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport numpy as np\nimport soundfile as sf\nfrom scipy import io\nimport scipy.signal as sp\nfrom src.features import gtgram\n\nROOT = Path(__file__).resolve().parents[2]\n# set the path to the sound files\nSOUND_FILES = ROOT / 'data/raw/sound_samples/'\n# create a list of the sound files\nSOUND_FILES = list(SOUND_FILES.glob('**/*.wav'))\n\n# Define up to which frequency the data should be generated\n\n\ndef create_data(freq_bands=24, participant_number=19, snr=0.2, normalize=False, azimuth=12, time_window=0.1, max_freq=20000):\n\n str_r = 'data/processed_' + str(max_freq) + 'Hz/binaural_right_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(\n int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'\n str_l = 'data/processed_' + str(max_freq) + 'Hz/binaural_left_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(\n int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'\n\n path_data_r = ROOT / str_r\n path_data_l = ROOT / str_l\n\n # check if we can load the data from a file\n if path_data_r.is_file() and path_data_l.is_file():\n print('Data set found. Loading from file : ' + str_r)\n return np.load(path_data_r), np.load(path_data_l)\n else:\n print('Creating data set : ' + str_l)\n # read the HRIR data\n hrtf_path = (\n ROOT / 'data/raw/hrtfs/hrir_{0:03d}.mat'.format(participant_number)).resolve()\n hrir_mat = io.loadmat(hrtf_path.as_posix())\n\n # get the data for the left ear\n hrir_l = hrir_mat['hrir_l']\n # get the data for the right ear\n hrir_r = hrir_mat['hrir_r']\n # use always all elevations -> 50\n psd_all_i = np.zeros((len(SOUND_FILES), 50, freq_bands))\n psd_all_c = np.zeros((len(SOUND_FILES), 50, freq_bands))\n # temporal_means = np.zeros((hrir_elevs.shape[0],87))\n for i in range(psd_all_i.shape[0]):\n for i_elevs in range(psd_all_i.shape[1]):\n # read the hrir for a specific location\n hrir_elevs = np.squeeze(hrir_l[azimuth, i_elevs, :])\n # load a sound sample\n signal = sf.read(SOUND_FILES[i].as_posix())[0]\n\n # add noise to the signal\n signal_elevs = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \\\n snr * (signal + np.random.random(signal.shape[0]) * snr)\n\n ###### TAKE THE ENTIRE SIGNAL #######\n # window_means = get_spectrum(signal_elevs,nperseg=welch_nperseg)\n #####################################\n # read the hrir for a specific location\n hrir_elevs = np.squeeze(hrir_r[azimuth, i_elevs, :])\n\n # add noise to the signal\n signal_elevs_c = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \\\n snr * (signal + np.random.random(signal.shape[0]) * snr)\n\n # Default gammatone-based spectrogram parameters\n twin = time_window\n thop = twin / 2\n fmin = 20\n fs = 44100\n\n ###### Apply Gammatone Filter Bank ##############\n y = gtgram.gtgram(signal_elevs, fs, twin,\n thop, freq_bands, fmin, max_freq)\n y = (20 * np.log10(y + 1))\n window_means = np.mean(y, axis=1)\n psd_all_i[i, i_elevs, :] = window_means\n\n y = gtgram.gtgram(signal_elevs_c, fs,\n twin, thop, freq_bands, fmin, max_freq)\n y = (20 * np.log10(y + 1))\n window_means = np.mean(y, axis=1)\n psd_all_c[i, i_elevs, :] = window_means\n #################################################\n\n\n np.save(path_data_r.absolute(), 
psd_all_c)\n np.save(path_data_l.absolute(), psd_all_i)\n\n return psd_all_c, psd_all_i\n\n\ndef main():\n \"\"\" This script creates HRTF filtered sound samples of the sounds given in the folder SOUND_FILES.\n This is done for each participant's HRTF specified in participant_numbers.\n ALL ELEVATIONS (50) are taken to filter the data.\n\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n ########################################################################\n ######################## Set parameters ################################\n ########################################################################\n normalize = False # parameter is not considered\n\n time_window = 0.1 # time window for spectrogram in sec\n\n # Parameters to test\n snrs = np.arange(0, 1.1, 0.1) # Signal to noise ratio\n # snrs = np.array([0.2]) # Signal to noise ratio\n # snrs = np.array([0.2]) # Signal to noise ratio\n # freq_bandss = np.array([32, 64, 128]) # Frequency bands in resulting data\n freq_bandss = np.array([128]) # Frequency bands in resulting data\n # azimuths = np.arange(0, 25, 1) # which azimuths to create\n azimuths = np.array([12]) # which azimuths to create\n participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,\n 12, 15, 17, 18, 19, 20,\n 21, 27, 28, 33, 40, 44,\n 48, 50, 51, 58, 59, 60,\n 61, 65, 119, 124, 126,\n 127, 131, 133, 134, 135,\n 137, 147, 148, 152, 153,\n 154, 155, 156, 158, 162,\n 163, 165])\n # define max frequency for gammatone filter bank\n max_freqs = np.array([16000, 20000])\n\n # participant_numbers = participant_numbers[::-1]\n # snrs = snrs[::-1]\n # freq_bandss = freq_bandss[::-1]\n\n ########################################################################\n ########################################################################\n\n # walk over all parameter combinations\n for _, participant_number in enumerate(participant_numbers):\n for _, snr in enumerate(snrs):\n for _, freq_bands in enumerate(freq_bandss):\n for _, azimuth in enumerate(azimuths):\n for _, max_freq in enumerate(max_freqs):\n psd_all_c, psd_all_i = create_data(freq_bands, participant_number, snr, normalize, azimuth, time_window, max_freq=max_freq)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n main()\n" ]
[ [ "numpy.array", "numpy.load", "numpy.mean", "numpy.arange", "scipy.signal.lfilter", "numpy.random.random", "numpy.log10", "numpy.squeeze" ] ]
manoil/Deep_VoiceChanger
[ "5cd3d6ff2a8a9eea3b8fae1c0e6ed2d00012b771" ]
[ "nets/block.py" ]
[ "import math\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\nfrom .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D\nfrom .sn_linear import SNLinear\n\ndef _upsample(x):\n h, w = x.shape[2:]\n return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))\n\ndef _downsample(x):\n return F.average_pooling_2d(x, 2)\n\ndef upsample_conv(x, conv):\n return conv(_upsample(x))\n\ndef _upsample_frq(x):\n h, w = x.shape[2:]\n return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))\n\ndef _downsample_frq(x):\n return F.average_pooling_2d(x, (1,2))\n\ndef upsample_conv_frq(x, conv):\n return conv(_upsample_frq(x))\n\nclass ResBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):\n super(ResBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n initializer_sc = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None\n self.learnable_sc = in_channels != out_channels\n self.dr = dr\n self.bn = bn\n with self.init_scope():\n self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)\n self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)\n if bn:\n self.b1 = L.BatchNormalization(out_channels)\n self.b2 = L.BatchNormalization(out_channels)\n if self.learnable_sc:\n self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)\n\n def residual(self, x):\n h = x\n h = self.c1(h)\n if self.bn:\n h = self.b1(h)\n if self.activation:\n h = self.activation(h)\n if self.mode:\n h = self.mode(h)\n if self.dr:\n with chainer.using_config('train', True):\n h = F.dropout(h, self.dr)\n h = self.c2(h)\n if self.bn:\n h = self.b2(h)\n if self.activation:\n h = self.activation(h)\n return h\n\n def shortcut(self, x):\n if self.mode:\n x = self.mode(x)\n if self.learnable_sc:\n x = self.c_sc(x)\n return x\n\n def __call__(self, x):\n return self.residual(x) + self.shortcut(x)\n\nclass ConvBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):\n super(ConvBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.bn = bn\n self.dr = dr\n with self.init_scope():\n if mode == 'none':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'none-7':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)\n elif mode == 'down':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'up':\n self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'full-down':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)\n elif mode == 'frq':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n elif mode == 'frq-down':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n self.activation = lambda x: activation(_downsample(x))\n elif mode == 'frq-up':\n 
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n self.activation = lambda x: activation(_upsample(x))\n elif mode == 'pad':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)\n elif mode == 'trim':\n self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)\n else:\n raise Exception('mode is missing')\n if bn:\n self.b = L.BatchNormalization(out_channels)\n\n def __call__(self, h):\n if self.dr:\n with chainer.using_config('train', True):\n h = F.dropout(h, self.dr)\n h = self.c(h)\n if self.bn:\n h = self.b(h)\n if self.activation:\n h = self.activation(h)\n return h\n \nclass CoPSBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):\n super(CoPSBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.bn = bn\n with self.init_scope():\n self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)\n self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)\n if bn:\n self.b = L.BatchNormalization(out_channels)\n\n def pixel_shuffle(self, x):\n out = self.ps(x)\n b = out.shape[0]\n c = out.shape[1]\n h = out.shape[2]\n w = out.shape[3]\n out = F.reshape(out, (b, 2, 2, c//4, h, w))\n out = F.transpose(out, (0, 3, 4, 1, 5, 2))\n out = F.reshape(out, (b, c//4, h*2, w*2))\n return out\n\n def __call__(self, h):\n h = self.pixel_shuffle(h)\n h = self.c(h)\n if self.bn:\n h = self.b(h)\n if self.activation:\n h = self.activation(h)\n return h\n \nclass SNResBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):\n super(SNResBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n initializer_sc = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.dr = dr\n self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None\n self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'\n with self.init_scope():\n self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)\n self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)\n if self.learnable_sc:\n self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)\n\n def residual(self, x):\n h = x\n h = self.activation(h)\n h = self.c1(h)\n if self.sample:\n h = self.sample(h)\n if self.dr:\n with chainer.using_config('train', True):\n h = F.dropout(h, self.dr)\n h = self.activation(h)\n h = self.c2(h)\n return h\n\n def shortcut(self, x):\n if self.learnable_sc:\n x = self.c_sc(x)\n if self.sample:\n return self.sample(x)\n else:\n return x\n else:\n return x\n\n def __call__(self, x):\n return self.residual(x) + self.shortcut(x)\n\nclass SNConvBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):\n super(SNConvBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.bn = bn\n self.dr = dr\n with self.init_scope():\n if mode == 'none':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'none-7':\n self.c = 
SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)\n elif mode == 'down':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'up':\n self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)\n elif mode == 'full-down':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)\n elif mode == 'frq':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n elif mode == 'frq-down':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n self.activation = lambda x: activation(_downsample(x))\n elif mode == 'frq-up':\n self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)\n self.activation = lambda x: activation(_upsample(x))\n else:\n raise Exception('mode is missing')\n if bn:\n self.b = L.BatchNormalization(out_channels)\n\n def __call__(self, h):\n if self.dr:\n with chainer.using_config('train', True):\n h = F.dropout(h, self.dr)\n h = self.c(h)\n if self.bn:\n h = self.b(h)\n if self.activation:\n h = self.activation(h)\n return h\n \nclass SNLinearBlock(chainer.Chain):\n def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):\n super(SNLinearBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.dr = dr\n if type(out_channels) is tuple:\n self.out_shape = (-1,)+out_channels\n else:\n self.out_shape = None\n with self.init_scope():\n self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)\n\n def __call__(self, x):\n if self.dr:\n x = F.dropout(x, self.dr)\n x = self.l(x)\n x = self.activation(x)\n if self.out_shape:\n x = F.reshape(x, self.out_shape)\n return x\n\nclass SNMDBlock(chainer.Chain):\n def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):\n super(SNMDBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.B = B\n self.C = C\n self.dr = dr\n self.gap = gap\n if gap:\n in_size = 1\n if type(in_size) is int:\n in_size = (in_size, in_size)\n with self.init_scope():\n self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)\n self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)\n\n def __call__(self, x):\n if self.dr:\n with chainer.using_config('train', True):\n x = F.dropout(x, self.dr)\n if self.gap:\n x = F.sum(x, axis=(2,3))\n N = x.shape[0]\n # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py\n feature = F.reshape(F.leaky_relu(x), (N, -1))\n m = F.reshape(self.md(feature), (N, self.B * self.C, 1))\n m0 = F.broadcast_to(m, (N, self.B * self.C, N))\n m1 = F.transpose(m0, (2, 1, 0))\n d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))\n d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1\n h = F.concat([feature, d])\n\n h = self.l(h)\n return h\n\nclass SNL1DBlock(chainer.Chain):\n def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):\n super(SNL1DBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.dr = dr\n self.out_ch = out_ch\n with self.init_scope():\n self.l = 
SNLinear(in_ch*width, out_ch*width, initialW=initializer)\n\n def __call__(self, x):\n if self.dr:\n x = F.dropout(x, self.dr)\n x = F.transpose(x, (0, 2, 1, 3))\n out_shape = list(x.shape)\n x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))\n x = self.l(x)\n x = self.activation(x)\n out_shape[2] = self.out_ch\n x = F.reshape(x, out_shape)\n x = F.transpose(x, (0, 2, 1, 3))\n return x\n\nclass L1DBlock(chainer.Chain):\n def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):\n super(L1DBlock, self).__init__()\n initializer = chainer.initializers.GlorotUniform()\n self.activation = activation\n self.dr = dr\n self.out_ch = out_ch\n with self.init_scope():\n self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)\n\n def __call__(self, x):\n if self.dr:\n x = F.dropout(x, self.dr)\n x = F.transpose(x, (0, 2, 1, 3))\n out_shape = list(x.shape)\n x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))\n x = self.l(x)\n x = self.activation(x)\n out_shape[2] = self.out_ch\n x = F.reshape(x, out_shape)\n x = F.transpose(x, (0, 2, 1, 3))\n return x\n\nclass CLBlock(chainer.Chain):\n def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):\n super(CLBlock, self).__init__()\n self.dr = dr\n if out_ch - liner_out_ch <= 0:\n raise Exception('out_ch <= liner_out_ch!')\n with self.init_scope():\n self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)\n self.l = L1DBlock(in_ch, liner_out_ch, width, activation)\n\n def __call__(self, x):\n h = x\n if self.dr:\n h = F.dropout(h, self.dr)\n h1 = self.c(h)\n h2 = self.l(h)\n h = F.concat([h1,h2])\n return h\n\nclass SNCLBlock(chainer.Chain):\n def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):\n super(SNCLBlock, self).__init__()\n self.dr = dr\n with self.init_scope():\n self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)\n self.l = SNL1DBlock(in_ch, 1, width, activation)\n\n def __call__(self, x):\n h = x\n if self.dr:\n h = F.dropout(h, self.dr)\n h1 = self.c(h)\n h2 = self.l(h)\n h = F.concat([h1,h2])\n return h\n\n" ]
[ [ "numpy.prod" ] ]
etmwb/cvsegmentation
[ "c283a79f4cf4e78d057f598944b1c252f6533f00" ]
[ "cvss/datasets/nyuv2.py" ]
[ "import os\r\nimport sys\r\nimport numpy as np\r\nimport random\r\nimport math\r\nfrom PIL import Image, ImageOps, ImageFilter\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nimport torchvision.transforms as transform\r\n\r\nfrom .base import BaseDataset\r\n\r\nclass NYUv2Segmentation(BaseDataset):\r\n BASE_DIR = 'nyuv2'\r\n NUM_CLASS = 40\r\n def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',\r\n mode=None, transform=None, target_transform=None, **kwargs):\r\n super(NYUv2Segmentation, self).__init__(\r\n root, split, mode, transform, target_transform, **kwargs)\r\n # assert exists and prepare dataset automatically\r\n root = os.path.join(root, self.BASE_DIR)\r\n assert os.path.exists(root), \"Please setup the dataset using\" + \\\r\n \"cvss/scripts/prepare_nyuv2.py\"\r\n self.images, self.masks = _get_nyuv2_pairs(root, split)\r\n if split != 'test':\r\n assert (len(self.images) == len(self.masks))\r\n if len(self.images) == 0:\r\n raise(RuntimeError(\"Found 0 images in subfolders of: \\\r\n \" + root + \"\\n\"))\r\n\r\n def __getitem__(self, index):\r\n img = Image.open(self.images[index]).convert('RGB')\r\n if self.mode == 'test':\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n return img, os.path.basename(self.images[index])\r\n mask = Image.open(self.masks[index])\r\n # synchrosized transform\r\n if self.mode == 'train':\r\n img, mask = self._sync_transform(img, mask)\r\n elif self.mode == 'val':\r\n img, mask = self._val_sync_transform(img, mask)\r\n else:\r\n assert self.mode == 'testval'\r\n mask = self._mask_transform(mask)\r\n # general resize, normalize and toTensor\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n if self.target_transform is not None:\r\n mask = self.target_transform(mask)\r\n return img, mask\r\n\r\n def _sync_transform(self, img, mask):\r\n # random mirror\r\n if random.random() < 0.5:\r\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\r\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\r\n crop_size = self.crop_size\r\n # random scale (short edge)\r\n w, h = img.size\r\n min_side = min(w, h)\r\n scale = np.random.uniform(0.5, 2.0)\r\n if min_side * scale < 350:\r\n scale = 350 * 1.0 / min_side\r\n long_size = int(self.base_size*scale)\r\n if h > w:\r\n oh = long_size\r\n ow = int(1.0 * w * long_size / h + 0.5)\r\n short_size = ow\r\n else:\r\n ow = long_size\r\n oh = int(1.0 * h * long_size / w + 0.5)\r\n short_size = oh\r\n img = img.resize((ow, oh), Image.BILINEAR)\r\n mask = mask.resize((ow, oh), Image.NEAREST)\r\n # pad crop\r\n if short_size < crop_size:\r\n padh = crop_size - oh if oh < crop_size else 0\r\n padw = crop_size - ow if ow < crop_size else 0\r\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\r\n mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)\r\n # random crop crop_size\r\n w, h = img.size\r\n x1 = random.randint(0, w - crop_size)\r\n y1 = random.randint(0, h - crop_size)\r\n img = img.crop((x1, y1, x1+crop_size, y1+crop_size))\r\n mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))\r\n # final transform\r\n return img, self._mask_transform(mask)\r\n\r\n def _val_sync_transform(self, img, mask):\r\n # final transform\r\n return img, self._mask_transform(mask)\r\n\r\n def _mask_transform(self, mask):\r\n target = np.array(mask).astype('int64') - 1\r\n return torch.from_numpy(target)\r\n\r\n def __len__(self):\r\n return len(self.images)\r\n\r\n @property\r\n def pred_offset(self):\r\n return 1\r\n\r\n\r\ndef _get_nyuv2_pairs(folder, 
split='train'):\r\n def get_path_pairs(folder, split_file):\r\n img_paths = []\r\n mask_paths = []\r\n with open(os.path.join(folder, split_file), 'r') as f: \r\n for filename in f.readlines(): \r\n filename = filename.strip()\r\n imgpath = os.path.join(folder, 'image', filename)\r\n maskpath = os.path.join(folder, 'mask', filename)\r\n if os.path.isfile(maskpath):\r\n img_paths.append(imgpath)\r\n mask_paths.append(maskpath)\r\n else:\r\n print('cannot find the mask:', maskpath)\r\n return img_paths, mask_paths\r\n\r\n img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')\r\n return img_paths, mask_paths\r\n" ]
[ [ "numpy.array", "numpy.random.uniform", "torch.from_numpy" ] ]
nehz/librosa
[ "0dcd53f462db124ed3f54edf2334f28738d2ecc6" ]
[ "librosa/decompose.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSpectrogram decomposition\n=========================\n.. autosummary::\n :toctree: generated/\n\n decompose\n hpss\n nn_filter\n\"\"\"\n\nimport numpy as np\n\nimport scipy.sparse\nfrom scipy.ndimage import median_filter\n\nimport sklearn.decomposition\n\nfrom . import core\nfrom . import cache\nfrom . import segment\nfrom . import util\nfrom .util.exceptions import ParameterError\n\n__all__ = ['decompose', 'hpss', 'nn_filter']\n\n\ndef decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):\n \"\"\"Decompose a feature matrix.\n\n Given a spectrogram `S`, produce a decomposition into `components`\n and `activations` such that `S ~= components.dot(activations)`.\n\n By default, this is done with with non-negative matrix factorization (NMF),\n but any `sklearn.decomposition`-type object will work.\n\n\n Parameters\n ----------\n S : np.ndarray [shape=(n_features, n_samples), dtype=float]\n The input feature matrix (e.g., magnitude spectrogram)\n\n n_components : int > 0 [scalar] or None\n number of desired components\n\n if None, then `n_features` components are used\n\n transformer : None or object\n If None, use `sklearn.decomposition.NMF`\n\n Otherwise, any object with a similar interface to NMF should work.\n `transformer` must follow the scikit-learn convention, where\n input data is `(n_samples, n_features)`.\n\n `transformer.fit_transform()` will be run on `S.T` (not `S`),\n the return value of which is stored (transposed) as `activations`\n\n The components will be retrieved as `transformer.components_.T`\n\n `S ~= np.dot(activations, transformer.components_).T`\n\n or equivalently:\n `S ~= np.dot(transformer.components_.T, activations.T)`\n\n sort : bool\n If `True`, components are sorted by ascending peak frequency.\n\n .. 
note:: If used with `transformer`, sorting is applied to copies\n of the decomposition parameters, and not to `transformer`'s\n internal parameters.\n\n fit : bool\n If `True`, components are estimated from the input ``S``.\n\n If `False`, components are assumed to be pre-computed and stored\n in ``transformer``, and are not changed.\n\n kwargs : Additional keyword arguments to the default transformer\n `sklearn.decomposition.NMF`\n\n\n Returns\n -------\n components: np.ndarray [shape=(n_features, n_components)]\n matrix of components (basis elements).\n\n activations: np.ndarray [shape=(n_components, n_samples)]\n transformed matrix/activation matrix\n\n\n Raises\n ------\n ParameterError\n if `fit` is False and no `transformer` object is provided.\n\n\n See Also\n --------\n sklearn.decomposition : SciKit-Learn matrix decomposition modules\n\n\n Examples\n --------\n Decompose a magnitude spectrogram into 32 components with NMF\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> S = np.abs(librosa.stft(y))\n >>> comps, acts = librosa.decompose.decompose(S, n_components=8)\n >>> comps\n array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],\n [ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],\n ...,\n [ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],\n [ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])\n >>> acts\n array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],\n [ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],\n ...,\n [ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],\n [ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])\n\n\n Sort components by ascending peak frequency\n\n >>> comps, acts = librosa.decompose.decompose(S, n_components=16,\n ... sort=True)\n\n\n Or with sparse dictionary learning\n\n >>> import sklearn.decomposition\n >>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)\n >>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10,8))\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S,\n ... ref=np.max),\n ... y_axis='log', x_axis='time')\n >>> plt.title('Input spectrogram')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.subplot(3, 2, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(comps,\n ... ref=np.max),\n ... y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Components')\n >>> plt.subplot(3, 2, 4)\n >>> librosa.display.specshow(acts, x_axis='time')\n >>> plt.ylabel('Components')\n >>> plt.title('Activations')\n >>> plt.colorbar()\n >>> plt.subplot(3, 1, 3)\n >>> S_approx = comps.dot(acts)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,\n ... ref=np.max),\n ... 
y_axis='log', x_axis='time')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Reconstructed spectrogram')\n >>> plt.tight_layout()\n \"\"\"\n\n if transformer is None:\n if fit is False:\n raise ParameterError('fit must be True if transformer is None')\n\n transformer = sklearn.decomposition.NMF(n_components=n_components,\n **kwargs)\n\n if n_components is None:\n n_components = S.shape[0]\n\n if fit:\n activations = transformer.fit_transform(S.T).T\n else:\n activations = transformer.transform(S.T).T\n\n components = transformer.components_.T\n\n if sort:\n components, idx = util.axis_sort(components, index=True)\n activations = activations[idx]\n\n return components, activations\n\n\n@cache(level=30)\ndef hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):\n \"\"\"Median-filtering harmonic percussive source separation (HPSS).\n\n If `margin = 1.0`, decomposes an input spectrogram `S = H + P`\n where `H` contains the harmonic components,\n and `P` contains the percussive components.\n\n If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`\n where `R` contains residual components not included in `H` or `P`.\n\n This implementation is based upon the algorithm described by [1]_ and [2]_.\n\n .. [1] Fitzgerald, Derry.\n \"Harmonic/percussive separation using median filtering.\"\n 13th International Conference on Digital Audio Effects (DAFX10),\n Graz, Austria, 2010.\n\n .. [2] Driedger, Müller, Disch.\n \"Extending harmonic-percussive separation of audio.\"\n 15th International Society for Music Information Retrieval Conference (ISMIR 2014),\n Taipei, Taiwan, 2014.\n\n Parameters\n ----------\n S : np.ndarray [shape=(d, n)]\n input spectrogram. May be real (magnitude) or complex.\n\n kernel_size : int or tuple (kernel_harmonic, kernel_percussive)\n kernel size(s) for the median filters.\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the width of the\n harmonic filter, and the second value specifies the width\n of the percussive filter.\n\n power : float > 0 [scalar]\n Exponent for the Wiener filter when constructing soft mask matrices.\n\n mask : bool\n Return the masking matrices instead of components.\n\n Masking matrices contain non-negative real values that\n can be used to measure the assignment of energy from `S`\n into harmonic or percussive components.\n\n Components can be recovered by multiplying `S * mask_H`\n or `S * mask_P`.\n\n\n margin : float or tuple (margin_harmonic, margin_percussive)\n margin size(s) for the masks (as described in [2]_)\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the margin of the\n harmonic mask, and the second value specifies the margin\n of the percussive mask.\n\n Returns\n -------\n harmonic : np.ndarray [shape=(d, n)]\n harmonic component (or mask)\n\n percussive : np.ndarray [shape=(d, n)]\n percussive component (or mask)\n\n\n See Also\n --------\n util.softmask\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n Separate into harmonic and percussive\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)\n >>> D = librosa.stft(y)\n >>> H, P = librosa.decompose.hpss(D)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D,\n ... ref=np.max),\n ... 
y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Full power spectrogram')\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(H,\n ... ref=np.max),\n ... y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Harmonic power spectrogram')\n >>> plt.subplot(3, 1, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(P,\n ... ref=np.max),\n ... y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Percussive power spectrogram')\n >>> plt.tight_layout()\n\n\n Or with a narrower horizontal filter\n\n >>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))\n\n Just get harmonic/percussive masks, not the spectra\n\n >>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)\n >>> mask_H\n array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],\n [ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],\n ...,\n [ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],\n [ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)\n >>> mask_P\n array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],\n [ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],\n ...,\n [ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],\n [ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)\n\n Separate into harmonic/percussive/residual components by using a margin > 1.0\n\n >>> H, P = librosa.decompose.hpss(D, margin=3.0)\n >>> R = D - (H+P)\n >>> y_harm = librosa.core.istft(H)\n >>> y_perc = librosa.core.istft(P)\n >>> y_resi = librosa.core.istft(R)\n\n\n Get a more isolated percussive component by widening its margin\n\n >>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))\n\n \"\"\"\n\n if np.iscomplexobj(S):\n S, phase = core.magphase(S)\n else:\n phase = 1\n\n if np.isscalar(kernel_size):\n win_harm = kernel_size\n win_perc = kernel_size\n else:\n win_harm = kernel_size[0]\n win_perc = kernel_size[1]\n\n if np.isscalar(margin):\n margin_harm = margin\n margin_perc = margin\n else:\n margin_harm = margin[0]\n margin_perc = margin[1]\n\n # margin minimum is 1.0\n if margin_harm < 1 or margin_perc < 1:\n raise ParameterError(\"Margins must be >= 1.0. \"\n \"A typical range is between 1 and 10.\")\n\n # Compute median filters. Pre-allocation here preserves memory layout.\n harm = np.empty_like(S)\n harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')\n\n perc = np.empty_like(S)\n perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')\n\n split_zeros = (margin_harm == 1 and margin_perc == 1)\n\n mask_harm = util.softmask(harm, perc * margin_harm,\n power=power,\n split_zeros=split_zeros)\n\n mask_perc = util.softmask(perc, harm * margin_perc,\n power=power,\n split_zeros=split_zeros)\n\n if mask:\n return mask_harm, mask_perc\n\n return ((S * mask_harm) * phase, (S * mask_perc) * phase)\n\n\n@cache(level=30)\ndef nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):\n '''Filtering by nearest-neighbors.\n\n Each data point (e.g, spectrogram column) is replaced\n by aggregating its nearest neighbors in feature space.\n\n This can be useful for de-noising a spectrogram or feature matrix.\n\n The non-local means method [1]_ can be recovered by providing a\n weighted recurrence matrix as input and specifying `aggregate=np.average`.\n\n Similarly, setting `aggregate=np.median` produces sparse de-noising\n as in REPET-SIM [2]_.\n\n .. [1] Buades, A., Coll, B., & Morel, J. M.\n (2005, June). A non-local algorithm for image denoising.\n In Computer Vision and Pattern Recognition, 2005.\n CVPR 2005. 
IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.\n\n .. [2] Rafii, Z., & Pardo, B.\n (2012, October). \"Music/Voice Separation Using the Similarity Matrix.\"\n International Society for Music Information Retrieval Conference, 2012.\n\n Parameters\n ----------\n S : np.ndarray\n The input data (spectrogram) to filter\n\n rec : (optional) scipy.sparse.spmatrix or np.ndarray\n Optionally, a pre-computed nearest-neighbor matrix\n as provided by `librosa.segment.recurrence_matrix`\n\n aggregate : function\n aggregation function (default: `np.mean`)\n\n If `aggregate=np.average`, then a weighted average is\n computed according to the (per-row) weights in `rec`.\n\n For all other aggregation functions, all neighbors\n are treated equally.\n\n\n axis : int\n The axis along which to filter (by default, columns)\n\n kwargs\n Additional keyword arguments provided to\n `librosa.segment.recurrence_matrix` if `rec` is not provided\n\n Returns\n -------\n S_filtered : np.ndarray\n The filtered data\n\n Raises\n ------\n ParameterError\n if `rec` is provided and its shape is incompatible with `S`.\n\n See also\n --------\n decompose\n hpss\n librosa.segment.recurrence_matrix\n\n\n Notes\n -----\n This function caches at level 30.\n\n\n Examples\n --------\n\n De-noise a chromagram by non-local median filtering.\n By default this would use euclidean distance to select neighbors,\n but this can be overridden directly by setting the `metric` parameter.\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... offset=30, duration=10)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> chroma_med = librosa.decompose.nn_filter(chroma,\n ... aggregate=np.median,\n ... metric='cosine')\n\n To use non-local means, provide an affinity matrix and `aggregate=np.average`.\n\n >>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',\n ... metric='cosine', sparse=True)\n >>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,\n ... aggregate=np.average)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 8))\n >>> plt.subplot(5, 1, 1)\n >>> librosa.display.specshow(chroma, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Unfiltered')\n >>> plt.subplot(5, 1, 2)\n >>> librosa.display.specshow(chroma_med, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Median-filtered')\n >>> plt.subplot(5, 1, 3)\n >>> librosa.display.specshow(chroma_nlm, y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Non-local means')\n >>> plt.subplot(5, 1, 4)\n >>> librosa.display.specshow(chroma - chroma_med,\n ... y_axis='chroma')\n >>> plt.colorbar()\n >>> plt.title('Original - median')\n >>> plt.subplot(5, 1, 5)\n >>> librosa.display.specshow(chroma - chroma_nlm,\n ... 
y_axis='chroma', x_axis='time')\n >>> plt.colorbar()\n >>> plt.title('Original - NLM')\n >>> plt.tight_layout()\n '''\n if aggregate is None:\n aggregate = np.mean\n\n if rec is None:\n kwargs = dict(kwargs)\n kwargs['sparse'] = True\n rec = segment.recurrence_matrix(S, axis=axis, **kwargs)\n elif not scipy.sparse.issparse(rec):\n rec = scipy.sparse.csr_matrix(rec)\n\n if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:\n raise ParameterError('Invalid self-similarity matrix shape '\n 'rec.shape={} for S.shape={}'.format(rec.shape,\n S.shape))\n\n return __nn_filter_helper(rec.data, rec.indices, rec.indptr,\n S.swapaxes(0, axis), aggregate).swapaxes(0, axis)\n\n\ndef __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):\n '''Nearest-neighbor filter helper function.\n\n This is an internal function, not for use outside of the decompose module.\n\n It applies the nearest-neighbor filter to S, assuming that the first index\n corresponds to observations.\n\n Parameters\n ----------\n R_data, R_indices, R_ptr : np.ndarrays\n The `data`, `indices`, and `indptr` of a scipy.sparse matrix\n\n S : np.ndarray\n The observation data to filter\n\n aggregate : callable\n The aggregation operator\n\n\n Returns\n -------\n S_out : np.ndarray like S\n The filtered data array\n '''\n s_out = np.empty_like(S)\n\n for i in range(len(R_ptr)-1):\n\n # Get the non-zeros out of the recurrence matrix\n targets = R_indices[R_ptr[i]:R_ptr[i+1]]\n\n if not len(targets):\n s_out[i] = S[i]\n continue\n\n neighbors = np.take(S, targets, axis=0)\n\n if aggregate is np.average:\n weights = R_data[R_ptr[i]:R_ptr[i+1]]\n s_out[i] = aggregate(neighbors, axis=0, weights=weights)\n else:\n s_out[i] = aggregate(neighbors, axis=0)\n\n return s_out\n" ]
[ [ "numpy.take", "numpy.iscomplexobj", "scipy.ndimage.median_filter", "numpy.isscalar", "numpy.empty_like" ] ]
enricoros/glow
[ "49cf6972ce0cb25cea66f9ed39d32add5eeef130" ]
[ "torch_glow/tests/nodes/conv2d_test.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport torch_glow\nfrom collections import namedtuple\n\nfrom tests.utils import jitVsGlow\n\n# Basic test of the PyTorch conv2d Node on Glow.\ndef test_conv2d_basic():\n\n def conv2d_basic(inputs, filters):\n conv = F.conv2d(inputs, filters, padding=1)\n return F.relu(conv)\n\n inputs = torch.randn(1, 4, 5, 5)\n filters = torch.randn(8, 4, 3, 3)\n\n jitVsGlow(conv2d_basic, inputs, filters)\n\n# Test of the PyTorch conv2d Node with a provided bias tensor.\ndef test_conv2d_with_bias():\n\n def conv2d_with_bias(inputs, filters, bias):\n conv = F.conv2d(inputs, filters, bias)\n return F.relu(conv)\n\n inputs = torch.randn(1, 4, 5, 5)\n filters = torch.randn(8, 4, 3, 3)\n bias = torch.randn(8)\n\n jitVsGlow(conv2d_with_bias, inputs, filters, bias)\n\n# Test of the PyTorch conv2d Node sweeping through various parameters of the\n# Node to test that they work correctly.\ndef test_conv2d_param_sweep():\n hwOpts = [3, 4]\n padOpts = [0, 1]\n groupsOpts = [1, 2]\n dilationOpts = [1, 2]\n strideOpts = [1, 2]\n\n Setting = namedtuple('Setting', ['h', 'w', 'p', 'g', 'd', 's',])\n\n settings = [Setting(h=h, w=w, p=p, g=g, d=d, s=s) for h in hwOpts for w in hwOpts for p in padOpts for g in groupsOpts for d in dilationOpts for s in strideOpts]\n\n for setting in settings:\n def conv2d_param_sweep(inputs, filters):\n conv = F.conv2d(inputs, filters, padding=setting.p, groups=setting.g)\n return F.relu(conv)\n\n inputs = torch.randn(2, 4, setting.h, setting.w)\n filters = torch.randn(8, 4/setting.g, 3, 3)\n\n jitVsGlow(conv2d_param_sweep, inputs, filters)\n" ]
[ [ "torch.nn.functional.relu", "torch.nn.functional.conv2d", "torch.randn" ] ]
MatthewGong/DiffractionClassification
[ "68be6cf3960f09388253c79bab13cbd9dc07edbb" ]
[ "DiffractionClassifierCombinatorial2.0.py" ]
[ "import ClientSide2 #custom package\n\nimport numpy as np\nimport argparse\nimport json\nimport os\nimport ClassifierFunctions2 as cf\nimport random\nimport logging\n\nfrom matplotlib import pyplot as plt\nfrom builtins import input\n\nfrom Notation import SpaceGroupsDict as spgs\nSpGr = spgs.spacegroups()\n\n\n\nfrom itertools import combinations,chain\n\n\n# Initialize essential global variables\n#URL = \"\" #you'll need me to send you the link\nFAMILIES = [\"triclinic\",\"monoclinic\",\"orthorhombic\",\"tetragonal\",\n \"trigonal\",\"hexagonal\",\"cubic\"]\n\nDEFAULT_SESSION = os.path.join (\"Sessions\",\"session.json\")\nDEFAULT_USER = \"user_profile.json\"\nSERVER_INFO = \"server_gen2.json\"\n\n# list of three, one per level\nprediction_per_level = [1, 1, 2]\nnum_peaks = [1, 5]\n\nDEFAULT_FILTER_SETTINGS = { \"max_numpeaks\": 75,\n \"dspace_range\" : [0.5,6],\n \"peak_threshold\": 0.7,\n \"filter_size\" : 15,\n \"passes\" : 2\n }\n\n\ndef build_parser():\n parser = argparse.ArgumentParser()\n\n # This will be implemented as rollout broadens\n parser.add_argument('--apikey', type=str,\n dest='key', help='api key to securely access service',\n metavar='KEY', required=False)\n\n parser.add_argument('--session',\n dest='session', help='Keep user preferences for multirun sessions', metavar='SESSION',required=False, default=None)\n parser.add_argument('--subset',\n dest='subset',help='Run a small number of the possible combinations. Mostly for testing. Input the number of combos to run.', metavar='NO_OF_COMBOS',required=False, default=None)\n parser.add_argument('--dataonly',\n dest='data_only',help='run the classification without plotting', metavar='True/[False]',required=False, default=False)\n parser.add_argument('--figuresonly',\n dest='figures_only',help='Plot the figures without running data. 
Data must be saved previously.', metavar='True/[False]',required=False, default=False)\n \n return parser\n\ndef powerset(iterable):\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\n\n\n\ndef combination_peaks(peak_batch, chem_vec, mode, temp_name, crystal_family, user_info, URL, prediction_per_level, subset, num_peaks):\n\n outpath = \"Ready\"\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n find_valid_peaks = list(powerset(peak_batch[\"vec\"]))\n find_valid_peaks = [item for item in find_valid_peaks if len(item) > num_peaks[0] and len(item) < num_peaks[1]]\n print(len(find_valid_peaks),\"valid peak combinations\")\n\n valid_peaks_combinations = [{\"vec\":proto_combo} for proto_combo in find_valid_peaks]\n found = False\n threshold = 0\n tot_spec = 1\n for p in prediction_per_level:\n tot_spec *= p\n guesses = {\"num_pred\": tot_spec}\n for k in range(1,tot_spec+1):\n guesses[\"species_\"+str(k)]=[]\n guesses[\"spec_confidence_\"+str(k)]=[]\n# print(guesses)\n common_peaks = []\n failed_combos = valid_peaks_combinations\n #peak_locs,user_info,URL,fam\n persistance = 0\n LIMIT = 3\n# print(failed_combos)\n \n \n if subset >0 and subset<len(failed_combos):\n failed_combos = random.sample(failed_combos, subset)\n print(\"using \", len(failed_combos),\" peak combinations\")\n \n while len(failed_combos) > 0 and persistance < LIMIT:\n for combo in failed_combos:\n try:\n# print('---classifying---')\n# print(combo)\n classificated = ClientSide2.Send_For_Classification(combo, chem_vec, mode, crystal_family, user_info, URL, prediction_per_level)\n print(classificated)\n classificated[\"file_name\"] = temp_name\n # print('name =')\n # print(temp_name)\n print(os.path.join(outpath,temp_name))\n cf.write_to_csv(os.path.join(outpath,temp_name) + \".csv\", classificated, prediction_per_level)\n print(tot_spec)\n for k in range(1,tot_spec+1):\n print(guesses)\n guesses['species_'+str(k)].append( classificated[\"species_\"+str(k)] )\n guesses['spec_confidence_'+str(k)].append( classificated[\"spec_confidence_\"+str(k)] )\n common_peaks.append(classificated[\"peaks\"])\n \n \n \n # remove the classified combination\n failed_combos.remove(combo)\n \n except KeyboardInterrupt:\n raise\n except:\n print(\"An error occured this combination was not classified.\\nIt will be retried {} more times\".format(LIMIT-persistance))\n\n persistance += 1\n\n if len(failed_combos)>0:\n print(\"there were {} failed combinations\".format(len(failed_combos)))\n print('returning')\n return common_peaks, guesses\n\ndef make_figures(guesses,crystal_family,froot):\n\n if crystal_family:\n lower_gen = SpGr.edges[\"genus\"][crystal_family][0]\n upper_gen = SpGr.edges[\"genus\"][crystal_family][1]\n else:\n lower_gen = SpGr.edges[\"genus\"][FAMILIES[0]][0]\n upper_gen = SpGr.edges[\"genus\"][FAMILIES[-1]][1]\n fam_range = range(SpGr.edges[\"species\"][lower_gen][0],1+SpGr.edges[\"species\"][upper_gen][1])\n \n # phi = 2*np.pi/360\n fig_ang = 300\n phi = (2*np.pi*fig_ang/360)/(max(fam_range)-min(fam_range)+1)\n thet = fig_ang/(max(fam_range)-min(fam_range)+1)\n fam_axes = [1,3,16,75,143,168,195]\n\n # fig1 = plt.figure(1,figsize=(len(fam_range),16))\n fig1 = plt.figure(2,figsize=(16,8))\n plt.clf()\n ax1 = fig1.add_axes([0.03,0.1,.96,.8])\n# ax1.set_yscale('log')\n fam_color = ['k','g','b','c','m','y','k']\n for k in range(len(fam_axes)-1):\n ax1.axvspan(fam_axes[k]-0.5,fam_axes[k+1]-0.5,facecolor = 
fam_color[k], alpha=0.5)\n # ax1.axvspan(fam_axes[0],fam_axes[1]-1,alpha=0.5)\n\n ax1.axvspan(fam_axes[-1]-0.5,np.max(fam_range)-0.5,alpha=0.3)\n plt.ion\n\n fig2 = plt.figure(3,figsize=(8,8))\n plt.clf()\n plt.ion\n ax2 = fig2.add_axes([0.1,0.1,0.8,0.8],polar=True)\n ax2.set_thetamin(1)\n ax2.set_rmin(0)\n ax2.set_thetamax(fig_ang)\n ax2.set_rlabel_position(30)\n ax2.set_theta_direction(-1)\n ax2.set_theta_zero_location(\"S\",offset=-(360-fig_ang)/2)\n # ax2.set_rscale('log')\n prev_histograms_1 = []\n prev_histograms_2 = []\n plots_1 = []\n plots_2 = []\n# print('guesses = ')\n# print(guesses)\n num_pred = np.prod(prediction_per_level)\n for rank in range(1,num_pred+1):\n histo = np.histogram([g for g in guesses[\"species_{}\".format(rank)]], weights = [g for g in guesses[\"spec_confidence_{}\".format(rank)]], bins = np.arange(min(fam_range)-0.5, max(fam_range)+1.5))\n histo_log = np.array([np.log10(float(h))+1 if h>0 else 0 for h in histo[0]])\n# print('log_histo = ')\n# print(histo_log.tolist())\n if rank > 1:\n plt.figure(2)\n plot_1 = plt.bar(fam_range, histo[0], bottom = np.sum(np.vstack(prev_histograms_1), axis=0), align=\"center\", width = 1.1)\n plt.figure(3)\n sum_hist = np.sum(np.vstack(prev_histograms_1), axis=0)\n log_sum = np.array([np.log10(float(h))-1 if h>0 else -1 for h in sum_hist])\n# print('log_sum = ')\n# print(log_sum.tolist())\n plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = log_sum, align=\"center\", width = phi)\n else:\n plt.figure(2)\n plot_1 = plt.bar(fam_range, histo[0], align=\"center\", color='red', width = 1.1)\n plt.figure(3)\n plot_2 = plt.bar([f*phi for f in fam_range], histo_log, bottom = -1, align=\"center\", color='red', width = phi)\n \n plots_1.append(plot_1)\n plots_2.append(plot_2)\n plt.figure(2)\n plt.yticks(rotation='vertical')\n plt.xticks(fam_range,rotation='vertical')\n prev_histograms_1.append(histo[0])\n prev_histograms_2.append(histo[0])\n # plt.figure(3)\n # ax2.set_xticks(histo[1][:-1])\n\n plt.figure(2)\n # ym = ax1.get_ymax()*.9\n\n r_max = 0\n for rect in plot_1:\n n_max = rect.get_height()+rect.get_y()\n if n_max>r_max:\n r_max = n_max\n \n\n for k in range(len(FAMILIES)-1):\n if k ==0:\n ym_t = r_max*0.7\n cent = 'left'\n else:\n ym_t = r_max*0.6\n cent = 'center'\n ax1.text((fam_axes[k+1]+fam_axes[k])/2,ym_t, FAMILIES[k], horizontalalignment=cent)\n\n\n ax1.text((fam_axes[-1]+np.max(fam_range))/2,ym_t, FAMILIES[-1], horizontalalignment='center')\n \n ax1.autoscale(enable=True, axis='x', tight=True)\n ax1.tick_params(axis='x', which='major', labelsize=6)\n plt.xlabel(\"Prediction\",fontsize=10)\n plt.ylabel(\"Counts\",fontsize=10)\n # plt.legend(plots,(\"species_1\",\"species_2\",\"species_3\",\"species_4\"))\n leg_list = [ \"species_{}\".format(k+1) for k in range(num_pred) ]\n plt.legend(plots_1,leg_list)\n print(\"Results/\"+froot+\"_gen2.png\")\n plt.savefig(\"Results/\"+froot+\"_gen2.png\",dpi = 300)\n\n plt.figure(3)\n # plt.xlabel(\"Prediction\",fontsize=10,rotation='vertical')\n # plt.ylabel(\"Counts\",fontsize=10)\n r_ticks = list(range(int(np.floor(ax2.get_rmin())),int(np.ceil(ax2.get_rmax())+1)))\n ax2.set_rgrids(r_ticks, labels = ['10e'+str(r) for r in r_ticks])\n ax2.set_thetagrids([f*thet for f in fam_axes],labels = FAMILIES)\n plt.legend(plots_2,leg_list)\n # plt.legend(plots,(\"species_1\",\"species_2\",\"species_3\",\"species_4\"))\n# print(\"Results/\"+froot+\"_gen2_polar.png\")\n# plt.savefig(\"Results/\"+froot+\"_gen2_polar.png\",dpi = 300)\n# plt.show()\n \n\ndef main():\n\n parser = 
build_parser()\n options = parser.parse_args()\n \n if options.subset:\n subset = int(options.subset)\n else:\n subset = -1\n \n\n print(options.session)\n\n # opens the user specified session\n if options.session:\n with open(os.path.join(\"Sessions\",options.session),'r') as f:\n session = json.load(f)\n\n # opens the default session \n else:\n with open(DEFAULT_SESSION,'r') as f:\n session = json.load(f)\n\n # set variables from loaded session data\n# print(session)\n file_path = session[\"file_path\"]\n if \"output_file\" in session:\n output_file = session[\"output_file\"]\n else:\n output_file = ''\n if \"output_file_root\" in session:\n output_file_root = session[\"output_file_root\"]\n else:\n output_file_root = ''\n if not (output_file or output_file_root):\n raise ValueError('output_file or output_file_root must be defined in session file.')\n manual_peak_selection = session[\"manual_peak_selection\"]\n known_family = session[\"known_family\"]\n chemistry = session[\"chemistry\"]\n diffraction = session[\"diffraction\"]\n \n print('file inputs')\n print(output_file)\n print(output_file_root)\n \n mode = \"\"\n \n if diffraction:\n if chemistry:\n mode=\"DiffChem\"\n else:\n mode=\"DiffOnly\"\n else:\n if chemistry:\n raise ValueError('Running chemistry only predictions is currently not implemented')\n else:\n raise ValueError('Invalid prediction type. Either diffraction or chemistry must be enabled')\n\n if known_family and known_family=='yes':\n print('known family')\n crystal_family = session[\"crystal_family\"]\n prediction_per_level[0] = 1\n else:\n crystal_family = None\n \n # Load user from provided path, [IN PROGRESS]\n if session[\"user_info\"]:\n with open(session[\"user_info\"],'r') as f:\n user_info = json.load(f)\n else:\n with open(DEFAULT_USER,'r') as f:\n user_info = json.load(f)\n \n with open(session[\"server_info\"],'r') as f:\n server_info = json.load(f)\n \n if server_info['URL']:\n url = server_info['URL']\n else:\n raise ValueError('you need to have the server URL provided to you')\n \n chem_vec = cf.check_for_chemistry(session)\n \n print(file_path)\n print('---starting loop--')\n # Determine if the path is a directory or a file\n if os.path.isdir(file_path):\n print(\"loading files from directory\")\n file_paths = []\n for dirpath,dirnames,fpath in os.walk(file_path):\n for path in fpath:\n if not path[0] == '.':\n file_paths.append(os.path.join(dirpath,path))\n print(\"found {} files to load.\".format(len(file_paths)))\n\n else:\n file_paths = [file_path]\n \n if not os.path.exists(\"Results\"):\n os.makedirs(\"Results\")\n \n\n print(file_paths)\n for f_path in file_paths:\n\n # Load Data from specified file (DM3, TIFF, CSV etc....)\n \n print(\"loading data from {}\".format(f_path))\n image_data,scale = ClientSide2.Load_Profile(f_path)\n print(\"I successfully loaded the data\")\n \n# print(scale)\n\n print(options.figures_only)\n print(options.data_only)\n \n # difining filepaths here to facilitate loading data.\n froot = os.path.splitext(os.path.basename(f_path))[0]\n if output_file_root:\n outfile = 'Results/'+output_file_root+froot+'.json'\n outfile_2 = 'Results/'+output_file_root+froot+'_peaks.json'\n else:\n output_file_root='' #for the figure filenames\n [outroot, ext] = os.path.splitext(output_file)\n if not ext=='.json':\n output_file = outroot+'.json'\n output_file_2 = outroot+'_peaks.json'\n outfile = 'Results/'+output_file\n outfile_2 = 'Results/'+output_file_2\n\n # optional skipping the data creation\n if options.figures_only:\n 
print('Only creating figures')\n with open(outfile, 'r') as fp:\n guesses = json.load(fp)\n else:\n if diffraction:\n peak_locs,peaks_h = ClientSide2.Find_Peaks(image_data, scale, **DEFAULT_FILTER_SETTINGS)\n # Choose which peaks to classify on\n if manual_peak_selection:\n peak_locs = cf.choose_peaks(peak_locs,peaks_h)\n #raise NotImplementedError\n else:\n peak_locs = []\n peaks_h = []\n\n \n # Script hangs when there are too many peaks.\n # TODO: implement something better. \n if len(peak_locs['d_spacing'])>25:\n print(\"\\n\\n======================================================\")\n print(\"there are \"+ str(len(peak_locs['d_spacing']))+\" peaks, which is too many.\")\n print(f_path)\n print(\"======================================================\\n\\n\")\n continue\n\n \n \n common_peaks,guesses = combination_peaks(peak_locs, chem_vec, mode, froot, crystal_family, user_info, url, prediction_per_level, subset, num_peaks)\n# print(\"--- peak_locs ---\")\n# print(peak_locs)\n guesses[\"pk_d_spacing\"] = peak_locs[\"d_spacing\"].tolist()\n guesses[\"pk_vec\"] = peak_locs[\"vec\"]\n \n print(guesses)\n \n # save data\n with open(outfile, 'w') as fp:\n json.dump(guesses, fp)\n with open(outfile_2, 'w') as fp:\n json.dump(common_peaks, fp)\n \n \n if options.data_only:\n print('skipping figures')\n else:\n make_figures(guesses,crystal_family,output_file_root+froot)\n # TODO: Split up this function and enable plotting on precomputed data.\n \n \n # plt.show(block=False)\n \n\nif __name__ == \"__main__\":\n main()\n\n\n" ]
[ [ "numpy.max", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.legend", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "numpy.prod", "numpy.vstack", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.bar", "matplotlib.pyplot.xticks" ] ]
edupooch/cxr-domain-shift
[ "9e88f82e3d42f660e9f79723adb8a733d0a0e5e3" ]
[ "nih/loader.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport os\nfrom PIL import Image\n\n\nclass CXRDataset(Dataset):\n\n def __init__(\n self,\n path_to_images,\n fold,\n transform=None,\n sample=0,\n finding=\"any\",):\n\n self.transform = transform\n self.path_to_images = path_to_images\n self.df = pd.read_csv(\"nih/nih_labels.csv\")\n self.df = self.df[self.df['fold'] == fold]\n\n\n # can limit to sample, useful for testing\n # if fold == \"train\" or fold ==\"val\": sample=500\n if(sample > 0 and sample < len(self.df)):\n self.df = self.df.sample(sample)\n\n if not finding == \"any\": # can filter for positive findings of the kind described; useful for evaluation\n if finding in self.df.columns:\n if len(self.df[self.df[finding] == 1]) > 0:\n self.df = self.df[self.df[finding] == 1]\n else:\n print(\"No positive cases exist for \"+LABEL+\", returning all unfiltered cases\")\n else:\n print(\"cannot filter on finding \" + finding +\n \" as not in data - please check spelling\")\n\n self.df = self.df.set_index(\"Image Index\")\n self.PRED_LABEL = [\n 'No Finding',\n 'Atelectasis',\n 'Cardiomegaly',\n 'Effusion',\n 'Infiltration',\n 'Lung Lesion',\n 'Pneumonia',\n 'Pneumothorax',\n 'Consolidation',\n 'Edema',\n 'Emphysema',\n 'Fibrosis',\n 'Pleural_Thickening',\n 'Hernia']\n RESULT_PATH = \"results/\"\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n\n image = Image.open(\n os.path.join(\n self.path_to_images,\n self.df.index[idx]))\n image = image.convert('RGB')\n\n label = np.zeros(len(self.PRED_LABEL), dtype=int)\n for i in range(0, len(self.PRED_LABEL)):\n # can leave zero if zero, else make one\n if(self.df[self.PRED_LABEL[i].strip()].iloc[idx].astype('int') > 0):\n label[i] = self.df[self.PRED_LABEL[i].strip()\n ].iloc[idx].astype('int')\n\n if self.transform:\n image = self.transform(image)\n\n return (image, label,self.df.index[idx])\n" ]
[ [ "pandas.read_csv" ] ]
ITMO-NSS-team/LightObjRecEnsembler
[ "1375400f0a681aefdd3ab484e828257fd7aed318" ]
[ "baseline/fast_rcnn/trainer.py" ]
[ "from __future__ import absolute_import\nimport os\nfrom collections import namedtuple\nimport time\nfrom torch.nn import functional as F\nfrom baseline.fast_rcnn.model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator\n\nfrom torch import nn\nimport torch as t\nfrom baseline.fast_rcnn.utils import array_tool as at\nfrom baseline.fast_rcnn.utils.vis_tool import Visualizer\n\nfrom baseline.fast_rcnn.utils.config import opt\nfrom torchnet.meter import ConfusionMeter, AverageValueMeter\n\nLossTuple = namedtuple('LossTuple',\n ['rpn_loc_loss',\n 'rpn_cls_loss',\n 'roi_loc_loss',\n 'roi_cls_loss',\n 'total_loss'\n ])\n\n\nclass FasterRCNNTrainer(nn.Module):\n \"\"\"wrapper for conveniently training. return losses\n\n The losses include:\n\n * :obj:`rpn_loc_loss`: The localization loss for \\\n Region Proposal Network (RPN).\n * :obj:`rpn_cls_loss`: The classification loss for RPN.\n * :obj:`roi_loc_loss`: The localization loss for the head module.\n * :obj:`roi_cls_loss`: The classification loss for the head module.\n * :obj:`total_loss`: The sum of 4 loss above.\n\n Args:\n faster_rcnn (model.FasterRCNN):\n A Faster R-CNN model that is going to be trained.\n \"\"\"\n\n def __init__(self, faster_rcnn):\n super(FasterRCNNTrainer, self).__init__()\n\n self.faster_rcnn = faster_rcnn\n self.rpn_sigma = opt.rpn_sigma\n self.roi_sigma = opt.roi_sigma\n\n # target creator create gt_bbox gt_label etc as training targets. \n self.anchor_target_creator = AnchorTargetCreator()\n self.proposal_target_creator = ProposalTargetCreator()\n\n self.loc_normalize_mean = faster_rcnn.loc_normalize_mean\n self.loc_normalize_std = faster_rcnn.loc_normalize_std\n\n self.optimizer = self.faster_rcnn.get_optimizer()\n # visdom wrapper\n self.vis = Visualizer(env=opt.env)\n\n # indicators for training status\n self.rpn_cm = ConfusionMeter(2)\n self.roi_cm = ConfusionMeter(21)\n self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss\n\n def forward(self, imgs, bboxes, labels, scale):\n \"\"\"Forward Faster R-CNN and calculate losses.\n\n Here are notations used.\n\n * :math:`N` is the batch size.\n * :math:`R` is the number of bounding boxes per image.\n\n Currently, only :math:`N=1` is supported.\n\n Args:\n imgs (~torch.autograd.Variable): A variable with a batch of images.\n bboxes (~torch.autograd.Variable): A batch of bounding boxes.\n Its shape is :math:`(N, R, 4)`.\n labels (~torch.autograd..Variable): A batch of labels.\n Its shape is :math:`(N, R)`. The background is excluded from\n the definition, which means that the range of the value\n is :math:`[0, L - 1]`. 
:math:`L` is the number of foreground\n classes.\n scale (float): Amount of scaling applied to\n the raw image during preprocessing.\n\n Returns:\n namedtuple of 5 losses\n \"\"\"\n n = bboxes.shape[0]\n if n != 1:\n raise ValueError('Currently only batch size 1 is supported.')\n\n _, _, H, W = imgs.shape\n img_size = (H, W)\n\n features = self.faster_rcnn.extractor(imgs)\n\n rpn_locs, rpn_scores, rois, roi_indices, anchor = \\\n self.faster_rcnn.rpn(features, img_size, scale)\n\n # Since batch size is one, convert variables to singular form\n bbox = bboxes[0]\n label = labels[0]\n rpn_score = rpn_scores[0]\n rpn_loc = rpn_locs[0]\n roi = rois\n\n # Sample RoIs and forward\n # it's fine to break the computation graph of rois, \n # consider them as constant input\n sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(\n roi,\n at.tonumpy(bbox),\n at.tonumpy(label),\n self.loc_normalize_mean,\n self.loc_normalize_std)\n # NOTE it's all zero because now it only support for batch=1 now\n sample_roi_index = t.zeros(len(sample_roi))\n roi_cls_loc, roi_score = self.faster_rcnn.head(\n features,\n sample_roi,\n sample_roi_index)\n\n # ------------------ RPN losses -------------------#\n gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(\n at.tonumpy(bbox),\n anchor,\n img_size)\n gt_rpn_label = at.totensor(gt_rpn_label).long()\n gt_rpn_loc = at.totensor(gt_rpn_loc)\n rpn_loc_loss = _fast_rcnn_loc_loss(\n rpn_loc,\n gt_rpn_loc,\n gt_rpn_label.data,\n self.rpn_sigma)\n\n # NOTE: default value of ignore_index is -100 ...\n rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)\n _gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]\n _rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]\n self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())\n\n # ------------------ ROI losses (fast rcnn loss) -------------------#\n n_sample = roi_cls_loc.shape[0]\n roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)\n roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \\\n at.totensor(gt_roi_label).long()]\n gt_roi_label = at.totensor(gt_roi_label).long()\n gt_roi_loc = at.totensor(gt_roi_loc)\n\n roi_loc_loss = _fast_rcnn_loc_loss(\n roi_loc.contiguous(),\n gt_roi_loc,\n gt_roi_label.data,\n self.roi_sigma)\n\n roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())\n\n self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())\n\n losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]\n losses = losses + [sum(losses)]\n\n return LossTuple(*losses)\n\n def train_step(self, imgs, bboxes, labels, scale):\n self.optimizer.zero_grad()\n losses = self.forward(imgs, bboxes, labels, scale)\n losses.total_loss.backward()\n self.optimizer.step()\n self.update_meters(losses)\n return losses\n\n def save(self, save_optimizer=False, save_path=None, **kwargs):\n \"\"\"serialize models include optimizer and other info\n return path where the model-file is stored.\n\n Args:\n save_optimizer (bool): whether save optimizer.state_dict().\n save_path (string): where to save model, if it's None, save_path\n is generate using time str and info from kwargs.\n \n Returns:\n save_path(str): the path to save models.\n \"\"\"\n save_dict = dict()\n\n save_dict['model'] = self.faster_rcnn.state_dict()\n save_dict['config'] = opt._state_dict()\n save_dict['other_info'] = kwargs\n save_dict['vis_info'] = self.vis.state_dict()\n\n if save_optimizer:\n save_dict['optimizer'] = self.optimizer.state_dict()\n\n if save_path is None:\n 
timestr = time.strftime('%m%d%H%M')\n save_path = 'checkpoints/fasterrcnn_%s' % timestr\n for k_, v_ in kwargs.items():\n save_path += '_%s' % v_\n\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n t.save(save_dict, save_path)\n self.vis.save([self.vis.env])\n return save_path\n\n def load(self, path, load_optimizer=True, parse_opt=False, cpu_flag: bool = True):\n if cpu_flag:\n state_dict = t.load(path,\n map_location=t.device('cpu'))\n else:\n state_dict = t.load(path)\n if 'model' in state_dict:\n self.faster_rcnn.load_state_dict(state_dict['model'])\n else: # legacy way, for backward compatibility\n self.faster_rcnn.load_state_dict(state_dict)\n return self\n if parse_opt:\n opt._parse(state_dict['config'])\n if 'optimizer' in state_dict and load_optimizer:\n self.optimizer.load_state_dict(state_dict['optimizer'])\n return self\n\n def update_meters(self, losses):\n loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}\n for key, meter in self.meters.items():\n meter.add(loss_d[key])\n\n def reset_meters(self):\n for key, meter in self.meters.items():\n meter.reset()\n self.roi_cm.reset()\n self.rpn_cm.reset()\n\n def get_meter_data(self):\n return {k: v.value()[0] for k, v in self.meters.items()}\n\n\ndef _smooth_l1_loss(x, t, in_weight, sigma):\n sigma2 = sigma ** 2\n diff = in_weight * (x - t)\n abs_diff = diff.abs()\n flag = (abs_diff.data < (1. / sigma2)).float()\n y = (flag * (sigma2 / 2.) * (diff ** 2) +\n (1 - flag) * (abs_diff - 0.5 / sigma2))\n return y.sum()\n\n\ndef _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):\n in_weight = t.zeros(gt_loc.shape).cuda()\n # Localization loss is calculated only for positive rois.\n # NOTE: unlike origin implementation, \n # we don't need inside_weight and outside_weight, they can calculate by gt_label\n in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1\n loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)\n # Normalize by total number of negtive and positive rois.\n loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss\n return loc_loss\n" ]
[ [ "torch.zeros", "torch.device", "torch.arange", "torch.save", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
jeremycward/ipp-core
[ "c3dbebaf997b045da8385cb3dfab46820e40afda" ]
[ "venv/Scripts/f2py.py" ]
[ "#!c:\\users\\jerem\\dev\\ipp-core\\venv\\scripts\\python.exe\n# See http://cens.ioc.ee/projects/f2py2e/\nfrom __future__ import division, print_function\n\nimport os\nimport sys\nfor mode in [\"g3-numpy\", \"2e-numeric\", \"2e-numarray\", \"2e-numpy\"]:\n try:\n i = sys.argv.index(\"--\" + mode)\n del sys.argv[i]\n break\n except ValueError:\n pass\nos.environ[\"NO_SCIPY_IMPORT\"] = \"f2py\"\nif mode == \"g3-numpy\":\n sys.stderr.write(\"G3 f2py support is not implemented, yet.\\\\n\")\n sys.exit(1)\nelif mode == \"2e-numeric\":\n from f2py2e import main\nelif mode == \"2e-numarray\":\n sys.argv.append(\"-DNUMARRAY\")\n from f2py2e import main\nelif mode == \"2e-numpy\":\n from numpy.f2py import main\nelse:\n sys.stderr.write(\"Unknown mode: \" + repr(mode) + \"\\\\n\")\n sys.exit(1)\nmain()\n" ]
[ [ "numpy.f2py.main" ] ]
lyhue1991/Hypernets
[ "d726bd297869eacb0cba84376fbac30206bbb60a" ]
[ "hypernets/tests/tabular/tb_cuml/drift_detection_test.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\n\n\"\"\"\nimport pandas as pd\nfrom pandas.util import hash_pandas_object\n\nfrom hypernets.tabular.datasets.dsutils import load_bank\nfrom . import if_cuml_ready, is_cuml_installed\n\nif is_cuml_installed:\n import cudf\n from hypernets.tabular.cuml_ex import CumlToolBox\n\n dd_selector = CumlToolBox.feature_selector_with_drift_detection\n\n\n@if_cuml_ready\nclass Test_drift_detection:\n def test_shift_score(self):\n df = load_bank().head(1000)\n df = cudf.from_pandas(df)\n selector = dd_selector()\n scores = selector._covariate_shift_score(df[:700], df[700:])\n print('_covariate_shift_score', scores)\n assert scores['id'] >=0.95\n\n def test_feature_selection(self):\n df = load_bank()\n df = cudf.from_pandas(df)\n y = df.pop('y')\n p = int(df.shape[0] * 0.8)\n X_train = df[:p]\n X_test = df[p:]\n # = train_test_split(df, train_size=0.7, random_state=9527)\n selector = dd_selector(remove_shift_variable=False,\n auc_threshold=0.55,\n min_features=15,\n remove_size=0.2)\n remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)\n assert len(remain_features) == 15\n\n selector = dd_selector(remove_shift_variable=True,\n auc_threshold=0.55,\n min_features=15,\n remove_size=0.2)\n remain_features, history, scores = selector.select(X_train, X_test, copy_data=True)\n\n assert len(remain_features) == 16\n\n def test_drift_detector_split(self):\n df = cudf.from_pandas(load_bank())\n y = df.pop('y')\n X_train, X_test = CumlToolBox.train_test_split(df.copy(), train_size=0.7, shuffle=True, random_state=9527)\n dd = dd_selector().get_detector()\n dd.fit(X_train, X_test)\n\n assert len(dd.feature_names_) == 17\n assert len(dd.feature_importances_) == 17\n assert dd.auc_\n assert len(dd.estimator_) == 5\n\n proba = dd.predict_proba(df)\n assert proba.shape[0] == df.shape[0]\n\n df = cudf.from_pandas(load_bank())\n y = df.pop('y')\n p = int(df.shape[0] * 0.2)\n X_train, X_test, y_train, y_test = dd.train_test_split(df.copy(), y, test_size=0.2)\n assert X_train.shape == (df.shape[0] - p, df.shape[1])\n assert y_train.shape == (df.shape[0] - p,)\n assert X_test.shape == (p, df.shape[1])\n assert y_test.shape == (p,)\n\n df['y'] = y\n X_train['y'] = y_train\n X_test['y'] = y_test\n df, X_train, X_test = CumlToolBox.to_local(df, X_train, X_test)\n df_split = pd.concat([X_train, X_test])\n df_hash = hash_pandas_object(df).sort_values()\n splitted_hash = hash_pandas_object(df_split).sort_values()\n assert (df_hash == splitted_hash).all()\n" ]
[ [ "pandas.util.hash_pandas_object", "pandas.concat" ] ]
mariodoebler/byol-pytorch
[ "4c1b6d27d86e0a9a39ecef6f6888038355943cd0" ]
[ "byol_pytorch/byol_pytorch.py" ]
[ "import copy\n\nfrom functools import wraps\n\nimport numpy as np\n\nimport wandb\nimport torchvision\nimport torch\nimport torch.nn.functional as F\n\nfrom kornia import enhance, filters\nfrom torchvision.transforms import RandomApply, RandomChoice\nfrom atariari.methods.utils import EarlyStopping\n\nfrom torch import nn\nfrom torch.utils.data import BatchSampler, RandomSampler\n\n\ndef default(val, def_val):\n return def_val if val is None else val\n\n\ndef flatten(t):\n return t.reshape(t.shape[0], -1)\n\n\ndef singleton(cache_key):\n def inner_fn(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n instance = getattr(self, cache_key)\n if instance is not None:\n return instance\n\n instance = fn(self, *args, **kwargs)\n setattr(self, cache_key, instance)\n return instance\n return wrapper\n return inner_fn\n\n\ndef get_module_device(module):\n return next(module.parameters()).device\n\n\ndef set_requires_grad(model, val):\n for p in model.parameters():\n p.requires_grad = val\n\n# loss fn\n\n\ndef loss_fn(x, y):\n x = F.normalize(x, dim=-1, p=2)\n y = F.normalize(y, dim=-1, p=2)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n# augmentation utils\n\n\n# class RandomApply(nn.Module):\n# def __init__(self, fn, p):\n# super().__init__()\n# self.fn = fn\n# self.p = p\n\n# def forward(self, x):\n# if random.random() > self.p:\n# return x\n# return self.fn(x)\n\n\n# exponential moving average\n\nclass EMA():\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_average(self, old, new):\n if old is None:\n return new\n return old * self.beta + (1 - self.beta) * new\n\n\ndef update_moving_average(ema_updater, ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = ema_updater.update_average(old_weight, up_weight)\n\n# MLP class for projector and predictor\n\n\nclass MLP(nn.Module):\n def __init__(self, dim, projection_size, hidden_size=4096):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_size),\n nn.BatchNorm1d(hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, projection_size)\n )\n\n def forward(self, x):\n return self.net(x)\n\n# a wrapper class for the base neural network\n# will manage the interception of the hidden layer output\n# and pipe it into the projecter and predictor nets\n\n\nclass NetWrapper(nn.Module):\n def __init__(self, net, projection_size, projection_hidden_size, layer=-2):\n super().__init__()\n self.net = net\n self.layer = layer # final avg-pooling layer\n\n self.projector = None\n self.projection_size = projection_size\n self.projection_hidden_size = projection_hidden_size\n\n self.hidden = None\n self.hook_registered = False\n\n def _find_layer(self):\n if type(self.layer) == str:\n modules = dict([*self.net.named_modules()])\n return modules.get(self.layer, None)\n elif type(self.layer) == int:\n children = [*self.net.children()]\n return children[self.layer]\n return None\n\n def _hook(self, _, __, output):\n self.hidden = flatten(output)\n\n def _register_hook(self):\n layer = self._find_layer()\n assert layer is not None, f'hidden layer ({self.layer}) not found'\n handle = layer.register_forward_hook(self._hook)\n self.hook_registered = True\n\n @singleton('projector')\n def _get_projector(self, hidden):\n _, dim = hidden.shape\n projector = MLP(dim, self.projection_size, self.projection_hidden_size)\n return projector.to(hidden)\n\n def 
get_representation(self, x):\n if self.layer == -1:\n return self.net(x)\n\n if not self.hook_registered:\n self._register_hook()\n\n _ = self.net(x)\n hidden = self.hidden\n self.hidden = None\n assert hidden is not None, f'hidden layer {self.layer} never emitted an output'\n return hidden\n\n def forward(self, x):\n representation = self.get_representation(x)\n projector = self._get_projector(representation)\n projection = projector(representation)\n return projection\n\n# main class\n\n\nclass BYOL(nn.Module):\n def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):\n super().__init__()\n\n # default SimCLR augmentation\n\n #####\n # IMPORTANT for kornia: parameters are often float!! e.g. 1. vs 1\n # DEFAULT_AUG = nn.Sequential(\n # RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),\n # augs.RandomHorizontalFlip(),\n # RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),\n # input tensor: float + normalized range [0,1]\n # augs.RandomResizedCrop(\n # size=(image_size, image_size), scale=(0.84, 1.), ratio=(1.,1.), p=1.0)\n # augs.Normalize(mean=torch.tensor(\n # [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))\n # )\n\n kernel_size = (9, 9) # has to be ODD\n kernel_std = np.random.uniform(low=0.1, high=2.0)\n kernel_std = (kernel_std,)*2\n aug_transform = torchvision.transforms.Compose([\n RandomChoice(\n [enhance.AdjustBrightness(0.4),\n enhance.AdjustBrightness(0.3),\n enhance.AdjustBrightness(0.2),\n enhance.AdjustBrightness(0.1),\n enhance.AdjustBrightness(0.0)]\n ),\n RandomChoice(\n [enhance.AdjustContrast(1.0),\n enhance.AdjustContrast(0.9),\n enhance.AdjustContrast(0.8),\n enhance.AdjustContrast(0.7),\n enhance.AdjustContrast(0.6)]\n ),\n RandomApply([filters.GaussianBlur2d(\n kernel_size, kernel_std)], p=0.5)\n # RandomChoice(\n # [enhance.AdjustContrast(1.0),\n # enhance.AdjustContrast(1.0),\n # enhance.AdjustContrast(1.0),\n # filters.GaussianBlur2d((1, 1), (1, 1)),\n # filters.GaussianBlur2d((3, 3), (1.5, 1.5))]\n # )\n ])\n\n self.augment1 = default(augment_fn, aug_transform)\n self.augment2 = default(augment_fn2, self.augment1)\n\n self.online_encoder = NetWrapper(\n net, projection_size, projection_hidden_size, layer=hidden_layer)\n self.target_encoder = None\n self.target_ema_updater = EMA(moving_average_decay)\n\n self.online_predictor = MLP(\n projection_size, projection_size, projection_hidden_size)\n\n self.batch_size = batch_size\n # get device of network and make wrapper same device\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n print(f\"Device is {self.device.type}\")\n self.to(self.device)\n self.wandb = wandb\n self.early_stopper = EarlyStopping(\n patience=patience, verbose=False, wandb=self.wandb, name=\"encoder-byol\")\n\n if self.wandb:\n wandb.watch(self.online_encoder, self.target_encoder,\n self.online_predictor)\n # send a mock image tensor to instantiate singleton parameters\n assert grayscale\n nr_channels = num_frame_stack\n self.forward(torch.rand(batch_size, nr_channels,\n 210, 160, device=self.device))\n self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)\n print(\n f\"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}\")\n\n @singleton('target_encoder')\n def _get_target_encoder(self):\n target_encoder = copy.deepcopy(self.online_encoder)\n 
set_requires_grad(target_encoder, False)\n return target_encoder\n\n def reset_moving_average(self):\n del self.target_encoder\n self.target_encoder = None\n\n def update_moving_average(self):\n assert self.target_encoder is not None, 'target encoder has not been created yet'\n update_moving_average(self.target_ema_updater,\n self.target_encoder, self.online_encoder)\n\n def forward(self, x):\n image_one, image_two = self.augment1(x), self.augment2(x)\n\n online_proj_one = self.online_encoder(image_one)\n online_proj_two = self.online_encoder(image_two)\n\n online_pred_one = self.online_predictor(online_proj_one)\n online_pred_two = self.online_predictor(online_proj_two)\n\n with torch.no_grad():\n target_encoder = self._get_target_encoder()\n target_proj_one = target_encoder(image_one)\n target_proj_two = target_encoder(image_two)\n\n loss_one = loss_fn(online_pred_one, target_proj_two.detach())\n loss_two = loss_fn(online_pred_two, target_proj_one.detach())\n\n loss = loss_one + loss_two\n return loss.mean()\n\n def logResults(self, epoch_idx, epoch_loss, prefix=\"\"):\n print(f\"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}\")\n if self.wandb:\n self.wandb.log({prefix + '_loss': epoch_loss},\n step=epoch_idx, commit=False)\n\n def doOneEpoch(self, nr_epoch, episodes):\n mode = \"train\" if self.training else \"val\"\n data_generator = generate_batch(episodes, self.batch_size, self.device)\n for steps, batch in enumerate(data_generator):\n print(f\"batch nr {steps} for mode {mode}\")\n loss = self(batch)\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n self.update_moving_average() # update moving average of target encoder\n self.logResults(nr_epoch, loss / steps, prefix=mode)\n if mode == \"val\":\n self.early_stopper(-loss / steps, self.online_encoder)\n\n\ndef generate_batch(episodes, batch_size, device):\n total_steps = sum([len(e) for e in episodes])\n print('Total Steps: {}'.format(total_steps))\n # Episode sampler\n # Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch\n sampler = BatchSampler(RandomSampler(range(len(episodes)),\n replacement=True, num_samples=total_steps),\n batch_size, drop_last=True)\n for nr, indices in enumerate(sampler):\n x = []\n episodes_batch = [episodes[i] for i in indices]\n # print(f\"indices in sampler nr {nr} are {*indices,}\")\n for e in episodes_batch:\n t = np.random.randint(0, len(e))\n x.append(e[t])\n yield torch.stack(x).float().to(device) / 255. # SCALING!!!!" ]
[ [ "torch.nn.Linear", "torch.nn.functional.normalize", "torch.rand", "torch.stack", "torch.no_grad", "torch.nn.ReLU", "numpy.random.uniform", "torch.nn.BatchNorm1d", "torch.cuda.is_available" ] ]
Hacker-007/E2
[ "efb829da84734abfc6ac10e1ea20b5dcfd99c7f1" ]
[ "prepare_datasets_DRIVE.py" ]
[ "#==========================================================\n#\n# This prepare the hdf5 datasets of the DRIVE database\n#\n#============================================================\n\nimport os\nimport h5py\nimport numpy as np\nfrom PIL import Image\n\n#content/add2/E2/DRIVE_datasets_training_testing\n\ndef write_hdf5(arr,outfile):\n with h5py.File(outfile,\"w\") as f:\n f.create_dataset(\"image\", data=arr, dtype=arr.dtype)\n\n\n#------------Path of the images --------------------------------------------------------------\n#train\noriginal_imgs_train = \"/content/add2/E2/training/images/\"\ngroundTruth_imgs_train = \"/content/add2/E2/training/1st_manual/\"\nborderMasks_imgs_train = \"/content/add2/E2/training/mask/\"\n#test\noriginal_imgs_test = \"/content/add2/E2//test/images/\"\ngroundTruth_imgs_test = \"/content/add2/E2/test/1st_manual/\"\nborderMasks_imgs_test = \"content/add2/E2/test/mask/\"\n#---------------------------------------------------------------------------------------------\n\nNimgs = 20\nchannels = 3\nheight = 584\nwidth = 565\ndataset_path = \"/content/add2/E2/DRIVE_datasets_training_testing/\"\n\ndef get_datasets(imgs_dir,groundTruth_dir,borderMasks_dir,train_test=\"null\"):\n imgs = np.empty((Nimgs,height,width,channels))\n groundTruth = np.empty((Nimgs,height,width))\n border_masks = np.empty((Nimgs,height,width))\n for path, subdirs, files in os.walk(imgs_dir): #list all files, directories in the path\n for i in range(len(files)):\n #original\n print (\"original image: \" +files[i])\n img = Image.open(imgs_dir+files[i])\n imgs[i] = np.asarray(img)\n #corresponding ground truth\n groundTruth_name = files[i][0:2] + \"_manual1.gif\"\n print (\"ground truth name: \" + groundTruth_name)\n g_truth = Image.open(groundTruth_dir + groundTruth_name)\n groundTruth[i] = np.asarray(g_truth)\n #corresponding border masks\n border_masks_name = \"\"\n if train_test==\"train\":\n border_masks_name = files[i][0:2] + \"_training_mask.gif\"\n elif train_test==\"test\":\n border_masks_name = files[i][0:2] + \"_test_mask.gif\"\n else:\n print (\"specify if train or test!!\")\n exit()\n print (\"border masks name: \" + border_masks_name)\n b_mask = Image.open(borderMasks_dir + border_masks_name)\n border_masks[i] = np.asarray(b_mask)\n\n print (\"imgs max: \" +str(np.max(imgs)))\n print (\"imgs min: \" +str(np.min(imgs)))\n assert(np.max(groundTruth)==255 and np.max(border_masks)==255)\n assert(np.min(groundTruth)==0 and np.min(border_masks)==0)\n print (\"ground truth and border masks are correctly withih pixel value range 0-255 (black-white)\")\n #reshaping for my standard tensors\n imgs = np.transpose(imgs,(0,3,1,2))\n assert(imgs.shape == (Nimgs,channels,height,width))\n groundTruth = np.reshape(groundTruth,(Nimgs,1,height,width))\n border_masks = np.reshape(border_masks,(Nimgs,1,height,width))\n assert(groundTruth.shape == (Nimgs,1,height,width))\n assert(border_masks.shape == (Nimgs,1,height,width))\n return imgs, groundTruth, border_masks\n\nif not os.path.exists(dataset_path):\n os.makedirs(dataset_path)\n#getting the training datasets\nimgs_train, groundTruth_train, border_masks_train = get_datasets(original_imgs_train,groundTruth_imgs_train,borderMasks_imgs_train,\"train\")\nprint (\"saving train datasets\")\nwrite_hdf5(imgs_train, dataset_path + \"DRIVE_dataset_imgs_train.hdf5\")\nwrite_hdf5(groundTruth_train, dataset_path + \"DRIVE_dataset_groundTruth_train.hdf5\")\nwrite_hdf5(border_masks_train,dataset_path + \"DRIVE_dataset_borderMasks_train.hdf5\")\n\n#getting the 
testing datasets\nimgs_test, groundTruth_test, border_masks_test = get_datasets(original_imgs_test,groundTruth_imgs_test,borderMasks_imgs_test,\"test\")\nprint (\"saving test datasets\")\nwrite_hdf5(imgs_test,dataset_path + \"DRIVE_dataset_imgs_test.hdf5\")\nwrite_hdf5(groundTruth_test, dataset_path + \"DRIVE_dataset_groundTruth_test.hdf5\")\nwrite_hdf5(border_masks_test,dataset_path + \"DRIVE_dataset_borderMasks_test.hdf5\")\n" ]
[ [ "numpy.max", "numpy.empty", "numpy.reshape", "numpy.asarray", "numpy.min", "numpy.transpose" ] ]
afard/VerticaPy
[ "ecbee0027a208ba53b31438e5b2f4577af95a07e" ]
[ "verticapy/learn/tsa.py" ]
[ "# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# |_ |~) _ _| _ /~\\ _ |.\n# |_)\\/ |_)(_|(_|| \\_/|_|(_|||\n# /\n# ____________ ______\n# / __ `\\ / /\n# | \\/ / / /\n# |______ / / /\n# |____/ / /\n# _____________ / /\n# \\ / / /\n# \\ / / /\n# \\_______/ / /\n# ______ / /\n# \\ / / /\n# \\ / / /\n# \\/ / /\n# / /\n# / /\n# \\ /\n# \\ /\n# \\/\n# _\n# \\ / _ __|_. _ _ |_)\n# \\/ (/_| | |(_(_|| \\/\n# /\n# VerticaPy is a Python library with scikit-like functionality to use to conduct\n# data science projects on data stored in Vertica, taking advantage Vertica’s\n# speed and built-in analytics and machine learning features. It supports the\n# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize\n# data transformation operations, and offers beautiful graphical options.\n#\n# VerticaPy aims to solve all of these problems. The idea is simple: instead\n# of moving data around for processing, VerticaPy brings the logic to the data.\n#\n#\n# Modules\n#\n# Standard Python Modules\nimport math, warnings\nfrom typing import Union\n\n# VerticaPy Modules\nfrom verticapy.learn.vmodel import *\nfrom verticapy.learn.linear_model import LinearRegression\nfrom verticapy import vDataFrame\nfrom verticapy.plot import gen_colors\nfrom verticapy.learn.tools import *\n\n# Other Python Modules\nfrom dateutil.parser import parse\nimport matplotlib.pyplot as plt\n\n# ---#\nclass SARIMAX(Regressor):\n \"\"\"\n---------------------------------------------------------------------------\n[Beta Version]\nCreates an SARIMAX object using the Vertica Linear Regression algorithm on \nthe data.\n\nParameters\n----------\nname: str\n Name of the the model. The model will be stored in the DB.\ncursor: DBcursor, optional\n Vertica database cursor.\np: int, optional\n Order of the AR (Auto-Regressive) part.\nd: int, optional\n Order of the I (Integrated) part.\nq: int, optional\n Order of the MA (Moving-Average) part.\nP: int, optional\n Order of the seasonal AR (Auto-Regressive) part.\nD: int, optional\n Order of the seasonal I (Integrated) part.\nQ: int, optional\n Order of the seasonal MA (Moving-Average) part.\ns: int, optional\n Span of the seasonality.\ntol: float, optional\n Determines whether the algorithm has reached the specified accuracy result.\nmax_iter: int, optional\n Determines the maximum number of iterations the algorithm performs before \n achieving the specified accuracy result.\nsolver: str, optional\n The optimizer method to use to train the model. 
\n Newton : Newton Method\n BFGS : Broyden Fletcher Goldfarb Shanno\nmax_pik: int, optional\n Number of inverse MA coefficient used to approximate the MA.\npapprox_ma: int, optional\n the p of the AR(p) used to approximate the MA coefficients.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n cursor=None,\n p: int = 0,\n d: int = 0,\n q: int = 0,\n P: int = 0,\n D: int = 0,\n Q: int = 0,\n s: int = 0,\n tol: float = 1e-4,\n max_iter: int = 1000,\n solver: str = \"Newton\",\n max_pik: int = 100,\n papprox_ma: int = 200,\n ):\n check_types([(\"name\", name, [str],)])\n self.type, self.name = \"SARIMAX\", name\n self.set_params(\n {\n \"p\": p,\n \"d\": d,\n \"q\": q,\n \"P\": P,\n \"D\": D,\n \"Q\": Q,\n \"s\": s,\n \"tol\": tol,\n \"max_iter\": max_iter,\n \"solver\": solver,\n \"max_pik\": max_pik,\n \"papprox_ma\": papprox_ma,\n }\n )\n if self.parameters[\"s\"] == 0:\n assert (\n self.parameters[\"D\"] == 0\n and self.parameters[\"P\"] == 0\n and self.parameters[\"Q\"] == 0\n ), ParameterError(\n \"In case of non-seasonality (s = 0), all the parameters P, D or Q must be equal to 0.\"\n )\n else:\n assert (\n self.parameters[\"D\"] > 0\n or self.parameters[\"P\"] > 0\n or self.parameters[\"Q\"] > 0\n ), ParameterError(\n \"In case of seasonality (s > 0), at least one of the parameters P, D or Q must be strictly greater than 0.\"\n )\n cursor = check_cursor(cursor)[0]\n self.cursor = cursor\n version(cursor=cursor, condition=[8, 0, 0])\n\n # ---#\n def deploySQL(self):\n \"\"\"\n ---------------------------------------------------------------------------\n Returns the SQL code needed to deploy the model.\n\n Returns\n -------\n str\n the SQL code needed to deploy the model.\n \"\"\"\n sql = self.deploy_predict_\n if (self.parameters[\"d\"] > 0) or (\n self.parameters[\"D\"] > 0 and self.parameters[\"s\"] > 0\n ):\n for i in range(0, self.parameters[\"d\"] + 1):\n for k in range(\n 0, max((self.parameters[\"D\"] + 1) * min(1, self.parameters[\"s\"]), 1)\n ):\n if (k, i) != (0, 0):\n comb_i_d = (\n math.factorial(self.parameters[\"d\"])\n / math.factorial(self.parameters[\"d\"] - i)\n / math.factorial(i)\n )\n comb_k_D = (\n math.factorial(self.parameters[\"D\"])\n / math.factorial(self.parameters[\"D\"] - k)\n / math.factorial(k)\n )\n sql += \" + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n (-1) ** (i + k + 1) * comb_i_d * comb_k_D,\n i + self.parameters[\"s\"] * k,\n )\n return sql\n\n # ---#\n def fpredict(self, L: list):\n \"\"\"\n ---------------------------------------------------------------------------\n Computes the prediction.\n\n Parameters\n ----------\n L: list\n List containing the data. It must be a two-dimensional list containing \n multiple rows. Each row must include as first element the ordered predictor\n and as nth elements the nth - 1 exogenous variable (nth > 2). 
\n\n Returns\n -------\n float\n the prediction.\n \"\"\"\n\n def sub_arp(L: list):\n L_final = []\n for i in range(len(L)):\n result = L[-i]\n for i in range(len(self.coef_.values[\"coefficient\"])):\n elem = self.coef_.values[\"predictor\"][i]\n if elem.lower() == \"intercept\":\n result -= self.coef_.values[\"coefficient\"][i]\n elif elem.lower()[0:2] == \"ar\":\n nb = int(elem[2:])\n try:\n result -= self.coef_.values[\"coefficient\"][i] * L[-nb]\n except:\n result = None\n L_final = [result] + L_final\n return L_final\n\n def fepsilon(L: list):\n if self.parameters[\"p\"] > 0 or self.parameters[\"P\"] > 0:\n L_tmp = sub_arp(L)\n else:\n L_tmp = L\n try:\n result = L_tmp[-1] - self.ma_avg_\n for i in range(1, self.parameters[\"max_pik\"]):\n result -= self.ma_piq_.values[\"coefficient\"][i] * (\n L_tmp[-i] - self.ma_avg_\n )\n return result\n except:\n return 0\n\n if (\n self.parameters[\"p\"] == 0\n and self.parameters[\"q\"] == 0\n and self.parameters[\"d\"] == 0\n and self.parameters[\"s\"] == 0\n and not (self.exogenous)\n ):\n return self.ma_avg_\n try:\n yt = [elem[0] for elem in L]\n yt_copy = [elem[0] for elem in L]\n yt.reverse()\n if self.parameters[\"d\"] > 0:\n for i in range(self.parameters[\"d\"]):\n yt = [yt[i - 1] - yt[i] for i in range(1, len(yt))]\n if self.parameters[\"D\"] > 0 and self.parameters[\"s\"] > 0:\n for i in range(self.parameters[\"D\"]):\n yt = [\n yt[i - self.parameters[\"s\"]] - yt[i]\n for i in range(self.parameters[\"s\"], len(yt))\n ]\n yt.reverse()\n result, j = 0, 1\n for i in range(len(self.coef_.values[\"coefficient\"])):\n elem = self.coef_.values[\"predictor\"][i]\n if elem.lower() == \"intercept\":\n result += self.coef_.values[\"coefficient\"][i]\n elif elem.lower()[0:2] == \"ar\":\n nb = int(elem[2:])\n result += self.coef_.values[\"coefficient\"][i] * yt[-nb]\n elif elem.lower()[0:2] == \"ma\":\n nb = int(elem[2:])\n result += self.coef_.values[\"coefficient\"][i] * fepsilon(\n yt[: -nb - 1]\n )\n else:\n result += self.coef_.values[\"coefficient\"][i] * L[-1][j]\n j += 1\n for i in range(0, self.parameters[\"d\"] + 1):\n for k in range(\n 0, max((self.parameters[\"D\"] + 1) * min(1, self.parameters[\"s\"]), 1)\n ):\n if (k, i) != (0, 0):\n comb_i_d = (\n math.factorial(self.parameters[\"d\"])\n / math.factorial(self.parameters[\"d\"] - i)\n / math.factorial(i)\n )\n comb_k_D = (\n math.factorial(self.parameters[\"D\"])\n / math.factorial(self.parameters[\"D\"] - k)\n / math.factorial(k)\n )\n result += (\n (-1) ** (i + k + 1)\n * comb_i_d\n * comb_k_D\n * yt_copy[-(i + self.parameters[\"s\"] * k)]\n )\n return result\n except:\n return None\n\n # ---#\n def fit(\n self,\n input_relation: Union[vDataFrame, str],\n y: str,\n ts: str,\n X: list = [],\n test_relation: Union[vDataFrame, str] = \"\",\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Trains the model.\n\n Parameters\n ----------\n input_relation: str/vDataFrame\n Training relation.\n y: str\n Response column.\n ts: str\n vcolumn used to order the data.\n X: list, optional\n exogenous columns used to fit the model.\n test_relation: str/vDataFrame, optional\n Relation used to test the model.\n\n Returns\n -------\n object\n model\n \"\"\"\n check_types(\n [\n (\"input_relation\", input_relation, [str, vDataFrame],),\n (\"y\", y, [str],),\n (\"test_relation\", test_relation, [str, vDataFrame],),\n (\"ts\", ts, [str],),\n ]\n )\n self.cursor = check_cursor(self.cursor, input_relation, True)[0]\n # Initialization\n 
does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)\n self.input_relation = (\n input_relation\n if isinstance(input_relation, str)\n else input_relation.__genSQL__()\n )\n if isinstance(test_relation, vDataFrame):\n self.test_relation = test_relation.__genSQL__()\n elif test_relation:\n self.test_relation = test_relation\n else:\n self.test_relation = self.input_relation\n self.y, self.ts, self.deploy_predict_ = str_column(y), str_column(ts), \"\"\n self.coef_ = tablesample({\"predictor\": [], \"coefficient\": []})\n self.ma_avg_, self.ma_piq_ = None, None\n X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]\n self.X, self.exogenous = [], X\n relation = (\n \"(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE \"\n )\n model = LinearRegression(\n name=self.name,\n solver=self.parameters[\"solver\"],\n max_iter=self.parameters[\"max_iter\"],\n tol=self.parameters[\"tol\"],\n )\n\n if (\n self.parameters[\"p\"] == 0\n and self.parameters[\"q\"] == 0\n and self.parameters[\"d\"] == 0\n and self.parameters[\"s\"] == 0\n and not (self.exogenous)\n ):\n query = \"SELECT AVG({}) FROM {}\".format(self.y, self.input_relation)\n self.ma_avg_ = self.cursor.execute(query).fetchone()[0]\n self.deploy_predict_ = str(self.ma_avg_)\n\n # I(d)\n if self.parameters[\"d\"] > 0:\n for i in range(self.parameters[\"d\"]):\n relation = \"(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE\".format(\n relation\n )\n if self.parameters[\"D\"] > 0 and self.parameters[\"s\"] > 0:\n for i in range(self.parameters[\"D\"]):\n relation = \"(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE\".format(\n self.parameters[\"s\"], relation\n )\n\n def drop_temp_elem(self, schema):\n try:\n with warnings.catch_warnings(record=True) as w:\n drop(\n \"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}\".format(\n schema, get_session(self.cursor)\n ),\n cursor=self.cursor,\n method=\"view\",\n )\n except:\n pass\n\n # AR(p)\n if self.parameters[\"p\"] > 0 or self.parameters[\"P\"] > 0:\n columns = [\n \"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}\".format(\n i, i\n )\n for i in range(1, self.parameters[\"p\"] + 1)\n ]\n AR = [\"AR{}\".format(i) for i in range(1, self.parameters[\"p\"] + 1)]\n if self.parameters[\"s\"] > 0:\n for i in range(1, self.parameters[\"P\"] + 1):\n if (i * self.parameters[\"s\"]) not in (\n range(1, self.parameters[\"p\"] + 1)\n ):\n columns += [\n \"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}\".format(\n i * self.parameters[\"s\"], i * self.parameters[\"s\"]\n )\n ]\n AR += [\"AR{}\".format(i * self.parameters[\"s\"])]\n relation = \"(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE\".format(\n \", \".join(columns), relation\n )\n drop_temp_elem(self, schema)\n query = \"CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}\".format(\n schema,\n get_session(self.cursor),\n relation.format(self.input_relation)\n .replace(\"[VerticaPy_ts]\", self.ts)\n .replace(\"[VerticaPy_y]\", self.y)\n .replace(\"[VerticaPy_key_columns]\", \", \" + \", \".join([self.ts] + X)),\n )\n try:\n self.cursor.execute(query)\n self.X += AR + X\n model.fit(\n input_relation=\"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}\".format(\n schema, get_session(self.cursor)\n ),\n X=self.X,\n 
y=self.y,\n )\n except:\n drop_temp_elem(self, schema)\n raise\n drop_temp_elem(self, schema)\n self.coef_.values[\"predictor\"] = model.coef_.values[\"predictor\"]\n self.coef_.values[\"coefficient\"] = model.coef_.values[\"coefficient\"]\n alphaq = model.coef_.values[\"coefficient\"]\n model.drop()\n epsilon_final = (\n \"[VerticaPy_y] - \"\n + str(alphaq[0])\n + \" - \"\n + \" - \".join(\n [\n str(alphaq[i])\n + \" * \"\n + \"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n i\n )\n for i in range(1, self.parameters[\"p\"] + 1)\n ]\n )\n )\n self.deploy_predict_ = (\n str(alphaq[0])\n + \" + \"\n + \" + \".join(\n [\n str(alphaq[i])\n + \" * \"\n + \"LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n i\n )\n for i in range(1, self.parameters[\"p\"] + 1)\n ]\n )\n )\n if self.parameters[\"s\"] > 0 and self.parameters[\"P\"] > 0:\n epsilon_final += \" - \" + \" - \".join(\n [\n str(alphaq[i])\n + \" * \"\n + \"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n i * self.parameters[\"s\"]\n )\n for i in range(\n self.parameters[\"p\"] + 1,\n self.parameters[\"p\"]\n + (self.parameters[\"P\"] if self.parameters[\"s\"] > 0 else 0)\n + 1,\n )\n ]\n )\n self.deploy_predict_ += \" + \" + \" + \".join(\n [\n str(alphaq[i])\n + \" * \"\n + \"LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n i * self.parameters[\"s\"]\n )\n for i in range(\n self.parameters[\"p\"] + 1,\n self.parameters[\"p\"]\n + (self.parameters[\"P\"] if self.parameters[\"s\"] > 0 else 0)\n + 1,\n )\n ]\n )\n for idx, elem in enumerate(X):\n epsilon_final += \" - {} * [X{}]\".format(\n alphaq[\n idx\n + self.parameters[\"p\"]\n + (self.parameters[\"P\"] if self.parameters[\"s\"] > 0 else 0)\n + 1\n ],\n idx,\n )\n self.deploy_predict_ += \" + {} * [X{}]\".format(\n alphaq[\n idx\n + self.parameters[\"p\"]\n + (self.parameters[\"P\"] if self.parameters[\"s\"] > 0 else 0)\n + 1\n ],\n idx,\n )\n relation = \"(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE\".format(\n epsilon_final, \", \".join(AR), relation\n )\n\n # MA(q)\n if self.parameters[\"q\"] > 0 or (\n self.parameters[\"Q\"] > 0 and self.parameters[\"s\"] > 0\n ):\n transform_relation = relation.replace(\"[VerticaPy_y]\", y).replace(\n \"[VerticaPy_ts]\", ts\n )\n transform_relation = transform_relation.replace(\n \"[VerticaPy_key_columns]\", \", \" + \", \".join(X + [ts])\n )\n for idx, elem in enumerate(X):\n transform_relation = transform_relation.replace(\n \"[X{}]\".format(idx), elem\n )\n query = \"SELECT COUNT(*), AVG({}) FROM {}\".format(\n self.y, transform_relation.format(self.input_relation)\n )\n result = self.cursor.execute(query).fetchone()\n self.ma_avg_ = result[1]\n n = result[0]\n n = max(\n max(\n min(max(n ** (1.0 / 3.0), 8), self.parameters[\"papprox_ma\"]),\n self.parameters[\"q\"],\n ),\n self.parameters[\"Q\"] * self.parameters[\"s\"] + 1,\n )\n n = int(n)\n columns = [\n \"LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}\".format(\n i, i\n )\n for i in range(1, n)\n ]\n ARq = [\"ARq{}\".format(i) for i in range(1, n)]\n tmp_relation = \"(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE\".format(\n \", \".join(columns), relation\n )\n for idx, elem in enumerate(X):\n tmp_relation = tmp_relation.replace(\"[X{}]\".format(idx), elem)\n drop_temp_elem(self, schema)\n query = \"CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}\".format(\n schema,\n get_session(self.cursor),\n 
tmp_relation.format(self.input_relation)\n .replace(\"[VerticaPy_ts]\", self.ts)\n .replace(\"[VerticaPy_y]\", self.y)\n .replace(\"[VerticaPy_key_columns]\", \", \" + \", \".join([self.ts] + X)),\n )\n try:\n self.cursor.execute(query)\n model.fit(\n input_relation=\"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}\".format(\n schema, get_session(self.cursor)\n ),\n X=ARq,\n y=self.y,\n )\n except:\n drop_temp_elem(self, schema)\n raise\n drop_temp_elem(self, schema)\n if not (self.coef_.values[\"predictor\"]):\n self.coef_.values[\"predictor\"] += [\"Intercept\"]\n self.coef_.values[\"coefficient\"] += [self.ma_avg_]\n self.deploy_predict_ = str(self.ma_avg_)\n alphaq = model.coef_.values[\"coefficient\"][1:]\n model.drop()\n thetaq, piq = [], [-1] + []\n for j in range(0, len(alphaq)):\n thetaq += [\n sum([alphaq[j - i - 1] * thetaq[i] for i in range(0, j)])\n + alphaq[j]\n ]\n for j in range(self.parameters[\"q\"]):\n self.coef_.values[\"predictor\"] += [\"ma{}\".format(j + 1)]\n self.coef_.values[\"coefficient\"] += [thetaq[j]]\n self.deploy_predict_ += \" + {} * MA{}\".format(thetaq[j], j + 1)\n if self.parameters[\"s\"] > 0:\n for j in range(1, self.parameters[\"Q\"] + 1):\n self.coef_.values[\"predictor\"] += [\n \"ma{}\".format(self.parameters[\"s\"] * j)\n ]\n self.coef_.values[\"coefficient\"] += [\n thetaq[self.parameters[\"s\"] * j - 1]\n ]\n self.deploy_predict_ += \" + {} * MA{}\".format(\n thetaq[self.parameters[\"s\"] * j - 1], self.parameters[\"s\"] * j\n )\n for j in range(0, self.parameters[\"max_pik\"]):\n piq_tmp = 0\n for i in range(0, self.parameters[\"q\"]):\n if j - i > 0:\n piq_tmp -= thetaq[i] * piq[j - i]\n elif j - i == 0:\n piq_tmp -= thetaq[i]\n piq = piq + [piq_tmp]\n self.ma_piq_ = tablesample({\"coefficient\": piq})\n epsilon = (\n \"[VerticaPy_y] - \"\n + str(self.ma_avg_)\n + \" - \"\n + \" - \".join(\n [\n str((piq[i]))\n + \" * \"\n + \"LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])\".format(\n self.ma_avg_, i\n )\n for i in range(1, self.parameters[\"max_pik\"])\n ]\n )\n )\n epsilon += \" AS MA0\"\n relation = \"(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE\".format(\n epsilon, relation\n )\n columns = [\n \"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}\".format(i, i)\n for i in range(1, self.parameters[\"q\"] + 1)\n ]\n MA = [\"MA{}\".format(i) for i in range(1, self.parameters[\"q\"] + 1)]\n if self.parameters[\"s\"] > 0:\n columns += [\n \"LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}\".format(\n i * self.parameters[\"s\"], i * self.parameters[\"s\"]\n )\n for i in range(1, self.parameters[\"Q\"] + 1)\n ]\n MA += [\n \"MA{}\".format(i * self.parameters[\"s\"])\n for i in range(1, self.parameters[\"Q\"] + 1)\n ]\n relation = \"(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE\".format(\n \", \".join(columns), relation\n )\n self.X += MA\n transform_relation = relation.replace(\"[VerticaPy_y]\", y).replace(\n \"[VerticaPy_ts]\", ts\n )\n transform_relation = transform_relation.replace(\n \"[VerticaPy_key_columns]\", \", \" + \", \".join(X + [ts])\n )\n for idx, elem in enumerate(X):\n transform_relation = transform_relation.replace(\n \"[X{}]\".format(idx), elem\n )\n self.transform_relation = relation\n model_save = {\n \"type\": \"SARIMAX\",\n \"input_relation\": self.input_relation,\n \"test_relation\": self.test_relation,\n \"transform_relation\": self.transform_relation,\n \"deploy_predict\": self.deploy_predict_,\n \"ma_avg\": self.ma_avg_,\n \"ma_piq\": self.ma_piq_.values if (self.ma_piq_) else None,\n \"X\": self.X,\n \"y\": 
self.y,\n \"ts\": self.ts,\n \"exogenous\": self.exogenous,\n \"coef\": self.coef_.values,\n \"p\": self.parameters[\"p\"],\n \"d\": self.parameters[\"d\"],\n \"q\": self.parameters[\"q\"],\n \"P\": self.parameters[\"P\"],\n \"D\": self.parameters[\"D\"],\n \"Q\": self.parameters[\"Q\"],\n \"s\": self.parameters[\"s\"],\n \"tol\": self.parameters[\"tol\"],\n \"max_iter\": self.parameters[\"max_iter\"],\n \"solver\": self.parameters[\"solver\"],\n \"max_pik\": self.parameters[\"max_pik\"],\n \"papprox_ma\": self.parameters[\"papprox_ma\"],\n }\n insert_verticapy_schema(\n model_name=self.name,\n model_type=\"SARIMAX\",\n model_save=model_save,\n cursor=self.cursor,\n )\n return self\n\n # ---#\n def plot(\n self,\n vdf: vDataFrame = None,\n y: str = \"\",\n ts: str = \"\",\n X: list = [],\n dynamic: bool = False,\n one_step: bool = True,\n observed: bool = True,\n confidence: bool = True,\n nlead: int = 10,\n nlast: int = 0,\n limit: int = 1000,\n ax=None,\n **style_kwds,\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Draws the SARIMAX model.\n\n Parameters\n ----------\n vdf: vDataFrame, optional\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n dynamic: bool, optional\n If set to True, the dynamic forecast will be drawn.\n one_step: bool, optional\n If set to True, the one step ahead forecast will be drawn.\n observed: bool, optional\n If set to True, the observation will be drawn.\n confidence: bool, optional\n If set to True, the confidence ranges will be drawn.\n nlead: int, optional\n Number of predictions computed by the dynamic forecast after\n the last ts date.\n nlast: int, optional\n The dynamic forecast will start nlast values before the last\n ts date.\n limit: int, optional\n Maximum number of past elements to use.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n \"\"\"\n if not (vdf):\n vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)\n check_types(\n [\n (\"limit\", limit, [int, float],),\n (\"nlead\", nlead, [int, float],),\n (\"dynamic\", dynamic, [bool],),\n (\"observed\", observed, [bool],),\n (\"one_step\", one_step, [bool],),\n (\"confidence\", confidence, [bool],),\n (\"vdf\", vdf, [vDataFrame],),\n ],\n )\n delta_limit, limit = (\n limit,\n max(\n max(\n limit,\n self.parameters[\"p\"] + 1 + nlast,\n self.parameters[\"P\"] * self.parameters[\"s\"] + 1 + nlast,\n ),\n 200,\n ),\n )\n delta_limit = max(limit - delta_limit - nlast, 0)\n assert dynamic or one_step or observed, ParameterError(\n \"No option selected.\\n You should set either dynamic, one_step or observed to True.\"\n )\n assert nlead + nlast > 0 or not (dynamic), ParameterError(\n \"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.\"\n )\n if dynamic:\n assert not (self.exogenous), Exception(\n \"Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX.\"\n )\n if not (y):\n y = self.y\n if not (ts):\n ts = self.ts\n if not (X):\n X = self.exogenous\n result = self.predict(\n vdf=vdf, y=y, ts=ts, X=X, nlead=0, name=\"_verticapy_prediction_\"\n )\n error_eps = 1.96 * math.sqrt(self.score(method=\"mse\"))\n print_info = 
verticapy.options[\"print_info\"]\n verticapy.options[\"print_info\"] = False\n try:\n result = (\n result.select([ts, y, \"_verticapy_prediction_\"])\n .dropna()\n .sort([ts])\n .tail(limit)\n .values\n )\n except:\n verticapy.options[\"print_info\"] = print_info\n raise\n verticapy.options[\"print_info\"] = print_info\n columns = [elem for elem in result]\n if isinstance(result[columns[0]][0], str):\n result[columns[0]] = [parse(elem) for elem in result[columns[0]]]\n true_value = [result[columns[0]], result[columns[1]]]\n one_step_ahead = [result[columns[0]], result[columns[2]]]\n lower_osa, upper_osa = (\n [\n float(elem) - error_eps if elem != None else None\n for elem in one_step_ahead[1]\n ],\n [\n float(elem) + error_eps if elem != None else None\n for elem in one_step_ahead[1]\n ],\n )\n if dynamic:\n deltat = result[columns[0]][-1] - result[columns[0]][-2]\n lead_time_list = []\n if nlast > 0:\n lead_list = [[elem] for elem in result[columns[1]][:-nlast]]\n else:\n lead_list = [[elem] for elem in result[columns[1]]]\n for i in range(nlast):\n lead_list += [[self.fpredict(lead_list)]]\n lead_time_list += [result[columns[0]][i - nlast]]\n if lead_time_list:\n start_time = lead_time_list[-1]\n else:\n start_time = result[columns[0]][-1]\n for i in range(nlead):\n lead_list += [[self.fpredict(lead_list)]]\n lead_time_list += [start_time + (i + 1) * deltat]\n dynamic_forecast = (\n [result[columns[0]][-nlast - 1]] + lead_time_list,\n [result[columns[1]][-nlast - 1]]\n + [elem[0] for elem in lead_list[-nlast - nlead :]],\n )\n lower_d, upper_d = [], []\n for i in range(len(dynamic_forecast[1])):\n if (\n self.parameters[\"s\"] > 0\n and self.parameters[\"p\"] == 0\n and self.parameters[\"d\"] == 0\n and self.parameters[\"q\"] == 0\n ):\n delta_error = error_eps * math.sqrt(\n int(i / self.parameters[\"s\"]) + 1\n )\n else:\n delta_error = error_eps * math.sqrt(i + 1)\n lower_d += [float(dynamic_forecast[1][i]) - delta_error]\n upper_d += [float(dynamic_forecast[1][i]) + delta_error]\n else:\n lower_d, upper_d, dynamic_forecast = [], [], ([], [])\n alpha = 0.3\n if not (ax):\n fig, ax = plt.subplots()\n if isnotebook():\n fig.set_size_inches(10, 6)\n ax.grid()\n colors = gen_colors()\n param1 = {\n \"color\": colors[2],\n \"linewidth\": 2,\n }\n param2 = {\n \"color\": colors[3],\n \"linewidth\": 2,\n \"linestyle\": \":\",\n }\n param3 = {\n \"color\": colors[0],\n \"linewidth\": 2,\n \"linestyle\": \"dashed\",\n }\n if dynamic:\n ax.fill_between(\n dynamic_forecast[0],\n 1.02\n * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n 1.02\n * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n alpha=0.04,\n color=updated_dict(param3, style_kwds, 2)[\"color\"],\n )\n if confidence:\n ax.fill_between(\n dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color=\"#555555\"\n )\n ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color=\"#000000\")\n ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color=\"#000000\")\n ax.plot(\n dynamic_forecast[0],\n dynamic_forecast[1],\n label=\"Dynamic Forecast\",\n **updated_dict(param3, style_kwds, 2),\n )\n if one_step:\n if confidence:\n ax.fill_between(\n one_step_ahead[0][delta_limit:],\n lower_osa[delta_limit:],\n upper_osa[delta_limit:],\n alpha=0.04,\n color=\"#555555\",\n )\n ax.plot(\n one_step_ahead[0][delta_limit:],\n lower_osa[delta_limit:],\n alpha=0.04,\n color=\"#000000\",\n )\n ax.plot(\n one_step_ahead[0][delta_limit:],\n upper_osa[delta_limit:],\n alpha=0.04,\n color=\"#000000\",\n )\n ax.plot(\n 
one_step_ahead[0][delta_limit:],\n one_step_ahead[1][delta_limit:],\n label=\"One-step ahead Forecast\",\n **updated_dict(param2, style_kwds, 1),\n )\n if observed:\n ax.plot(\n true_value[0][delta_limit:],\n true_value[1][delta_limit:],\n label=\"Observed\",\n **updated_dict(param1, style_kwds, 0),\n )\n ax.set_title(\n \"SARIMAX({},{},{})({},{},{})_{}\".format(\n self.parameters[\"p\"],\n self.parameters[\"d\"],\n self.parameters[\"q\"],\n self.parameters[\"P\"],\n self.parameters[\"D\"],\n self.parameters[\"Q\"],\n self.parameters[\"s\"],\n )\n )\n ax.set_xlabel(ts)\n ax.legend(loc=\"center left\", bbox_to_anchor=[1, 0.5])\n ax.set_ylim(\n 1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n 1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n return ax\n\n # ---#\n def predict(\n self,\n vdf: vDataFrame,\n y: str = \"\",\n ts: str = \"\",\n X: list = [],\n nlead: int = 0,\n name: str = \"\",\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Predicts using the input relation.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n y: str, optional\n Response column.\n ts: str, optional\n vcolumn used to order the data.\n X: list, optional\n exogenous vcolumns.\n nlead: int, optional\n Number of records to predict after the last ts date.\n name: str, optional\n Name of the added vcolumn. If empty, a name will be generated.\n\n Returns\n -------\n vDataFrame\n object including the prediction.\n \"\"\"\n check_types(\n [\n (\"name\", name, [str],),\n (\"y\", y, [str],),\n (\"ts\", ts, [str],),\n (\"X\", X, [list],),\n (\"nlead\", nlead, [int, float],),\n (\"vdf\", vdf, [vDataFrame],),\n ],\n )\n if not (y):\n y = self.y\n if not (ts):\n ts = self.ts\n if not (X):\n X = self.exogenous\n columns_check([y, ts], vdf)\n y, ts = vdf_columns_names([y, ts], vdf)\n name = (\n \"{}_\".format(self.type) + \"\".join(ch for ch in self.name if ch.isalnum())\n if not (name)\n else name\n )\n key_columns = \", \" + \", \".join(vdf.get_columns(exclude_columns=[y]))\n transform_relation = self.transform_relation.replace(\n \"[VerticaPy_y]\", y\n ).replace(\"[VerticaPy_ts]\", ts)\n transform_relation = transform_relation.replace(\n \"[VerticaPy_key_columns]\", key_columns\n )\n predictSQL = self.deploySQL().replace(\"[VerticaPy_y]\", y).replace(\n \"[VerticaPy_ts]\", ts\n ) + \" AS {}\".format(name)\n for idx, elem in enumerate(X):\n transform_relation = transform_relation.replace(\"[X{}]\".format(idx), elem)\n predictSQL = predictSQL.replace(\"[X{}]\".format(idx), elem)\n columns = (\n vdf.get_columns(exclude_columns=[y])\n + [predictSQL]\n + [\"VerticaPy_y_copy AS {}\".format(y)]\n )\n relation = vdf.__genSQL__()\n for i in range(nlead):\n query = \"SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1\".format(\n ts, ts, ts, relation, ts\n )\n deltat = vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()[0]\n query = \"SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}\".format(\n ts, deltat, relation\n )\n next_t = vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()[0]\n if i == 0:\n first_t = next_t\n new_line = \"SELECT '{}'::TIMESTAMP AS {}, {}\".format(\n next_t,\n ts,\n \", \".join(\n [\n \"NULL AS {}\".format(column)\n for column in vdf.get_columns(exclude_columns=[ts])\n ]\n ),\n )\n relation_tmp = \"(SELECT {} FROM {} UNION ALL ({})) 
VERTICAPY_SUBTABLE\".format(\n                \", \".join([ts] + vdf.get_columns(exclude_columns=[ts])),\n                relation,\n                new_line,\n            )\n            query = \"SELECT {} FROM {} ORDER BY {} DESC LIMIT 1\".format(\n                self.deploySQL()\n                .replace(\"[VerticaPy_y]\", y)\n                .replace(\"[VerticaPy_ts]\", ts),\n                transform_relation.format(relation_tmp),\n                ts,\n            )\n            prediction = (\n                vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()[0]\n            )\n            columns_tmp = vdf.get_columns(exclude_columns=[ts, y])\n            new_line = \"SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}\".format(\n                next_t,\n                ts,\n                prediction,\n                y,\n                (\", \" if (columns_tmp) else \"\")\n                + \", \".join([\"NULL AS {}\".format(column) for column in columns_tmp]),\n            )\n            relation = \"(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE\".format(\n                \", \".join([ts, y] + vdf.get_columns(exclude_columns=[ts, y])),\n                relation,\n                new_line,\n            )\n        final_relation = \"(SELECT {} FROM {}) VERTICAPY_SUBTABLE\".format(\n            \", \".join(columns), transform_relation.format(relation)\n        )\n        result = vdf_from_relation(final_relation, \"SARIMAX\", self.cursor,)\n        if nlead > 0:\n            result[y].apply(\n                \"CASE WHEN {} >= '{}' THEN NULL ELSE {} END\".format(ts, first_t, \"{}\")\n            )\n        return result\n\n\n# ---#\nclass VAR(Regressor):\n    \"\"\"\n---------------------------------------------------------------------------\n[Beta Version]\nCreates a VAR object using the Vertica Linear Regression algorithm on the \ndata.\n\nParameters\n----------\nname: str\n    Name of the model. The model will be stored in the DB.\ncursor: DBcursor, optional\n    Vertica database cursor.\np: int, optional\n    Order of the AR (Auto-Regressive) part.\ntol: float, optional\n    Determines whether the algorithm has reached the specified accuracy result.\nmax_iter: int, optional\n    Determines the maximum number of iterations the algorithm performs before \n    achieving the specified accuracy result.\nsolver: str, optional\n    The optimizer method to use to train the model. 
\n Newton : Newton Method\n BFGS : Broyden Fletcher Goldfarb Shanno\n \"\"\"\n\n def __init__(\n self,\n name: str,\n cursor=None,\n p: int = 1,\n tol: float = 1e-4,\n max_iter: int = 1000,\n solver: str = \"Newton\",\n ):\n check_types([(\"name\", name, [str],)])\n self.type, self.name = \"VAR\", name\n assert p > 0, ParameterError(\n \"Parameter 'p' must be greater than 0 to build a VAR model.\"\n )\n self.set_params(\n {\"p\": p, \"tol\": tol, \"max_iter\": max_iter, \"solver\": solver,}\n )\n cursor = check_cursor(cursor)[0]\n self.cursor = cursor\n version(cursor=cursor, condition=[8, 0, 0])\n\n # ---#\n def deploySQL(self):\n \"\"\"\n ---------------------------------------------------------------------------\n Returns the SQL code needed to deploy the model.\n\n Returns\n -------\n str\n the SQL code needed to deploy the model.\n \"\"\"\n sql = []\n for idx, coefs in enumerate(self.coef_):\n coefs_tmp = coefs.values[\"coefficient\"]\n predictors_tmp = coefs.values[\"predictor\"]\n sql += [\n str(coefs_tmp[0])\n + \" + \"\n + \" + \".join(\n [\n str(coefs_tmp[i]) + \" * \" + str(predictors_tmp[i])\n for i in range(1, len(coefs_tmp))\n ]\n )\n ]\n return sql\n\n # ---#\n def features_importance(\n self, X_idx: int = 0, ax=None, show: bool = True, **style_kwds,\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Computes the model's features importance.\n\n Parameters\n ----------\n X_idx: int/str, optional\n Index of the main vector vcolumn used to draw the features importance.\n It can also be the name of a predictor vcolumn.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n show: bool\n If set to True, draw the features importance.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax\n Matplotlib axes object\n \"\"\"\n check_types([(\"X_idx\", X_idx, [int, float, str],), (\"show\", show, [bool],),],)\n if isinstance(X_idx, str):\n X_idx = str_column(X_idx).lower()\n for idx, elem in enumerate(self.X):\n if str_column(elem).lower() == X_idx:\n X_idx = idx\n break\n assert (\n isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0\n ), ParameterError(\n \"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. 
It can also be the name of a predictor vcolumn.\".format(\n len(self.X)\n )\n )\n relation = self.transform_relation.replace(\"[VerticaPy_ts]\", self.ts).format(\n self.test_relation\n )\n for idx, elem in enumerate(self.X):\n relation = relation.replace(\"[X{}]\".format(idx), elem)\n min_max = (\n vdf_from_relation(relation=self.input_relation, cursor=self.cursor)\n .agg(func=[\"min\", \"max\"], columns=self.X)\n .transpose()\n )\n coefficient = self.coef_[X_idx].values\n coeff_importances = {}\n coeff_sign = {}\n for idx, coef in enumerate(coefficient[\"predictor\"]):\n if idx > 0:\n predictor = int(coef.split(\"_\")[0].replace(\"ar\", \"\"))\n predictor = str_column(self.X[predictor])\n minimum, maximum = min_max[predictor]\n val = coefficient[\"coefficient\"][idx]\n coeff_importances[coef] = abs(val) * (maximum - minimum)\n coeff_sign[coef] = 1 if val >= 0 else -1\n total = sum([coeff_importances[elem] for elem in coeff_importances])\n for elem in coeff_importances:\n coeff_importances[elem] = 100 * coeff_importances[elem] / total\n if show:\n plot_importance(\n coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds,\n )\n importances = {\"index\": [\"importance\", \"sign\"]}\n for elem in coeff_importances:\n importances[elem] = [coeff_importances[elem], coeff_sign[elem]]\n return tablesample(values=importances).transpose()\n\n # ---#\n def fit(\n self,\n input_relation: Union[vDataFrame, str],\n X: list,\n ts: str,\n test_relation: Union[vDataFrame, str] = \"\",\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Trains the model.\n\n Parameters\n ----------\n input_relation: str/vDataFrame\n Training relation.\n X: list\n List of the response columns.\n ts: str\n vcolumn used to order the data.\n test_relation: str/vDataFrame, optional\n Relation used to test the model.\n\n Returns\n -------\n object\n self\n \"\"\"\n check_types(\n [\n (\"input_relation\", input_relation, [str, vDataFrame],),\n (\"X\", X, [list],),\n (\"ts\", ts, [str],),\n (\"test_relation\", test_relation, [str, vDataFrame],),\n ]\n )\n self.cursor = check_cursor(self.cursor, input_relation, True)[0]\n # Initialization\n does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)\n self.input_relation = (\n input_relation\n if isinstance(input_relation, str)\n else input_relation.__genSQL__()\n )\n if isinstance(test_relation, vDataFrame):\n self.test_relation = test_relation.__genSQL__()\n elif test_relation:\n self.test_relation = test_relation\n else:\n self.test_relation = self.input_relation\n self.ts, self.deploy_predict_ = str_column(ts), []\n self.X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]\n model = LinearRegression(\n name=self.name,\n solver=self.parameters[\"solver\"],\n max_iter=self.parameters[\"max_iter\"],\n tol=self.parameters[\"tol\"],\n )\n\n # AR(p)\n columns, AR = [], []\n for idx, elem in enumerate(self.X):\n for i in range(1, self.parameters[\"p\"] + 1):\n columns += [\n \"LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}\".format(\n idx, i, idx, i\n )\n ]\n AR += [\"AR{}_{}\".format(idx, i)]\n self.transform_relation = \"(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE\".format(\n \", \".join(columns), \"{}\"\n )\n relation = self.transform_relation.replace(\"[VerticaPy_ts]\", self.ts).format(\n self.input_relation\n )\n for idx, elem in enumerate(self.X):\n relation = relation.replace(\"[X{}]\".format(idx), elem)\n\n def drop_temp_elem(self, schema):\n try:\n with 
warnings.catch_warnings(record=True) as w:\n drop(\n \"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}\".format(\n schema, get_session(self.cursor)\n ),\n cursor=self.cursor,\n method=\"view\",\n )\n except:\n pass\n\n drop_temp_elem(self, schema)\n try:\n query = \"CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}\".format(\n schema, get_session(self.cursor), relation\n )\n self.cursor.execute(query)\n self.coef_ = []\n for elem in X:\n model.fit(\n input_relation=\"{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}\".format(\n schema, get_session(self.cursor)\n ),\n X=AR,\n y=elem,\n )\n self.coef_ += [model.coef_]\n model.drop()\n except:\n drop_temp_elem(self, schema)\n raise\n drop_temp_elem(self, schema)\n model_save = {\n \"type\": \"VAR\",\n \"input_relation\": self.input_relation,\n \"test_relation\": self.test_relation,\n \"transform_relation\": self.transform_relation,\n \"deploy_predict\": self.deploy_predict_,\n \"X\": self.X,\n \"ts\": self.ts,\n \"p\": self.parameters[\"p\"],\n \"tol\": self.parameters[\"tol\"],\n \"max_iter\": self.parameters[\"max_iter\"],\n \"solver\": self.parameters[\"solver\"],\n }\n for idx, elem in enumerate(self.coef_):\n model_save[\"coef_{}\".format(idx)] = elem.values\n insert_verticapy_schema(\n model_name=self.name,\n model_type=\"VAR\",\n model_save=model_save,\n cursor=self.cursor,\n )\n return self\n\n # ---#\n def fpredict(self, L: list):\n \"\"\"\n ---------------------------------------------------------------------------\n Computes the prediction.\n\n Parameters\n ----------\n L: list\n List containing the data. It must be a two-dimensional list containing \n multiple rows. Each row must include as first element the ordered predictor \n and as nth elements the nth - 1 exogenous variable (nth > 2).\n\n Returns\n -------\n float\n the prediction.\n \"\"\"\n try:\n result = []\n result_tmp = 0\n for i in range(len(self.X)):\n result_tmp = 0\n for j in range(len(self.coef_[i].values[\"coefficient\"])):\n elem = self.coef_[i].values[\"predictor\"][j]\n if elem.lower() == \"intercept\":\n result_tmp += self.coef_[i].values[\"coefficient\"][j]\n else:\n ni, nj = elem[2:].split(\"_\")\n ni, nj = int(ni), int(nj)\n result_tmp += (\n self.coef_[i].values[\"coefficient\"][j] * L[-nj][ni]\n )\n result += [result_tmp]\n return result\n except:\n return None\n\n # ---#\n def plot(\n self,\n vdf: vDataFrame = None,\n X: list = [],\n ts: str = \"\",\n X_idx: int = 0,\n dynamic: bool = False,\n one_step: bool = True,\n observed: bool = True,\n confidence: bool = True,\n nlead: int = 10,\n nlast: int = 0,\n limit: int = 1000,\n ax=None,\n **style_kwds,\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Draws the VAR model.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n X: list, optional\n List of the response columns.\n ts: str, optional\n vcolumn used to order the data.\n X_idx: int, optional\n Index of the main vector vcolumn to draw. 
It can also be the name of a \n predictor vcolumn.\n dynamic: bool, optional\n If set to True, the dynamic forecast will be drawn.\n one_step: bool, optional\n If set to True, the one step ahead forecast will be drawn.\n observed: bool, optional\n If set to True, the observation will be drawn.\n confidence: bool, optional\n If set to True, the confidence ranges will be drawn.\n nlead: int, optional\n Number of predictions computed by the dynamic forecast after\n the last ts date.\n nlast: int, optional\n The dynamic forecast will start nlast values before the last\n ts date.\n limit: int, optional\n Maximum number of past elements to use.\n ax: Matplotlib axes object, optional\n The axes to plot on.\n **style_kwds\n Any optional parameter to pass to the Matplotlib functions.\n\n Returns\n -------\n ax \n Matplotlib axes object\n \"\"\"\n if not (vdf):\n vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)\n check_types(\n [\n (\"limit\", limit, [int, float],),\n (\"nlead\", nlead, [int, float],),\n (\"X_idx\", X_idx, [int, float, str],),\n (\"dynamic\", dynamic, [bool],),\n (\"observed\", observed, [bool],),\n (\"one_step\", one_step, [bool],),\n (\"confidence\", confidence, [bool],),\n (\"vdf\", vdf, [vDataFrame],),\n ],\n )\n delta_limit, limit = (\n limit,\n max(max(limit, self.parameters[\"p\"] + 1 + nlast), 200),\n )\n delta_limit = max(limit - delta_limit - nlast, 0)\n if not (ts):\n ts = self.ts\n if not (X):\n X = self.X\n assert dynamic or one_step or observed, ParameterError(\n \"No option selected.\\n You should set either dynamic, one_step or observed to True.\"\n )\n assert nlead + nlast > 0 or not (dynamic), ParameterError(\n \"Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True.\"\n )\n if isinstance(X_idx, str):\n X_idx = str_column(X_idx).lower()\n for idx, elem in enumerate(X):\n if str_column(elem).lower() == X_idx:\n X_idx = idx\n break\n assert (\n isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0\n ), ParameterError(\n \"The index of the vcolumn to draw 'X_idx' must be between 0 and {}. 
It can also be the name of a predictor vcolumn.\".format(\n len(self.X)\n )\n )\n result_all = self.predict(\n vdf=vdf,\n X=X,\n ts=ts,\n nlead=0,\n name=[\n \"_verticapy_prediction_{}_\".format(idx) for idx in range(len(self.X))\n ],\n )\n y, prediction = X[X_idx], \"_verticapy_prediction_{}_\".format(X_idx)\n error_eps = 1.96 * math.sqrt(self.score(method=\"mse\").values[\"mse\"][X_idx])\n print_info = verticapy.options[\"print_info\"]\n verticapy.options[\"print_info\"] = False\n try:\n result = (\n result_all.select([ts, y, prediction])\n .dropna()\n .sort([ts])\n .tail(limit)\n .values\n )\n except:\n verticapy.options[\"print_info\"] = print_info\n raise\n verticapy.options[\"print_info\"] = print_info\n columns = [elem for elem in result]\n if isinstance(result[columns[0]][0], str):\n result[columns[0]] = [parse(elem) for elem in result[columns[0]]]\n true_value = [result[columns[0]], result[columns[1]]]\n one_step_ahead = [result[columns[0]], result[columns[2]]]\n lower_osa, upper_osa = (\n [\n float(elem) - error_eps if elem != None else None\n for elem in one_step_ahead[1]\n ],\n [\n float(elem) + error_eps if elem != None else None\n for elem in one_step_ahead[1]\n ],\n )\n if dynamic:\n print_info = verticapy.options[\"print_info\"]\n verticapy.options[\"print_info\"] = False\n try:\n result = (\n result_all.select([ts] + X).dropna().sort([ts]).tail(limit).values\n )\n except:\n verticapy.options[\"print_info\"] = print_info\n raise\n verticapy.options[\"print_info\"] = print_info\n columns = [elem for elem in result]\n if isinstance(result[columns[0]][0], str):\n result[columns[0]] = [parse(elem) for elem in result[columns[0]]]\n deltat = result[columns[0]][-1] - result[columns[0]][-2]\n lead_time_list, lead_list = [], []\n if nlast > 0:\n for i in range(len(result[columns[0]][:-nlast])):\n lead_list += [[result[elem][i] for elem in columns[1:]]]\n else:\n for i in range(len(result[columns[0]])):\n lead_list += [[result[elem][i] for elem in columns[1:]]]\n for i in range(nlast):\n lead_list += [self.fpredict(lead_list)]\n lead_time_list += [result[columns[0]][i - nlast]]\n if lead_time_list:\n start_time = lead_time_list[-1]\n else:\n start_time = result[columns[0]][-1]\n for i in range(nlead):\n lead_list += [self.fpredict(lead_list)]\n lead_time_list += [start_time + (i + 1) * deltat]\n dynamic_forecast = (\n [result[columns[0]][-nlast - 1]] + lead_time_list,\n [result[columns[1 + X_idx]][-nlast - 1]]\n + [elem[X_idx] for elem in lead_list[-nlast - nlead :]],\n )\n lower_d, upper_d = [], []\n for i in range(len(dynamic_forecast[1])):\n delta_error = error_eps * math.sqrt(i + 1)\n lower_d += [float(dynamic_forecast[1][i]) - delta_error]\n upper_d += [float(dynamic_forecast[1][i]) + delta_error]\n else:\n lower_d, upper_d, dynamic_forecast = [], [], ([], [])\n alpha = 0.3\n if not (ax):\n fig, ax = plt.subplots()\n if isnotebook():\n fig.set_size_inches(10, 6)\n ax.grid()\n colors = gen_colors()\n param1 = {\n \"color\": colors[2],\n \"linewidth\": 2,\n }\n param2 = {\n \"color\": colors[3],\n \"linewidth\": 2,\n \"linestyle\": \":\",\n }\n param3 = {\n \"color\": colors[0],\n \"linewidth\": 2,\n \"linestyle\": \"dashed\",\n }\n if dynamic:\n ax.fill_between(\n dynamic_forecast[0],\n 1.02\n * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n 1.02\n * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n alpha=0.04,\n color=updated_dict(param3, style_kwds, 2)[\"color\"],\n )\n if confidence:\n ax.fill_between(\n dynamic_forecast[0], 
lower_d, upper_d, alpha=0.08, color=\"#555555\"\n )\n ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color=\"#000000\")\n ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color=\"#000000\")\n ax.plot(\n dynamic_forecast[0],\n dynamic_forecast[1],\n label=\"Dynamic Forecast\",\n **updated_dict(param3, style_kwds, 2),\n )\n if one_step:\n if confidence:\n ax.fill_between(\n one_step_ahead[0][delta_limit:],\n lower_osa[delta_limit:],\n upper_osa[delta_limit:],\n alpha=0.04,\n color=\"#555555\",\n )\n ax.plot(\n one_step_ahead[0][delta_limit:],\n lower_osa[delta_limit:],\n alpha=0.04,\n color=\"#000000\",\n )\n ax.plot(\n one_step_ahead[0][delta_limit:],\n upper_osa[delta_limit:],\n alpha=0.04,\n color=\"#000000\",\n )\n ax.plot(\n one_step_ahead[0][delta_limit:],\n one_step_ahead[1][delta_limit:],\n label=\"One-step ahead Forecast\",\n **updated_dict(param2, style_kwds, 1),\n )\n if observed:\n ax.plot(\n true_value[0][delta_limit:],\n true_value[1][delta_limit:],\n label=\"Observed\",\n **updated_dict(param1, style_kwds, 0),\n )\n ax.set_title(\"VAR({}) [{}]\".format(self.parameters[\"p\"], y))\n ax.set_xlabel(ts)\n ax.legend(loc=\"center left\", bbox_to_anchor=[1, 0.5])\n ax.set_ylim(\n 1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n 1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n return ax\n\n # ---#\n def predict(\n self,\n vdf: vDataFrame,\n X: list = [],\n ts: str = \"\",\n nlead: int = 0,\n name: list = [],\n ):\n \"\"\"\n ---------------------------------------------------------------------------\n Predicts using the input relation.\n\n Parameters\n ----------\n vdf: vDataFrame\n Object to use to run the prediction.\n X: list, optional\n List of the response columns.\n ts: str, optional\n vcolumn used to order the data.\n nlead: int, optional\n Number of records to predict after the last ts date.\n name: list, optional\n Names of the added vcolumns. 
If empty, names will be generated.\n\n Returns\n -------\n vDataFrame\n object including the prediction.\n \"\"\"\n check_types(\n [\n (\"name\", name, [list],),\n (\"ts\", ts, [str],),\n (\"nlead\", nlead, [int, float],),\n (\"X\", X, [list],),\n (\"vdf\", vdf, [vDataFrame],),\n ],\n )\n if not (ts):\n ts = self.ts\n if not (X):\n X = self.X\n columns_check(X + [ts], vdf)\n X = vdf_columns_names(X, vdf)\n ts = vdf_columns_names([ts], vdf)[0]\n all_pred, names = [], []\n transform_relation = self.transform_relation.replace(\"[VerticaPy_ts]\", self.ts)\n for idx, elem in enumerate(X):\n name_tmp = (\n \"{}_\".format(self.type) + \"\".join(ch for ch in elem if ch.isalnum())\n if len(name) != len(X)\n else name[idx]\n )\n all_pred += [\"{} AS {}\".format(self.deploySQL()[idx], name_tmp)]\n transform_relation = transform_relation.replace(\"[X{}]\".format(idx), elem)\n columns = vdf.get_columns() + all_pred\n relation = vdf.__genSQL__()\n for i in range(nlead):\n query = \"SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1\".format(\n ts, ts, ts, relation, ts\n )\n deltat = vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()[0]\n query = \"SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}\".format(\n ts, deltat, relation\n )\n next_t = vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()[0]\n if i == 0:\n first_t = next_t\n new_line = \"SELECT '{}'::TIMESTAMP AS {}, {}\".format(\n next_t,\n ts,\n \", \".join(\n [\n \"NULL AS {}\".format(column)\n for column in vdf.get_columns(exclude_columns=[ts])\n ]\n ),\n )\n relation_tmp = \"(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE\".format(\n \", \".join([ts] + vdf.get_columns(exclude_columns=[ts])),\n relation,\n new_line,\n )\n query = \"SELECT {} FROM {} ORDER BY {} DESC LIMIT 1\".format(\n \", \".join(self.deploySQL()), transform_relation.format(relation_tmp), ts\n )\n prediction = vdf._VERTICAPY_VARIABLES_[\"cursor\"].execute(query).fetchone()\n for idx, elem in enumerate(X):\n prediction[idx] = \"{} AS {}\".format(prediction[idx], elem)\n columns_tmp = vdf.get_columns(exclude_columns=[ts] + X)\n new_line = \"SELECT '{}'::TIMESTAMP AS {}, {} {}\".format(\n next_t,\n ts,\n \", \".join(prediction),\n (\", \" if (columns_tmp) else \"\")\n + \", \".join([\"NULL AS {}\".format(column) for column in columns_tmp]),\n )\n relation = \"(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE\".format(\n \", \".join([ts] + X + vdf.get_columns(exclude_columns=[ts] + X)),\n relation,\n new_line,\n )\n final_relation = \"(SELECT {} FROM {}) VERTICAPY_SUBTABLE\".format(\n \", \".join(columns), transform_relation.format(relation)\n )\n result = vdf_from_relation(final_relation, \"VAR\", self.cursor,)\n if nlead > 0:\n for elem in X:\n result[elem].apply(\n \"CASE WHEN {} >= '{}' THEN NULL ELSE {} END\".format(\n ts, first_t, \"{}\"\n )\n )\n return result\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
jiachengxu/io
[ "0ef0f21193d7a48c50f8cddeaa1f0fb3056040ea" ]
[ "tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SequenceFile Dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow import dtypes\nfrom tensorflow.compat.v1 import data\nfrom tensorflow_io.core.python.ops import _load_library\nhadoop_ops = _load_library('_hadoop_ops.so')\n\n\nclass SequenceFileDataset(data.Dataset):\n \"\"\"A Sequence File Dataset that reads the sequence file.\"\"\"\n\n def __init__(self, filenames):\n \"\"\"Create a `SequenceFileDataset`.\n\n `SequenceFileDataset` allows a user to read data from a hadoop sequence\n file. A sequence file consists of (key value) pairs sequentially. At\n the moment, `org.apache.hadoop.io.Text` is the only serialization type\n being supported, and there is no compression support.\n\n For example:\n\n ```python\n dataset = SequenceFileDataset(\"/foo/bar.seq\")\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n # Prints the (key, value) pairs inside a hadoop sequence file.\n while True:\n try:\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n ```\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n self._filenames = tf.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n super(SequenceFileDataset, self).__init__()\n\n def _inputs(self):\n return []\n\n def _as_variant_tensor(self):\n return hadoop_ops.sequence_file_dataset(\n self._filenames, (dtypes.string, dtypes.string))\n\n @property\n def output_classes(self):\n return tf.Tensor, tf.Tensor\n\n @property\n def output_shapes(self):\n return (tf.TensorShape([]), tf.TensorShape([]))\n\n @property\n def output_types(self):\n return dtypes.string, dtypes.string\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.TensorShape" ] ]
chrisroadmap/Near_term_warming
[ "7fc712fdcbf135bc3a73027b1c7b5a3504c5ea5e" ]
[ "analysis_figure_code/SuppFig2/SuppFig2.py" ]
[ "import numpy as np\nimport numpy.ma as npma\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport baspy as bp\nimport fnmatch\n\n\"\"\"\nCreated on Wed Nov 27 18:34 2019\n\n@author: Christine McKenna\n\n========================================================================\nPurpose: Plots Supp Fig 2, a pdf of all possible 20-year trends in gsat \n for CMIP6 piControl simulations for each model. First detrends\n the raw gsat time series to remove any long term drift,\n which could bias 20-year trends (e.g. if positive drift,\n pdf of 20-year trends likely biased positive).\n Saves pdf of 20-year trends for models used in Supp Fig 8. \n========================================================================\n\"\"\"\n\n\n# Required directories\nloaddir_CMIP = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\\\n 'SuppFig2/saved_arrays'\nsavedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\\\n 'SuppFig8/saved_data'\n\n\n### ------ Load in CMIP6 data ------\n\n# Load models\nmodels = np.load(loaddir_CMIP+'/models_gtas_CMIP6_piControl.npy')\n\n# Load catalogue so can extract runids\nvar = 'tas'\ncat_PI = bp.catalogue(dataset='cmip6',Var=var,Experiment='piControl',\\\n CMOR='Amon')\nyears = np.linspace(1,20,20)\n\n### Process data, one model and RunID at a time\ni = 0\nfig,axs = plt.subplots(7,7,sharex=True,sharey=True,\\\n figsize=(15,12))\nfig.suptitle('PDFs of rolling GSAT trends for 20-year segments of CMIP6 '+\\\n 'piControl runs',fontsize=20)\naxs = axs.ravel()\n\nfor model in models:\n\n ## Get data for model\n filtmod_PI = cat_PI[cat_PI['Model'] == model]\n\n ## Only keep r1i1p1f?\n runids_PI = np.unique(filtmod_PI['RunID'])\n runids_PI = fnmatch.filter(runids_PI,'r1i1p1f?')\n\n ## Get data for each RunID\n for runid in runids_PI: \n\n ## Load gsat data\n gsat_tmp = np.load(loaddir_CMIP+'/gtas_'+model+'_'+runid+\\\n '_CMIP6_piControl.npy')\n ny = len(gsat_tmp)\n\n ## Remove any drift\n [m,c,_,_,_] = stats.linregress(np.linspace(0,ny-1,ny),gsat_tmp)\n gsat_lin = m*np.linspace(0,ny-1,ny)+c\n gsat = gsat_tmp - gsat_lin\n\n ## Calculate trends\n gsat_trends = np.zeros([ny-20])\n for y in xrange(0,ny-20):\n [m,_,_,_,_] = stats.linregress(years,gsat[y:y+20])\n gsat_trends[y] = m*10\n\n ## If model used in Supp Fig 8 save pdf of 20y trends\n if (model == 'BCC-CSM2-MR') or (model == 'MIROC-ES2L'):\n np.save(savedir+'/gsat_20ytrends_CMIP6_piControl_'+\\\n model+'.npy',gsat_trends) \n\n \n ### ------ Plot results ------\n\n ### Plot individual models \n axs[i].hist(gsat_trends,density=True)\n axs[i].set_title(model,fontsize=13) \n axs[i].plot(np.zeros([2]),[0,11],'grey',linewidth=1)\n axs[i].plot(np.ones([2])*(-0.075),[0,11],'black',\\\n linewidth=1,linestyle='--')\n axs[i].plot(np.ones([2])*(0.072),[0,11],'black',\\\n linewidth=1,linestyle='--')\n axs[i].plot(np.ones([2])*(-0.084),[0,11],'black',\\\n linewidth=1,linestyle='--')\n axs[i].plot(np.ones([2])*(0.094),[0,11],'black',\\\n linewidth=1,linestyle='--')\n axs[i].tick_params(labelsize=13)\n i += 1\n\n\nfig.text(0.5,0.02,'$^{\\circ}$C / decade',ha='center',\\\n va='center',fontsize=18)\nfig.text(0.02,0.5,'Probability density',ha='center',va='center',\\\n rotation='vertical',fontsize=18)\naxs[i-1].set_xlim([-0.3,0.3])\naxs[i-1].set_ylim([0,11])\naxs[i].axis('off')\nplt.subplots_adjust(top=0.9,bottom=0.07,left=0.07,right=0.97,\\\n wspace=0.17,hspace=0.27)\nplt.show()\n\n" ]
[ [ "numpy.zeros", "numpy.ones", "numpy.load", "scipy.stats.linregress", "matplotlib.pyplot.subplots", "numpy.save", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.subplots_adjust", "numpy.unique" ] ]
zbzhu99/SMARTS
[ "652aa23e71bd4e2732e2742140cfcd0ec082a7da" ]
[ "smarts/core/tests/test_sensors.py" ]
[ "# MIT License\n#\n# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom helpers.scenario import temp_scenario\n\nfrom smarts.core.agent_interface import AgentInterface\nfrom smarts.core.coordinates import Heading, Pose\nfrom smarts.core.plan import Plan\nfrom smarts.core.scenario import Scenario\nfrom smarts.core.sensors import DrivenPathSensor, WaypointsSensor\nfrom smarts.sstudio import gen_scenario\nfrom smarts.sstudio import types as t\n\nAGENT_ID = \"Agent-007\"\n\n\ndef test_driven_path_sensor():\n vehicle = mock.Mock()\n sim = mock.Mock()\n\n max_path_length = 5\n sensor = DrivenPathSensor(vehicle, max_path_length=max_path_length)\n\n positions = [(x, 0, 0) for x in range(0, 100, 10)]\n sim_times = list(range(0, 50, 5))\n for idx, (position, sim_time) in enumerate(zip(positions, sim_times)):\n sim.elapsed_sim_time = sim_time\n vehicle.position = position\n sensor.track_latest_driven_path(sim)\n\n if idx >= 3:\n assert sensor.distance_travelled(sim, last_n_steps=3) == 30\n assert sensor.distance_travelled(sim, last_n_seconds=10) == 20\n\n assert len(sensor()) <= max_path_length\n\n sensor.teardown()\n\n\n@pytest.fixture\ndef scenarios():\n with temp_scenario(name=\"straight\", map=\"maps/6lane.net.xml\") as scenario_root:\n ego_missions = [\n t.Mission(\n t.Route(\n begin=(\"edge-west-WE\", 0, 10),\n end=(\"edge-east-WE\", 0, \"max\"),\n )\n ),\n ]\n gen_scenario(\n t.Scenario(ego_missions=ego_missions),\n output_dir=scenario_root,\n )\n\n yield Scenario.variations_for_all_scenario_roots(\n [str(scenario_root)], [AGENT_ID]\n )\n\n\ndef test_waypoints_sensor(scenarios):\n scenario = next(scenarios)\n sim = mock.Mock()\n vehicle = mock.Mock()\n vehicle.pose = Pose(\n position=np.array([33, -65, 0]),\n orientation=[0, 0, 0, 0],\n heading_=Heading(0),\n )\n\n mission = scenario.missions[AGENT_ID]\n plan = Plan(scenario.road_map, mission)\n\n sensor = WaypointsSensor(vehicle, plan)\n waypoints = sensor()\n\n assert len(waypoints) == 3\n" ]
[ [ "numpy.array" ] ]
ultimus11/Foreground-Detection-OpenCV
[ "910d6ffa2d37b999ed746ebc69da289d9b48bdf1" ]
[ "code/grab_foreground.py" ]
[ "import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n#read image\r\nimg = np.array(cv2.imread('1.jpg'))\r\n\r\n#this is mask\r\nmask = np.zeros(img.shape[:2],np.uint8)\r\n\r\n#this bgdModel and fgdModel is used in background\r\nbgdModel = np.zeros((1,65),np.float64)\r\nfgdModel = np.zeros((1,65),np.float64)\r\n\r\n#This is a rectangular cross section of given image where it will search for foreground\r\nrect = (35,30,330,312)\r\n\r\n#This is a grabcut func from opencv which is used to detect foreground\r\ncv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)\r\nmask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\r\nimg = img*mask2[:,:,np.newaxis]\r\n\r\n#here we show our image \r\nplt.imshow(img)\r\nplt.colorbar()\r\nplt.show()\r\ncv2.imshow(\"sdfg\",img)\r\ncv2.waitKey(0)\r\ncv2.imwrite(\"foreground.jpg\",img)\r\n" ]
[ [ "matplotlib.pyplot.colorbar", "numpy.zeros", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
xuhongzuo/Outlier-Interpretation
[ "9bc2dbedcb7b89d0e4ecf7cea2a60612ab19ae4a" ]
[ "model_aton/datasets.py" ]
[ "\"\"\"\nThis script implements an outlier interpretation method of the following paper:\n\"Beyond Outlier Detection: Outlier Interpretation by Attention-Guided Triplet Deviation Network\". in WWW'21.\n@ Author: Hongzuo Xu\n@ email: hongzuo.xu@gmail.com or leogarcia@126.com or xuhongzuo13@nudt.edu.cn\n\"\"\"\n\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom sklearn.neighbors import NearestNeighbors\n\n\nclass SingleTripletDataset(Dataset):\n def __init__(self, anom_idx, x, y, triplets_selector, transform=None):\n self.transform = transform\n self.data = x\n self.triplets = triplets_selector.get_triplets(anom_idx, x, y)\n\n def __getitem__(self, index):\n a_idx, p_idx, n_idx = self.triplets[index]\n anchor, positive, negative = self.data[a_idx], self.data[p_idx], self.data[n_idx]\n if self.transform is not None:\n anchor = self.transform(anchor)\n positive = self.transform(positive)\n negative = self.transform(negative)\n return anchor, positive, negative\n\n def __len__(self):\n return len(self.triplets)\n\n\nclass SingleDataset(Dataset):\n def __init__(self, anom_idx, x, y, data_selector, transform=None):\n self.transform = transform\n self.selected_data = data_selector.get_data(anom_idx, x, y)\n\n def __getitem__(self, index):\n data = self.selected_data[0][index]\n target = self.selected_data[1][index]\n if self.transform is not None:\n data = self.transform(data)\n return data, target\n\n def __len__(self):\n return len(self.selected_data[0])\n\n\nclass SingleTripletDatasetClf(Dataset):\n def __init__(self, anom_idx, x, y, triplets_selector, transform=None):\n self.transform = transform\n self.data = x\n self.triplets, self.targets = triplets_selector.get_triplets(anom_idx, x, y)\n\n def __getitem__(self, index):\n a_idx, p_idx, n_idx = self.triplets[index]\n a_target, p_target, n_target = self.targets[index]\n anchor, positive, negative = self.data[a_idx], self.data[p_idx], self.data[n_idx]\n if self.transform is not None:\n anchor = self.transform(anchor)\n positive = self.transform(positive)\n negative = self.transform(negative)\n return anchor, positive, negative, a_target, p_target, n_target\n\n def __len__(self):\n return len(self.triplets)\n\n\nclass MyHardSingleTripletSelector:\n def __init__(self, nbrs_num, rand_num, nbr_indices):\n self.x = None\n self.y = None\n self.nbrs_num = nbrs_num\n self.rand_num = rand_num\n self.nbr_indices = nbr_indices\n\n def get_triplets(self, anom_idx, x, y, normal_label=0):\n self.x = x.cpu().data.numpy()\n self.y = y.cpu().data.numpy()\n\n # anom_x = self.x[anom_idx]\n # x_noml = self.x[noml_idx]\n # n_neighbors = self.nbrs_num\n # nbrs_local = NearestNeighbors(n_neighbors=n_neighbors).fit(x_noml)\n # nbr_indices = noml_idx[nbrs_local.kneighbors([anom_x])[1].flatten()]\n\n noml_idx = np.where(self.y == normal_label)[0]\n nbr_indices = self.nbr_indices\n rand_num = self.rand_num\n\n rand_canddt = np.setdiff1d(noml_idx, nbr_indices)\n rand_indices = np.random.choice(rand_canddt, rand_num, replace=False)\n\n triplets = [[anchor, positive, anom_idx]\n for anchor in rand_indices\n for positive in nbr_indices]\n return torch.LongTensor(np.array(triplets))\n\n\nclass MyHardSingleSelectorClf:\n def __init__(self, nbrs_num, rand_num):\n self.nbrs_num = nbrs_num\n self.rand_num = rand_num\n\n def get_data(self, anom_idx, x, y, normal_label=0):\n x = x.cpu().data.numpy()\n y = y.cpu().data.numpy()\n\n anom_x = x[anom_idx]\n noml_idx = np.where(y == normal_label)[0]\n x_noml = x[noml_idx]\n\n nbrs_local = 
NearestNeighbors(n_neighbors=self.nbrs_num).fit(x_noml)\n nbr_indices = noml_idx[nbrs_local.kneighbors([anom_x])[1].flatten()]\n rand_canddt = np.setdiff1d(noml_idx, nbr_indices)\n rand_indices = np.random.choice(rand_canddt, self.rand_num, replace=False)\n\n # perturbation to augment: build rand_num + nbrs_num slightly jittered\n # copies of the anomaly by scaling three randomly chosen features\n dim = x.shape[1]\n anom_lst = []\n anom_lst.append(anom_x)\n for i in range(self.rand_num + self.nbrs_num - 1):\n new_anom_x = anom_x.copy()\n choose_f = np.random.choice(np.arange(dim), 3)\n for a in choose_f:\n new_anom_x[a] = anom_x[a] * 1.01\n anom_lst.append(new_anom_x)\n\n data_idx = np.hstack([rand_indices, nbr_indices])\n norm_data = x[data_idx]\n data = np.vstack([np.array(anom_lst), norm_data])\n target = np.hstack([np.ones(len(anom_lst), dtype=int), np.zeros(len(rand_indices), dtype=int), np.zeros(len(nbr_indices), dtype=int)])\n\n return torch.FloatTensor(data), torch.LongTensor(target)\n\n\nclass MyHardSingleTripletSelectorClf:\n def __init__(self, nbrs_num, rand_num):\n self.x = None\n self.y = None\n self.nbrs_num = nbrs_num\n self.rand_num = rand_num\n\n def get_triplets(self, anom_idx, x, y, normal_label=0):\n self.x = x.cpu().data.numpy()\n self.y = y.cpu().data.numpy()\n\n anom_x = self.x[anom_idx]\n noml_idx = np.where(self.y == normal_label)[0]\n x_noml = self.x[noml_idx]\n n_neighbors = self.nbrs_num\n rand_num = self.rand_num\n\n nbrs_local = NearestNeighbors(n_neighbors=n_neighbors).fit(x_noml)\n\n nbr_indices = noml_idx[nbrs_local.kneighbors([anom_x])[1].flatten()]\n # nbr_dist = nbrs_local.kneighbors([anom_x])[0].flatten()\n\n rand_canddt = np.setdiff1d(noml_idx, nbr_indices)\n rand_indices = np.random.choice(rand_canddt, rand_num, replace=False)\n\n triplets = [[anchor, positive, anom_idx]\n for anchor in rand_indices\n for positive in nbr_indices]\n\n # print(\"Generate triplets Num: [%d]\" % len(triplets))\n target = [[0, 0, 1]] * len(triplets)\n\n return torch.LongTensor(np.array(triplets)), torch.LongTensor(np.array(target))\n\n\nclass MyHardSingleTripletSelector2:\n def __init__(self, nbrs_num, rand_num):\n self.x = None\n self.y = None\n self.nbrs_num = nbrs_num\n self.rand_num = rand_num\n\n def get_triplets(self, anom_idx, x, y, normal_label=0):\n self.x = x.cpu().data.numpy()\n self.y = y.cpu().data.numpy()\n\n n_neighbors = self.nbrs_num\n rand_num = self.rand_num\n\n anom_x = self.x[anom_idx]\n\n anom_indices = np.where(self.y != normal_label)[0]\n noml_indices = np.where(self.y == normal_label)[0]\n noml_x = self.x[noml_indices]\n \n nbrs_local = NearestNeighbors(n_neighbors=n_neighbors).fit(noml_x)\n nbr_indices = noml_indices[nbrs_local.kneighbors([anom_x])[1].flatten()]\n # nbr_dist = nbrs_local.kneighbors([anom_x])[0].flatten()\n\n rand_canddt_nor = np.setdiff1d(noml_indices, nbr_indices)\n rand_nor_indices = np.random.choice(rand_canddt_nor, rand_num, replace=False)\n\n triplets1 = [[anchor, positive, anom_idx]\n for anchor in rand_nor_indices\n for positive in nbr_indices]\n \n rand_canddt_ano = np.setdiff1d(anom_indices, anom_idx)\n if len(rand_canddt_ano) < rand_num:\n rand_ano_indices = rand_canddt_ano\n else:\n rand_ano_indices = np.random.choice(rand_canddt_ano, rand_num, replace=False)\n\n triplets2 = [[anchor, anom_idx, negative]\n for anchor in rand_ano_indices\n for negative in nbr_indices]\n triplets = triplets1 + triplets2\n\n # print(\"Generate triplets Num: [%d]\" % len(triplets))\n target1 = [[0, 0, 1]] * len(triplets1)\n target2 = [[1, 1, 0]] * len(triplets2)\n target = target1 + target2\n\n return torch.LongTensor(np.array(triplets)), 
torch.LongTensor(np.array(target))\n\n" ]
[ [ "numpy.array", "numpy.random.choice", "numpy.setdiff1d", "torch.FloatTensor", "numpy.ones", "numpy.where", "torch.LongTensor", "numpy.arange", "numpy.hstack", "sklearn.neighbors.NearestNeighbors" ] ]
adammichaelwood/agents
[ "66ad01b9ae909bc6c344b8f0cb356758cae95236", "66ad01b9ae909bc6c344b8f0cb356758cae95236", "66ad01b9ae909bc6c344b8f0cb356758cae95236", "66ad01b9ae909bc6c344b8f0cb356758cae95236" ]
[ "tf_agents/environments/random_py_environment_test.py", "tf_agents/utils/composite_test.py", "tf_agents/agents/ddpg/critic_rnn_network.py", "tf_agents/networks/value_rnn_network.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for utils.random_py_environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tf_agents.environments import random_py_environment\nfrom tf_agents.specs import array_spec\nfrom tf_agents.utils import test_utils\n\n\nclass RandomPyEnvironmentTest(parameterized.TestCase, test_utils.TestCase):\n\n def testEnvResetAutomatically(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(obs_spec)\n\n time_step = env.step([0])\n self.assertTrue(np.all(time_step.observation >= -10))\n self.assertTrue(np.all(time_step.observation <= 10))\n self.assertTrue(time_step.is_first())\n\n while not time_step.is_last():\n time_step = env.step([0])\n self.assertTrue(np.all(time_step.observation >= -10))\n self.assertTrue(np.all(time_step.observation <= 10))\n\n time_step = env.step([0])\n self.assertTrue(np.all(time_step.observation >= -10))\n self.assertTrue(np.all(time_step.observation <= 10))\n self.assertTrue(time_step.is_first())\n\n @parameterized.named_parameters([\n ('OneStep', 1),\n ('FiveSteps', 5),\n ])\n def testEnvMinDuration(self, min_duration):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, episode_end_probability=0.9, min_duration=min_duration)\n num_episodes = 100\n\n for _ in range(num_episodes):\n time_step = env.step([0])\n self.assertTrue(time_step.is_first())\n num_steps = 0\n while not time_step.is_last():\n time_step = env.step([0])\n num_steps += 1\n self.assertGreaterEqual(num_steps, min_duration)\n\n @parameterized.named_parameters([\n ('OneStep', 1),\n ('FiveSteps', 5),\n ])\n def testEnvMaxDuration(self, max_duration):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, episode_end_probability=0.1, max_duration=max_duration)\n num_episodes = 100\n\n for _ in range(num_episodes):\n time_step = env.step([0])\n self.assertTrue(time_step.is_first())\n num_steps = 0\n while not time_step.is_last():\n time_step = env.step([0])\n num_steps += 1\n self.assertLessEqual(num_steps, max_duration)\n\n def testEnvChecksActions(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n action_spec = array_spec.BoundedArraySpec((2, 2), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec, action_spec=action_spec)\n\n env.step(np.array([[0, 0], [0, 0]]))\n\n with self.assertRaises(ValueError):\n env.step([0])\n\n def testRewardFnCalled(self):\n\n def reward_fn(unused_step_type, action, unused_observation):\n return action\n\n action_spec = array_spec.BoundedArraySpec((1,), np.int32, -10, 10)\n observation_spec = array_spec.BoundedArraySpec((1,), np.int32, -10, 10)\n 
env = random_py_environment.RandomPyEnvironment(\n observation_spec, action_spec, reward_fn=reward_fn)\n\n time_step = env.step(1) # No reward in first time_step\n self.assertEqual(0.0, time_step.reward)\n time_step = env.step(1)\n self.assertEqual(1, time_step.reward)\n\n def testRendersImage(self):\n action_spec = array_spec.BoundedArraySpec((1,), np.int32, -10, 10)\n observation_spec = array_spec.BoundedArraySpec((1,), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n observation_spec, action_spec, render_size=(4, 4, 3))\n\n env.reset()\n img = env.render()\n\n self.assertTrue(np.all(img < 256))\n self.assertTrue(np.all(img >= 0))\n self.assertEqual((4, 4, 3), img.shape)\n self.assertEqual(np.uint8, img.dtype)\n\n def testBatchSize(self):\n batch_size = 3\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(obs_spec,\n batch_size=batch_size)\n\n time_step = env.step([0])\n self.assertEqual(time_step.observation.shape, (3, 2, 3))\n self.assertEqual(time_step.reward.shape[0], batch_size)\n self.assertEqual(time_step.discount.shape[0], batch_size)\n\n def testCustomRewardFn(self):\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n batch_size = 3\n env = random_py_environment.RandomPyEnvironment(\n obs_spec,\n reward_fn=lambda *_: np.ones(batch_size),\n batch_size=batch_size)\n env._done = False\n env.reset()\n time_step = env.step([0])\n self.assertSequenceAlmostEqual([1.0] * 3, time_step.reward)\n\n def testRewardCheckerBatchSizeOne(self):\n # Ensure batch size 1 with scalar reward works\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec,\n reward_fn=lambda *_: np.array([1.0]),\n batch_size=1)\n env._done = False\n env.reset()\n time_step = env.step([0])\n self.assertEqual(time_step.reward, 1.0)\n\n def testRewardCheckerSizeMismatch(self):\n # Ensure custom scalar reward with batch_size greater than 1 raises\n # ValueError\n obs_spec = array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10)\n env = random_py_environment.RandomPyEnvironment(\n obs_spec,\n reward_fn=lambda *_: 1.0,\n batch_size=5)\n env.reset()\n env._done = False\n with self.assertRaises(ValueError):\n env.step([0])\n\n\nif __name__ == '__main__':\n test_utils.main()\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.utils.composite.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tf_agents.utils import composite\n\n\ndef _to_dense(st):\n return tf.scatter_nd(st.indices, st.values, st.dense_shape)\n\n\nclass CompositeTest(tf.test.TestCase):\n \"\"\"Tests functions related to composite tensors.\"\"\"\n\n def setUp(self):\n super(CompositeTest, self).setUp()\n self._x = tf.random.uniform([4, 5, 6], dtype=tf.int32, maxval=1000)\n self._sx = 
tf.sparse.reorder(\n tf.SparseTensor(\n indices=tf.random.uniform([40, 3], maxval=10, dtype=tf.int64),\n values=tf.random.uniform([40], maxval=1000, dtype=tf.int32),\n dense_shape=[10, 10, 10]))\n\n def testSliceFrom(self):\n from_1 = composite.slice_from(self._x, axis=1, start=1)\n from_n1 = composite.slice_from(self._x, axis=1, start=-1)\n x, from_1, from_n1 = self.evaluate((self._x, from_1, from_n1))\n self.assertAllEqual(from_1, x[:, 1:, :])\n self.assertAllEqual(from_n1, x[:, -1:, :])\n\n s_from_1 = _to_dense(composite.slice_from(self._sx, axis=1, start=1))\n s_from_n1 = _to_dense(composite.slice_from(self._sx, axis=1, start=-1))\n sx = _to_dense(self._sx)\n sx, s_from_1, s_from_n1 = self.evaluate((sx, s_from_1, s_from_n1))\n self.assertAllEqual(s_from_1, sx[:, 1:, :])\n self.assertAllEqual(s_from_n1, sx[:, -1:, :])\n\n def testSliceTo(self):\n to_1 = composite.slice_to(self._x, axis=1, end=1)\n to_n1 = composite.slice_to(self._x, axis=1, end=-1)\n x, to_1, to_n1 = self.evaluate((self._x, to_1, to_n1))\n self.assertAllEqual(to_1, x[:, :1, :])\n self.assertAllEqual(to_n1, x[:, :-1, :])\n\n s_from_1 = _to_dense(composite.slice_to(self._sx, axis=1, end=1))\n s_from_n1 = _to_dense(composite.slice_to(self._sx, axis=1, end=-1))\n sx = _to_dense(self._sx)\n sx, s_from_1, s_from_n1 = self.evaluate((sx, s_from_1, s_from_n1))\n self.assertAllEqual(s_from_1, sx[:, :1, :])\n self.assertAllEqual(s_from_n1, sx[:, :-1, :])\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sample recurrent Critic network to use with DDPG agents.\"\"\"\n\nimport functools\nimport gin\nimport tensorflow as tf\nfrom tf_agents.networks import dynamic_unroll_layer\nfrom tf_agents.networks import network\nfrom tf_agents.networks import utils\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step\nfrom tf_agents.utils import nest_utils\n\n\n@gin.configurable\nclass CriticRnnNetwork(network.Network):\n \"\"\"Creates a recurrent Critic network.\"\"\"\n\n def __init__(self,\n input_tensor_spec,\n observation_conv_layer_params=None,\n observation_fc_layer_params=(200,),\n action_fc_layer_params=(200,),\n joint_fc_layer_params=(100,),\n lstm_size=(40,),\n output_fc_layer_params=(200, 100),\n activation_fn=tf.keras.activations.relu,\n name='CriticRnnNetwork'):\n \"\"\"Creates an instance of `CriticRnnNetwork`.\n\n Args:\n input_tensor_spec: A tuple of (observation, action) each of type\n `tensor_spec.TensorSpec` representing the inputs.\n observation_conv_layer_params: Optional list of convolution layers\n parameters to apply to the observations, where each item is a\n length-three tuple indicating (filters, kernel_size, stride).\n observation_fc_layer_params: Optional list of fully_connected parameters,\n where each item is the number of units in the layer. 
This is applied\n after the observation convolutional layer.\n action_fc_layer_params: Optional list of parameters for a fully_connected\n layer to apply to the actions, where each item is the number of units\n in the layer.\n joint_fc_layer_params: Optional list of parameters for a fully_connected\n layer to apply after merging observations and actions, where each item\n is the number of units in the layer.\n lstm_size: An iterable of ints specifying the LSTM cell sizes to use.\n output_fc_layer_params: Optional list of fully_connected parameters, where\n each item is the number of units in the layer. This is applied after the\n LSTM cell.\n activation_fn: Activation function, e.g. tf.nn.relu, slim.leaky_relu, ...\n name: A string representing name of the network.\n\n Returns:\n A tf.float32 Tensor of q-values.\n\n Raises:\n ValueError: If `observation_spec` or `action_spec` contains more than one\n item.\n \"\"\"\n observation_spec, action_spec = input_tensor_spec\n\n if len(tf.nest.flatten(observation_spec)) > 1:\n raise ValueError(\n 'Only a single observation is supported by this network.')\n\n if len(tf.nest.flatten(action_spec)) > 1:\n raise ValueError('Only a single action is supported by this network.')\n\n observation_layers = utils.mlp_layers(\n observation_conv_layer_params,\n observation_fc_layer_params,\n activation_fn=activation_fn,\n kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(\n scale=1. / 3., mode='fan_in', distribution='uniform'),\n name='observation_encoding')\n\n action_layers = utils.mlp_layers(\n None,\n action_fc_layer_params,\n activation_fn=activation_fn,\n kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(\n scale=1. / 3., mode='fan_in', distribution='uniform'),\n name='action_encoding')\n\n joint_layers = utils.mlp_layers(\n None,\n joint_fc_layer_params,\n activation_fn=activation_fn,\n kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(\n scale=1. 
/ 3., mode='fan_in', distribution='uniform'),\n name='joint_mlp')\n\n # Create RNN cell\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0])\n else:\n cell = tf.keras.layers.StackedRNNCells(\n [tf.keras.layers.LSTMCell(size) for size in lstm_size])\n\n state_spec = tf.nest.map_structure(\n functools.partial(\n tensor_spec.TensorSpec, dtype=tf.float32,\n name='network_state_spec'), list(cell.state_size))\n\n output_layers = utils.mlp_layers(fc_layer_params=output_fc_layer_params,\n name='output')\n\n output_layers.append(\n tf.keras.layers.Dense(\n 1,\n activation=None,\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=-0.003, maxval=0.003),\n name='value'))\n\n super(CriticRnnNetwork, self).__init__(\n input_tensor_spec=input_tensor_spec,\n state_spec=state_spec,\n name=name)\n\n self._observation_layers = observation_layers\n self._action_layers = action_layers\n self._joint_layers = joint_layers\n self._dynamic_unroll = dynamic_unroll_layer.DynamicUnroll(cell)\n self._output_layers = output_layers\n\n # TODO(kbanoop): Standardize argument names across different networks.\n def call(self, inputs, step_type, network_state=None):\n observation, action = inputs\n observation_spec, _ = self.input_tensor_spec\n num_outer_dims = nest_utils.get_outer_rank(observation,\n observation_spec)\n if num_outer_dims not in (1, 2):\n raise ValueError(\n 'Input observation must have a batch or batch x time outer shape.')\n\n has_time_dim = num_outer_dims == 2\n if not has_time_dim:\n # Add a time dimension to the inputs.\n observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),\n observation)\n action = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1), action)\n step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),\n step_type)\n\n observation = tf.cast(tf.nest.flatten(observation)[0], tf.float32)\n action = tf.cast(tf.nest.flatten(action)[0], tf.float32)\n\n batch_squash = utils.BatchSquash(2) # Squash B, and T dims.\n observation = batch_squash.flatten(observation) # [B, T, ...] -> [BxT, ...]\n action = batch_squash.flatten(action)\n\n for layer in self._observation_layers:\n observation = layer(observation)\n\n for layer in self._action_layers:\n action = layer(action)\n\n joint = tf.concat([observation, action], -1)\n for layer in self._joint_layers:\n joint = layer(joint)\n\n joint = batch_squash.unflatten(joint) # [B x T, ...] -> [B, T, ...]\n\n with tf.name_scope('reset_mask'):\n reset_mask = tf.equal(step_type, time_step.StepType.FIRST)\n # Unroll over the time sequence.\n joint, network_state = self._dynamic_unroll(\n joint,\n reset_mask,\n initial_state=network_state)\n\n output = batch_squash.flatten(joint) # [B, T, ...] -> [B x T, ...]\n\n for layer in self._output_layers:\n output = layer(output)\n\n q_value = tf.reshape(output, [-1])\n q_value = batch_squash.unflatten(q_value) # [B x T, ...] 
-> [B, T, ...]\n if not has_time_dim:\n q_value = tf.squeeze(q_value, axis=1)\n\n return q_value, network_state\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sample Keras Value Network with LSTM cells.\n\nImplements a network that will generate the following layers:\n\n [optional]: preprocessing_layers # preprocessing_layers\n [optional]: (Add | Concat(axis=-1) | ...) # preprocessing_combiner\n [optional]: Conv2D # conv_layer_params\n Flatten\n [optional]: Dense # input_fc_layer_params\n [optional]: LSTM # lstm_cell_params\n [optional]: Dense # output_fc_layer_params\n Dense -> 1 # Value output\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport tensorflow as tf\n\nfrom tf_agents.networks import lstm_encoding_network\nfrom tf_agents.networks import network\n\n\n@gin.configurable\nclass ValueRnnNetwork(network.Network):\n \"\"\"Recurrent value network. Reduces to 1 value output per batch item.\"\"\"\n\n def __init__(self,\n input_tensor_spec,\n preprocessing_layers=None,\n preprocessing_combiner=None,\n conv_layer_params=None,\n input_fc_layer_params=(75, 40),\n input_dropout_layer_params=None,\n lstm_size=(40,),\n output_fc_layer_params=(75, 40),\n activation_fn=tf.keras.activations.relu,\n dtype=tf.float32,\n name='ValueRnnNetwork'):\n \"\"\"Creates an instance of `ValueRnnNetwork`.\n\n Network supports calls with shape outer_rank + input_tensor_shape.shape.\n Note outer_rank must be at least 1.\n\n Args:\n input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the\n input observations.\n preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`\n representing preprocessing for the different observations.\n All of these layers must not be already built. For more details see\n the documentation of `networks.EncodingNetwork`.\n preprocessing_combiner: (Optional.) A keras layer that takes a flat list\n of tensors and combines them. Good options include\n `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.\n This layer must not be already built. For more details see\n the documentation of `networks.EncodingNetwork`.\n conv_layer_params: Optional list of convolution layers parameters, where\n each item is a length-three tuple indicating (filters, kernel_size,\n stride).\n input_fc_layer_params: Optional list of fully_connected parameters, where\n each item is the number of units in the layer. This is applied before\n the LSTM cell.\n input_dropout_layer_params: Optional list of dropout layer parameters,\n where each item is the fraction of input units to drop. The dropout\n layers are interleaved with the fully connected layers; there is a\n dropout layer after each fully connected layer, except if the entry in\n the list is None. 
This list must have the same length as\n input_fc_layer_params, or be None.\n lstm_size: An iterable of ints specifying the LSTM cell sizes to use.\n output_fc_layer_params: Optional list of fully_connected parameters, where\n each item is the number of units in the layer. This is applied after the\n LSTM cell.\n activation_fn: Activation function, e.g. tf.keras.activations.relu.\n dtype: The dtype to use by the convolution, LSTM, and fully connected\n layers.\n name: A string representing name of the network.\n \"\"\"\n del input_dropout_layer_params\n\n lstm_encoder = lstm_encoding_network.LSTMEncodingNetwork(\n input_tensor_spec=input_tensor_spec,\n preprocessing_layers=preprocessing_layers,\n preprocessing_combiner=preprocessing_combiner,\n conv_layer_params=conv_layer_params,\n input_fc_layer_params=input_fc_layer_params,\n lstm_size=lstm_size,\n output_fc_layer_params=output_fc_layer_params,\n activation_fn=activation_fn,\n dtype=dtype,\n name=name)\n\n postprocessing_layers = tf.keras.layers.Dense(\n 1,\n activation=None,\n kernel_initializer=tf.compat.v1.initializers.random_uniform(\n minval=-0.03, maxval=0.03))\n\n super(ValueRnnNetwork, self).__init__(\n input_tensor_spec=input_tensor_spec,\n state_spec=lstm_encoder.state_spec,\n name=name)\n\n self._lstm_encoder = lstm_encoder\n self._postprocessing_layers = postprocessing_layers\n\n def call(self, observation, step_type=None, network_state=None):\n state, network_state = self._lstm_encoder(\n observation, step_type=step_type, network_state=network_state)\n value = self._postprocessing_layers(state)\n return tf.squeeze(value, -1), network_state\n" ]
[ [ "numpy.all", "numpy.array", "numpy.ones" ], [ "tensorflow.random.uniform", "tensorflow.test.main", "tensorflow.scatter_nd" ], [ "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.equal", "tensorflow.reshape", "tensorflow.nest.flatten", "tensorflow.keras.initializers.RandomUniform", "tensorflow.squeeze", "tensorflow.name_scope", "tensorflow.keras.layers.LSTMCell", "tensorflow.compat.v1.keras.initializers.VarianceScaling" ], [ "tensorflow.squeeze", "tensorflow.compat.v1.initializers.random_uniform" ] ]
s-scherrer/qa4sm-preprocessing
[ "dbb6dea8e4d34b69ee4d5f82f0a0028294d45170" ]
[ "src/qa4sm_preprocessing/nc_image_reader/transpose.py" ]
[ "import copy\nimport dask\nimport dask.array as da\nfrom dask.distributed import Client\nimport datetime\nimport logging\nimport math\nfrom multiprocessing.pool import ThreadPool\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm.auto import tqdm\nfrom typing import Union, TypeVar, Tuple\nimport xarray as xr\nimport shutil\nimport warnings\nimport zarr\n\n\nfrom .utils import infer_chunks\nfrom .readers import DirectoryImageReader\n\n\nReader = TypeVar(\"Reader\")\n\n\ndef write_transposed_dataset(\n reader: Reader,\n outfname: Union[Path, str],\n start: datetime.datetime = None,\n end: datetime.datetime = None,\n chunks: dict = None,\n memory: float = 2,\n n_threads: int = 4,\n zlib: bool = True,\n complevel: int = 4,\n distributed: Union[bool, Client] = False,\n use_dask: bool = True,\n):\n \"\"\"\n Creates a stacked and transposed netCDF file from a given reader.\n\n WARNING: very experimental!\n\n Parameters\n ----------\n reader : XarrayImageReaderBase\n Reader for the dataset.\n outfname : str or Path\n Output filename. Must end with \".nc\" for netCDF output or with \".zarr\"\n for zarr output.\n start : datetime.datetime, optional\n If not given, start at first timestamp in dataset.\n end : datetime.datetime, optional\n If not given, end at last timestamp in dataset.\n chunks : dictionary, optional\n The chunk sizes that are used for the transposed file. If none are\n given, chunks with a size of 1MB are used for netCDF, and chunks with a\n size of 50MB are used for zarr output.\n memory : float, optional\n The amount of memory to be used for buffering in GB. Default is 2.\n Higher is faster.\n n_threads : int, optional\n The amount of threads to use. Default is 4.\n zlib : bool, optional\n Whether to use compression when storing the files. Reduces file size,\n but strongly increases write time, and maybe also access time. Default\n is ``False``.\n complevel : int, optional\n Compression level to use. Default is 4. Range is from 1 (low) to 9\n (high).\n distributed : bool or Client, optional\n Whether to use the local or the distributed dask scheduler. If a client\n for a distributed scheduler is used, this is used instead.\n use_dask : bool, optional\n Whether to use dask for the transposing. Default is True, but sometimes\n (especially with large datasets) this fails. 
If set to False, the data\n is written to an intermediate zarr store.\n \"\"\"\n dask_config = {\n \"array.slicing.split_large_chunks\": False,\n }\n args = (reader, outfname)\n kwargs = {\n \"start\": start,\n \"end\": end,\n \"memory\": memory,\n \"zlib\": zlib,\n \"complevel\": complevel,\n \"chunks\": chunks,\n }\n if not use_dask:\n _transpose_no_dask(*args, **kwargs)\n elif isinstance(distributed, Client) or not distributed:\n if not distributed:\n dask_config.update(\n {\"scheduler\": \"threads\", \"pool\": ThreadPool(n_threads)}\n )\n with dask.config.set(**dask_config):\n _transpose(*args, **kwargs)\n elif distributed:\n with dask.config.set(**dask_config), Client(\n n_workers=1,\n threads_per_worker=n_threads,\n memory_limit=f\"{memory}GB\",\n ) as client:\n print(\"Dask dashboard accessible at:\", client.dashboard_link)\n _transpose(*args, **kwargs)\n\n\ndef _get_intermediate_chunks(array, chunks, new_last_dim, zarr_output, memory):\n \"\"\"\n Calculates chunk sizes for the given array for the intermediate output\n files.\n\n Parameters\n ----------\n array : xr.DataArray\n Array to rechunk and transpose\n chunks : dict or None\n Chunks passed to write_transposed_dataset, None if none were given.\n new_last_dim : str\n Name of the new last dimension, normally \"time\".\n zarr_output : bool\n Whether the final file will be a zarr file (True) or a netCDF (False).\n memory : float\n The amount of memory to be used for buffering in GB.\n\n Returns\n -------\n tmp_chunks : dict\n Chunks to be used for rechunking the array to a temporary file. The\n order of keys corresponds to the order of dimensions in the transposed\n array.\n \"\"\"\n dtype = array.dtype\n dims = dict(zip(array.dims, array.shape))\n transposed_shape = [\n length for dim, length in dims.items() if dim != new_last_dim\n ]\n transposed_shape.append(dims[new_last_dim])\n\n # If the chunks argument was not given, we have to infer the spatial\n # and temporal chunks for the intermediate file.\n # The spatial chunks will be set such that for a continuous time\n # dimension the chunk size is still reasonable.\n if chunks is None:\n if zarr_output:\n chunksizes = infer_chunks(transposed_shape, 100, dtype)[:-1]\n else:\n chunksizes = infer_chunks(transposed_shape, 1, dtype)[:-1]\n chunks = dict(\n zip([dim for dim in dims if dim != new_last_dim], chunksizes)\n )\n chunks[new_last_dim] = -1\n else:\n chunks = copy.copy(chunks)\n tmp_chunks = {dim: chunks[dim] for dim in dims if dim != new_last_dim}\n\n # figure out temporary chunk sizes based on image size and available memory\n size = dtype.itemsize\n chunksizes = [size if size != -1 else dims[dim] for dim, size in chunks.items()]\n chunksize_MB = np.prod(chunksizes) * size / 1024 ** 2\n img_shape = transposed_shape[:-1]\n len_time = transposed_shape[-1]\n imagesize_GB = np.prod(img_shape) * size / 1024 ** 3\n # we need to divide by two, because we need intermediate storage for\n # the transposing\n stepsize = int(math.floor(memory / imagesize_GB)) // 2\n stepsize = min(stepsize, len_time)\n\n tmp_chunks[new_last_dim] = stepsize\n tmp_chunks_str = str(tuple(tmp_chunks.values()))\n logging.info(\n f\"write_transposed_dataset: Creating chunks {tmp_chunks_str}\"\n f\" with chunksize {chunksize_MB:.2f} MB\"\n )\n return tmp_chunks\n\n\ndef _transpose(\n reader: Reader,\n outfname: Union[Path, str],\n start: datetime.datetime = None,\n end: datetime.datetime = None,\n chunks: dict = None,\n memory: float = 2,\n zlib: bool = True,\n complevel: int = 4,\n):\n 
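# Strategy: each variable is first rechunked into a temporary zarr store\n # with a contiguous time dimension; the per-variable stores are then\n # merged and written out with the final chunking.\n 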
zarr_output = str(outfname).endswith(\".zarr\")\n new_last_dim = reader.timename\n\n if isinstance(reader, DirectoryImageReader) and reader.chunks is None:\n logging.info(\n \"You are using DirectoryImageReader without dask. If you run into\"\n \" memory issues or have large datasets to transpose, consider\"\n \" setting use_dask=True in the constructor of DirectoryImageReader.\"\n )\n\n ds = reader.read_block(start, end)\n\n # We process each variable separately and store them as intermediately\n # chunked temporary files. The chunk size in time dimension is inferred\n # from the given memory.\n variable_chunks = {}\n variable_intermediate_fnames = {}\n for var in reader.varnames:\n\n tmp_outfname = str(outfname) + f\".{var}.zarr\"\n variable_intermediate_fnames[var] = tmp_outfname\n if Path(tmp_outfname).exists():\n logging.info(\n f\"Skipping generating intermediate file {tmp_outfname}\"\n \" because it exists\"\n )\n continue\n\n tmp_chunks = _get_intermediate_chunks(\n ds[var], chunks, new_last_dim, zarr_output, memory\n )\n\n # make sure that the time dimension will be continuous in the final\n # output\n final_chunks = copy.copy(tmp_chunks)\n final_chunks[new_last_dim] = len(ds[var][new_last_dim])\n variable_chunks[var] = final_chunks\n\n # now we can rechunk and transpose using xarray\n rechunked_transposed = ds[var].chunk(tmp_chunks).transpose(\n ..., new_last_dim\n )\n rechunked_transposed.to_dataset().to_zarr(\n tmp_outfname, consolidated=True\n )\n\n # Now we have to reassemble all variables to a single dataset and write the\n # final chunks\n variable_ds = []\n variable_chunksizes = {}\n for var in reader.varnames:\n ds = xr.open_zarr(variable_intermediate_fnames[var], consolidated=True)\n variable_ds.append(ds)\n\n # for the encoding variable below we need the chunks as tuple in the\n # right order, it's easier to get this here where we have easy access to\n # the transposed DataArray\n transposed_dims = ds[var].dims\n variable_chunksizes[var] = tuple(\n variable_chunks[var][dim] for dim in transposed_dims\n )\n\n ds = xr.merge(\n variable_ds,\n compat=\"override\",\n join=\"override\",\n combine_attrs=\"override\",\n )\n ds.attrs.update(reader.global_attrs)\n encoding = {\n var: {\n \"chunksizes\": variable_chunksizes[var],\n \"zlib\": zlib,\n \"complevel\": complevel,\n }\n for var in reader.varnames\n }\n\n if not zarr_output:\n ds.to_netcdf(outfname, encoding=encoding)\n else:\n for var in reader.varnames:\n del ds[var].encoding[\"chunks\"]\n del ds[var].encoding[\"preferred_chunks\"]\n ds[var] = ds[var].chunk(variable_chunksizes[var])\n ds.to_zarr(outfname, mode=\"w\", consolidated=True)\n\n for var in reader.varnames:\n shutil.rmtree(variable_intermediate_fnames[var])\n logging.info(\"write_transposed_dataset: Finished writing transposed file.\")\n\n\ndef _transpose_no_dask(\n reader: Reader,\n outfname: Union[Path, str],\n start: datetime.datetime = None,\n end: datetime.datetime = None,\n chunks: Tuple = None,\n memory: float = 2,\n zlib: bool = True,\n complevel: int = 4,\n):\n warnings.warn(\n \"This is an experimental function and not yet ready for public use!\"\n )\n zarr_output = str(outfname).endswith(\".zarr\")\n new_last_dim = reader.timename\n timestamps = reader.tstamps_for_daterange(start, end)\n\n variable_fnames = {}\n variable_dims = {}\n for varname in reader.varnames:\n\n tmp_outfname = str(outfname) + f\".{varname}.zarr\"\n variable_fnames[varname] = tmp_outfname\n\n # first, get some info about structure of the input file\n 
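# (only the first timestamp is read here, so inferring the structure stays\n # cheap even for long time series)\n 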
first_img = reader.read_block(start=timestamps[0], end=timestamps[0])[\n varname\n ]\n tmp_chunks = _get_intermediate_chunks(\n first_img, chunks, new_last_dim, zarr_output, memory\n )\n\n # get new dim names in the correct order\n new_dim_names = list(tmp_chunks)\n variable_dims[varname] = new_dim_names\n\n # this happens this late because we need to set\n # `variable_dims[varname]` in any case\n if Path(tmp_outfname).exists():\n logging.info(f\"{str(tmp_outfname)} already exists, skipping.\")\n continue\n\n logging.debug(\n f\"write_transposed_dataset: starting zarr array creation\"\n f\" for {len(timestamps)} timestamps\"\n )\n\n # get shape of transposed target array\n dims = dict(zip(first_img.dims, first_img.shape))\n transposed_shape = tuple(dims[dim] for dim in tmp_chunks.keys())\n zarr_array = zarr.create(\n transposed_shape,\n chunks=tuple(size for size in tmp_chunks.values()),\n store=tmp_outfname,\n overwrite=True,\n fill_value=np.nan,\n )\n\n logging.debug(f\"write_transposed_dataset: Writing {tmp_outfname}\")\n print(f\"Constructing array stack for {varname}:\")\n stepsize = tmp_chunks[new_last_dim]\n pbar = tqdm(range(0, len(timestamps), stepsize))\n for start_idx in pbar:\n pbar.set_description(\"Reading\")\n end_idx = min(start_idx + stepsize - 1, len(timestamps) - 1)\n block = reader.read_block(\n timestamps[start_idx], timestamps[end_idx]\n )[varname]\n block = block.transpose(..., new_last_dim)\n pbar.set_description(\"Writing\")\n zarr_array[..., start_idx : end_idx + 1] = block.values\n\n variable_arrays = {}\n encoding = {}\n for varname, fname in variable_fnames.items():\n logging.debug(f\"Reading {str(fname)}\")\n arr = da.from_zarr(fname)\n dims = variable_dims[varname]\n metadata = reader.array_attrs[varname]\n if chunks is None:\n if zarr_output:\n chunks = infer_chunks(arr.shape, 100, arr.dtype)\n else:\n # netCDF chunks should be about 1MB\n chunks = infer_chunks(arr.shape, 1, arr.dtype)\n encoding[varname] = {\n \"chunksizes\": chunks,\n \"zlib\": zlib,\n \"complevel\": complevel,\n }\n chunk_dict = dict(zip(dims, chunks))\n arr = xr.DataArray(data=arr, dims=dims, attrs=metadata)\n arr = arr.chunk(chunk_dict)\n arr.encoding = encoding[varname]\n # we're writing again to a temporary file, because otherwise the\n # dataset creation fails because dask sucks\n # arr.to_dataset(name=varname).to_zarr(fname + \".tmp\", consolidated=True)\n # variable_arrays[varname] = xr.open_zarr(fname + \".tmp\", consolidated=True)\n variable_arrays[varname] = arr\n\n logging.debug(\"Reading test image\")\n test_img = reader.read_block(start=timestamps[0], end=timestamps[0])[\n reader.varnames[0]\n ]\n coords = {\n c: test_img.coords[c] for c in test_img.coords if c != reader.timename\n }\n coords[reader.timename] = timestamps\n logging.debug(\"Creating dataset\")\n ds = xr.Dataset(\n variable_arrays,\n coords=coords,\n )\n ds.attrs.update(reader.global_attrs)\n\n logging.info(\n f\"write_transposed_dataset: Writing combined file to {str(outfname)}\"\n )\n if not zarr_output:\n ds.to_netcdf(outfname, encoding=encoding)\n else:\n ds.to_zarr(outfname, mode=\"w\", consolidated=True)\n\n for fname in variable_fnames.values():\n shutil.rmtree(fname)\n logging.info(\"write_transposed_dataset: Finished writing transposed file.\")\n" ]
[ [ "numpy.prod" ] ]
skylarch/Paddle
[ "d58d8df6f5f7aa6fd2f0780f87475055db57a80d" ]
[ "python/paddle/fluid/tests/unittests/test_while_op.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport paddle.fluid.layers as layers\nfrom paddle.fluid.executor import Executor\nimport paddle.fluid.core as core\nfrom paddle.fluid.backward import append_backward\nimport numpy\n\n\nclass TestWhileOp(unittest.TestCase):\n def test_simple_forward(self):\n d0 = layers.data(\n \"d0\", shape=[10], append_batch_size=False, dtype='float32')\n d1 = layers.data(\n \"d1\", shape=[10], append_batch_size=False, dtype='float32')\n d2 = layers.data(\n \"d2\", shape=[10], append_batch_size=False, dtype='float32')\n i = layers.zeros(shape=[1], dtype='int64')\n i.stop_gradient = True\n init = layers.zeros(shape=[10], dtype='float32')\n mem_array = layers.array_write(x=init, i=i)\n data_array = layers.array_write(x=d0, i=i)\n\n i = layers.increment(i)\n layers.array_write(d1, i, array=data_array)\n\n i = layers.increment(i)\n layers.array_write(d2, i, array=data_array)\n\n i = layers.zeros(shape=[1], dtype='int64')\n i.stop_gradient = True\n\n array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)\n array_len.stop_gradient = True\n cond = layers.less_than(x=i, y=array_len)\n\n while_op = layers.While(cond=cond)\n with while_op.block():\n d = layers.array_read(array=data_array, i=i)\n prev = layers.array_read(array=mem_array, i=i)\n result = layers.sums(input=[d, prev])\n\n i = layers.increment(x=i, in_place=True)\n layers.array_write(result, i=i, array=mem_array)\n layers.less_than(x=i, y=array_len, cond=cond)\n\n sum_result = layers.array_read(array=mem_array, i=i)\n loss = layers.mean(sum_result)\n\n append_backward(loss)\n\n cpu = core.CPUPlace()\n exe = Executor(cpu)\n d = []\n\n for i in range(3):\n d.append(numpy.random.random(size=[10]).astype('float32'))\n\n outs = exe.run(feed={'d0': d[0],\n 'd1': d[1],\n 'd2': d[2]},\n fetch_list=[sum_result])\n self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.sum", "numpy.random.random" ] ]
CFD-lab-ZJU/PetIBM
[ "59c9ddd7373c2f4659761ca425db05491069c601" ]
[ "examples/decoupledibpm/flatplate3dRe100AoA30_GPU/scripts/createBody.py" ]
[ "\"\"\"\nCreate a flat plate of length 1.0 with aspect ratio 2.0 and a 30-degree\ninclination.\nThe plate is discretized with spacing 0.04 in the x-y plane and with spacing\n0.04 along the z-direction.\n\"\"\"\n\nimport math\nimport pathlib\nimport numpy\n\n\n# Flat-plate's parameters.\nL = 1.0 # chord length\nAR = 2.0 # aspect ratio\nxc, yc, zc = 0.0, 0.0, 0.0 # center's coordinates\naoa = 30.0 # angle of inclination in degrees\nds = 0.04 # mesh spacing\n\nsimu_dir = pathlib.Path(__file__).absolute().parents[1]\n\n# Generate coordinates of the flat plate.\nn = math.ceil(L / ds)\ns = numpy.linspace(xc - L / 2, xc + L / 2, num=n + 1)\n\nx = xc + numpy.cos(numpy.radians(-aoa)) * s\ny = yc + numpy.sin(numpy.radians(-aoa)) * s\n\nnz = math.ceil(L * AR / ds)\nz = numpy.linspace(zc - L * AR / 2, zc + L * AR / 2, num=nz + 1)\n\n# Write coordinates into file.\nfilepath = simu_dir / 'flatplate.body'\nwith open(filepath, 'w') as outfile:\n outfile.write('{}\\n'.format(x.size * z.size))\nfor zi in z:\n with open(filepath, 'ab') as outfile:\n numpy.savetxt(outfile, numpy.c_[x, y, zi * numpy.ones(x.size)])\n" ]
[ [ "numpy.radians", "numpy.linspace", "numpy.ones" ] ]
Singular-Brain/ProjectBrain
[ "2d22d45c13a86825c0dcaf517a59e02f2c4f6164", "2d22d45c13a86825c0dcaf517a59e02f2c4f6164" ]
[ "bindsnet_master/examples/breakout/random_network_baseline.py", "bindsnet_master/test/network/test_monitors.py" ]
[ "import torch\nimport argparse\n\nfrom bindsnet.network import Network\nfrom bindsnet.learning import Hebbian\nfrom bindsnet.pipeline import EnvironmentPipeline\nfrom bindsnet.encoding import bernoulli\nfrom bindsnet.network.monitors import Monitor\nfrom bindsnet.environment import GymEnvironment\nfrom bindsnet.network.topology import Connection\nfrom bindsnet.network.nodes import Input, LIFNodes\nfrom bindsnet.pipeline.action import select_multinomial\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", type=int, default=1000000)\nparser.add_argument(\"--seed\", type=int, default=0)\nparser.add_argument(\"--n_neurons\", type=int, default=100)\nparser.add_argument(\"--dt\", type=float, default=1.0)\nparser.add_argument(\"--plot_interval\", type=int, default=10)\nparser.add_argument(\"--render_interval\", type=int, default=10)\nparser.add_argument(\"--print_interval\", type=int, default=100)\nparser.add_argument(\"--gpu\", dest=\"gpu\", action=\"store_true\")\nparser.set_defaults(plot=False, render=False, gpu=False)\n\nargs = parser.parse_args()\n\nn = args.n\nseed = args.seed\nn_neurons = args.n_neurons\ndt = args.dt\nplot_interval = args.plot_interval\nrender_interval = args.render_interval\nprint_interval = args.print_interval\ngpu = args.gpu\n\nif gpu:\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n torch.cuda.manual_seed_all(seed)\nelse:\n torch.manual_seed(seed)\n\n# Build network.\nnetwork = Network(dt=dt)\n\n# Layers of neurons.\ninpt = Input(shape=(1, 1, 1, 80, 80), traces=True) # Input layer\nexc = LIFNodes(n=n_neurons, refrac=0, traces=True) # Excitatory layer\nreadout = LIFNodes(n=4, refrac=0, traces=True) # Readout layer\nlayers = {\"X\": inpt, \"E\": exc, \"R\": readout}\n\n# Connections between layers.\n# Input -> excitatory.\nw = 0.01 * torch.rand(layers[\"X\"].n, layers[\"E\"].n)\ninput_exc_conn = Connection(\n source=layers[\"X\"],\n target=layers[\"E\"],\n w=0.01 * torch.rand(layers[\"X\"].n, layers[\"E\"].n),\n wmax=0.02,\n norm=0.01 * layers[\"X\"].n,\n)\n\n# Excitatory -> readout.\nexc_readout_conn = Connection(\n source=layers[\"E\"],\n target=layers[\"R\"],\n w=0.01 * torch.rand(layers[\"E\"].n, layers[\"R\"].n),\n update_rule=Hebbian,\n nu=[1e-2, 1e-2],\n norm=0.5 * layers[\"E\"].n,\n)\n\n# Spike recordings for all layers.\nspikes = {}\nfor layer in layers:\n spikes[layer] = Monitor(layers[layer], [\"s\"], time=plot_interval)\n\n# Voltage recordings for excitatory and readout layers.\nvoltages = {}\nfor layer in set(layers.keys()) - {\"X\"}:\n voltages[layer] = Monitor(layers[layer], [\"v\"], time=plot_interval)\n\n# Add all layers and connections to the network.\nfor layer in layers:\n network.add_layer(layers[layer], name=layer)\n\nnetwork.add_connection(input_exc_conn, source=\"X\", target=\"E\")\nnetwork.add_connection(exc_readout_conn, source=\"E\", target=\"R\")\n\n# Add all monitors to the network.\nfor layer in layers:\n network.add_monitor(spikes[layer], name=\"%s_spikes\" % layer)\n\n if layer in voltages:\n network.add_monitor(voltages[layer], name=\"%s_voltages\" % layer)\n\n# Load the Breakout environment.\nenvironment = GymEnvironment(\"BreakoutDeterministic-v4\")\nenvironment.reset()\n\npipeline = EnvironmentPipeline(\n network,\n environment,\n encoding=bernoulli,\n time=1,\n history=5,\n delta=10,\n plot_interval=plot_interval,\n print_interval=print_interval,\n render_interval=render_interval,\n action_function=select_multinomial,\n output=\"R\",\n)\n\ntotal = 0\nrewards = []\navg_rewards = []\nlengths = 
[]\navg_lengths = []\n\ni = 0\ntry:\n while i < n:\n result = pipeline.env_step()\n pipeline.step(result)\n\n is_done = result[2]\n if is_done:\n pipeline.reset_state_variables()\n\n i += 1\n\nexcept KeyboardInterrupt:\n environment.close()\n", "import torch\n\nfrom bindsnet.network import Network\nfrom bindsnet.network.monitors import Monitor, NetworkMonitor\nfrom bindsnet.network.nodes import Input, IFNodes\nfrom bindsnet.network.topology import Connection\n\n\nclass TestMonitor:\n \"\"\"\n Testing Monitor object.\n \"\"\"\n\n network = Network()\n\n inpt = Input(75)\n network.add_layer(inpt, name=\"X\")\n _if = IFNodes(25)\n network.add_layer(_if, name=\"Y\")\n conn = Connection(inpt, _if, w=torch.rand(inpt.n, _if.n))\n network.add_connection(conn, source=\"X\", target=\"Y\")\n\n inpt_mon = Monitor(inpt, state_vars=[\"s\"])\n network.add_monitor(inpt_mon, name=\"X\")\n _if_mon = Monitor(_if, state_vars=[\"s\", \"v\"])\n network.add_monitor(_if_mon, name=\"Y\")\n\n network.run(inputs={\"X\": torch.bernoulli(torch.rand(100, inpt.n))}, time=100)\n\n assert inpt_mon.get(\"s\").size() == torch.Size([100, 1, inpt.n])\n assert _if_mon.get(\"s\").size() == torch.Size([100, 1, _if.n])\n assert _if_mon.get(\"v\").size() == torch.Size([100, 1, _if.n])\n\n del network.monitors[\"X\"], network.monitors[\"Y\"]\n\n inpt_mon = Monitor(inpt, state_vars=[\"s\"], time=500)\n network.add_monitor(inpt_mon, name=\"X\")\n _if_mon = Monitor(_if, state_vars=[\"s\", \"v\"], time=500)\n network.add_monitor(_if_mon, name=\"Y\")\n\n network.run(inputs={\"X\": torch.bernoulli(torch.rand(500, inpt.n))}, time=500)\n\n assert inpt_mon.get(\"s\").size() == torch.Size([500, 1, inpt.n])\n assert _if_mon.get(\"s\").size() == torch.Size([500, 1, _if.n])\n assert _if_mon.get(\"v\").size() == torch.Size([500, 1, _if.n])\n\n\nclass TestNetworkMonitor:\n \"\"\"\n Testing NetworkMonitor object.\n \"\"\"\n\n network = Network()\n\n inpt = Input(25)\n network.add_layer(inpt, name=\"X\")\n _if = IFNodes(75)\n network.add_layer(_if, name=\"Y\")\n conn = Connection(inpt, _if, w=torch.rand(inpt.n, _if.n))\n network.add_connection(conn, source=\"X\", target=\"Y\")\n\n mon = NetworkMonitor(network, state_vars=[\"s\", \"v\", \"w\"])\n network.add_monitor(mon, name=\"monitor\")\n\n network.run(inputs={\"X\": torch.bernoulli(torch.rand(50, inpt.n))}, time=50)\n\n recording = mon.get()\n\n assert recording[\"X\"][\"s\"].size() == torch.Size([50, 1, inpt.n])\n assert recording[\"Y\"][\"s\"].size() == torch.Size([50, 1, _if.n])\n assert recording[\"Y\"][\"v\"].size() == torch.Size([50, 1, _if.n])\n\n del network.monitors[\"monitor\"]\n\n mon = NetworkMonitor(network, state_vars=[\"s\", \"v\", \"w\"], time=50)\n network.add_monitor(mon, name=\"monitor\")\n\n network.run(inputs={\"X\": torch.bernoulli(torch.rand(50, inpt.n))}, time=50)\n\n recording = mon.get()\n\n assert recording[\"X\"][\"s\"].size() == torch.Size([50, 1, inpt.n])\n assert recording[\"Y\"][\"s\"].size() == torch.Size([50, 1, _if.n])\n assert recording[\"Y\"][\"v\"].size() == torch.Size([50, 1, _if.n])\n\n\nif __name__ == \"__main__\":\n tm = TestMonitor()\n tnm = TestNetworkMonitor()\n" ]
[ [ "torch.manual_seed", "torch.rand", "torch.set_default_tensor_type", "torch.cuda.manual_seed_all" ], [ "torch.Size", "torch.rand" ] ]
rcmalli/polimi-dl-project
[ "5bf26a8e930dc98fe59a74bc473ddc74ff7dd201", "5bf26a8e930dc98fe59a74bc473ddc74ff7dd201" ]
[ "tools/unpool_test.py", "tools/create_output_images.py" ]
[ "from src.model import unpool_resize,unpool_deconv, unpool_checkerboard, unpool_simple\nfrom tensorflow.keras.layers import Input, UpSampling2D\nfrom tensorflow.keras.models import Model\n\ninput = Input(shape=(20, 20, 3))\n\nout1 = unpool_resize(input)\nmodel1 = Model(inputs=input, outputs=out1)\nprint(\"\")\n\nout2 = unpool_deconv(input,512)\nmodel2 = Model(inputs=input, outputs=out2)\nprint(\"\")\n\nout3 = UpSampling2D((2,2))(input)\nout3 = unpool_checkerboard(out3)\nmodel3 = Model(inputs=input, outputs=out3)\nprint(\"\")\n\n", "from model import depth_model\nimport tensorflow as tf\nimport numpy as np\nfrom utils import get_args\nfrom config import process_config, get_config_from_json\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input\nimport matplotlib.pyplot as plt\nfrom model import load_depth_model_from_weights\nfrom dirs import create_dirs\nimport os\nimport pickle\nfrom PIL import Image\n\ngt_path = \"../images/dataset_gt\"\nd_img_path = \"../images/dataset_img\"\ne_img_path = \"../images/external_img\"\n\ndef create_outputs():\n\n args = get_args()\n config, _ = get_config_from_json(args.config)\n dir = \"../outputs/input\"\n create_dirs([dir])\n\n for i in range(6):\n img = Image.open(e_img_path + \"/\" + str(i + 1) + '.jpg')\n img = img.resize((config.input_size[1], config.input_size[0]))\n x = np.array(img)\n plt.imsave(dir + '/ext_' + str(i + 1) + '.jpg', x )\n\n for i in range(6):\n img = Image.open(d_img_path + \"/\" + str(i + 1) + '.jpg')\n img = img.resize((config.input_size[1], config.input_size[0]))\n x = np.array(img)\n plt.imsave(dir + '/dat_' + str(i + 1) + '.jpg', x)\n\n\n\n config.unpool_type = \"simple\"\n config.exp_name = \"nyu-resnet-berhu-aug-30-simple-upproject\"\n config.prediction_model_name = \"model-150-0.19.km\"\n config.model_dir = os.path.join(\"../experiments\", config.exp_name, \"model/\")\n config.tensorboard_dir = os.path.join(\"../experiments\", config.exp_name, \"log/\")\n\n extract(config)\n\n tf.keras.backend.clear_session()\n\n config.unpool_type = \"deconv\"\n config.exp_name = \"nyu-resnet-berhu-aug-30-deconv-upproject\"\n config.prediction_model_name = \"model-150-0.21.km\"\n config.model_dir = os.path.join(\"../experiments\", config.exp_name, \"model/\")\n config.tensorboard_dir = os.path.join(\"../experiments\", config.exp_name, \"log/\")\n\n extract(config)\n\n tf.keras.backend.clear_session()\n\n config.unpool_type = \"checkerboard\"\n config.exp_name = \"nyu-resnet-berhu-aug-30-checkerboard-upproject\"\n config.prediction_model_name = \"model-150-0.20.km\"\n config.model_dir = os.path.join(\"../experiments\", config.exp_name, \"model/\")\n config.tensorboard_dir = os.path.join(\"../experiments\", config.exp_name, \"log/\")\n\n extract(config)\n\n tf.keras.backend.clear_session()\n\n\n config.unpool_type = \"resize\"\n config.exp_name = \"nyu-resnet-berhu-aug-30-resize-upproject\"\n config.prediction_model_name = \"model-150-0.20.km\"\n config.model_dir = os.path.join(\"../experiments\", config.exp_name, \"model/\")\n config.tensorboard_dir = os.path.join(\"../experiments\", config.exp_name, \"log/\")\n\n extract(config)\n\n\n\n\ndef extract(config):\n\n model = load_depth_model_from_weights(config)\n dir = \"../outputs/\" + config.exp_name\n create_dirs([dir])\n\n for i in range(6):\n\n img = image.load_img(e_img_path+\"/\"+str(i+1) + '.jpg', target_size=(config.input_size[0], config.input_size[1]))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = 
preprocess_input(x)\n\n prediction = model.predict(x)\n print(\"prediction shape\", prediction.shape)\n prediction = np.reshape(prediction, [prediction.shape[1], prediction.shape[2]])\n plt.imsave(dir + '/ext_pre_depth_'+str(i+1) + '.jpg', prediction)\n\n for i in range(6):\n\n img = image.load_img(d_img_path+\"/\"+str(i+1) + '.jpg', target_size=(config.input_size[0], config.input_size[1]))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n prediction = model.predict(x)\n print(\"prediction shape\", prediction.shape)\n prediction = np.reshape(prediction, [prediction.shape[1], prediction.shape[2]])\n plt.imsave(dir + '/dat_pre_depth_'+str(i+1) + '.jpg', prediction)\n\n for i in range(6):\n\n with open(gt_path+\"/\"+str(i+1) + '.pkl', 'rb') as fp:\n depth = pickle.load(fp)/10.0\n depth = Image.fromarray(depth)\n depth = np.array(depth.resize((160, 112)))\n plt.imsave(dir + '/dat_gt_depth_' + str(i + 1) + '.jpg', depth)\n\n del model\n\n\n\nif __name__ == '__main__':\n create_outputs()" ]
[ [ "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.Input", "tensorflow.keras.models.Model" ], [ "numpy.array", "numpy.reshape", "tensorflow.keras.applications.resnet50.preprocess_input", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.keras.backend.clear_session", "numpy.expand_dims" ] ]
ria-ee/monitor
[ "d5cb9384abf38394b35e760729649136cbbc7548" ]
[ "analysis_module/analyzer/AnalyzerDatabaseManager_tmp.py" ]
[ "# _tmp_\nimport datetime\nfrom copy import deepcopy\n# _tmp_\nfrom pymongo import MongoClient\nimport pymongo\nimport pandas as pd\nimport numpy as np\nimport sys\n\npd.options.mode.chained_assignment = None\n\n\nclass AnalyzerDatabaseManager(object):\n\n def __init__(self, db_config, config):\n self._db_config = db_config\n self._config = config\n\n def aggregate_data(self, model_type, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[],\n metric=None, threshold=None):\n if model_type == \"failed_request_ratio\":\n return self._aggregate_data_for_failed_request_ratio_model(agg_minutes=agg_minutes, start_time=start_time,\n end_time=end_time, ids_to_exclude=ids_to_exclude)\n elif model_type == \"duplicate_message_ids\":\n return self._aggregate_data_for_duplicate_message_id_model(agg_minutes=agg_minutes, start_time=start_time,\n end_time=end_time, ids_to_exclude=ids_to_exclude)\n elif model_type == \"time_sync_errors\":\n return self._aggregate_data_for_time_sync_model(relevant_metric=metric, threshold=threshold,\n agg_minutes=agg_minutes, start_time=start_time, end_time=end_time,\n ids_to_exclude=ids_to_exclude)\n else:\n return None\n\n def aggregate_data_for_historic_averages_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[], service_calls=None):\n # create connection\n clean_data = self._get_clean_data_collection()\n\n # nested fields need to be projected (select field from client if, exists, else from producer)\n project_dict = self._get_clean_data_projection_dict()\n\n # conditions to filter the data before processing\n filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]\n if len(ids_to_exclude) > 0:\n id_exclude_query = {'_id': {'$nin': ids_to_exclude}}\n filter_dict_elems.append(id_exclude_query)\n if start_time is not None:\n start_time_query = {self._config.timestamp_field: {\"$gte\": start_time}}\n filter_dict_elems.append(start_time_query)\n if end_time is not None:\n end_time_query = {self._config.timestamp_field: {\"$lt\": end_time}}\n filter_dict_elems.append(end_time_query)\n if service_calls is not None and len(service_calls) > 0:\n for col in self._config.service_call_fields:\n service_calls.loc[service_calls[col] == \"-\", col] = None\n service_call_query = {\"$or\": service_calls.to_dict(orient=\"records\")}\n filter_dict_elems.append(service_call_query)\n if len(filter_dict_elems) == 1:\n filter_dict = filter_dict_elems[0]\n elif len(filter_dict_elems) > 1:\n filter_dict = {\"$and\": filter_dict_elems}\n\n # set up elements to group by (service call fields and temporal aggregation window)\n group_dict = {col: \"$%s\" % col for col in self._config.service_call_fields}\n group_dict[self._config.timestamp_field] = {\n \"$subtract\": [\n \"$%s\" % self._config.timestamp_field,\n {\"$mod\": [\"$%s\" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}\n ]}\n\n res = clean_data.aggregate([\n {'$project': project_dict},\n {'$match': filter_dict},\n {'$group': {\n \"_id\": group_dict,\n \"request_count\": {\"$sum\": 1},\n \"mean_request_size\": {\"$avg\": \"$requestSize\"},\n \"mean_response_size\": {\"$avg\": \"$responseSize\"},\n \"mean_client_duration\": {\"$avg\": \"$totalDuration\"},\n \"mean_producer_duration\": {\"$avg\": \"$producerDurationProducerView\"},\n \"request_ids\": {\"$push\": \"$_id\"}}}],\n allowDiskUse=True, maxTimeMS=14400000)\n\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" aggregate_data_for_historic_averages_model_start \")\n results = []\n for item_tmp in res:\n # 
print(datetime.datetime.now().strftime('%H:%M:%s') + \" aggregate_data_for_historic_averages_model \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" aggregate_data_for_historic_averages_model_end \")\n # _tmp_\n\n # return self._generate_dataframe(list(res))\n return self._generate_dataframe(results)\n\n def add_first_request_timestamps_from_clean_data(self, data=None):\n\n # create connection\n clean_data = self._get_clean_data_collection()\n\n # nested fields need to be projected (select field from client if, exists, else from producer)\n project_dict = self._get_clean_data_projection_dict()\n\n # conditions to filter the data before processing\n filter_dict = {'correctorStatus': 'done'}\n if data is not None:\n for col in self._config.service_call_fields:\n data.loc[data[col] == \"-\", col] = None\n filter_dict[\"$or\"] = data.to_dict(orient=\"records\")\n\n # set up elements to group by (service call fields and temporal aggregation window)\n group_dict = {col: \"$%s\" % col for col in self._config.service_call_fields}\n\n res = clean_data.aggregate([\n {'$project': project_dict},\n {'$match': filter_dict},\n {'$group': {\n \"_id\": group_dict,\n self._config.timestamp_field: {\"$min\": \"$%s\" % self._config.timestamp_field}}}],\n allowDiskUse=True, maxTimeMS=14400000)\n\n # _tmp_\n results = []\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" add_first_request_timestamps_from_clean_data_start \")\n for item_tmp in res:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" add_first_request_timestamps_from_clean_data \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" add_first_request_timestamps_from_clean_data_end \")\n # _tmp_\n\n # res = list(res)\n res = deepcopy(results)\n if len(res) == 0:\n return\n # res = self._generate_dataframe(list(res))\n res = self._generate_dataframe(res)\n res = res.sort_values(self._config.timestamp_field, ascending=True).drop_duplicates(self._config.service_call_fields)\n\n # exclude service calls that already exist in the first timestamps table\n existing_first_timestamps = self.get_first_timestamps_for_service_calls()\n if len(existing_first_timestamps) > 0:\n res = res.merge(existing_first_timestamps[self._config.service_call_fields + [\"first_request_timestamp\"]],\n on=self._config.service_call_fields, how=\"left\")\n res = res[pd.isnull(res.first_request_timestamp)].drop(\"first_request_timestamp\", axis=1)\n\n res = res.rename(columns={self._config.timestamp_field: \"first_request_timestamp\"})\n res.first_request_timestamp = pd.to_datetime(res.first_request_timestamp, unit='ms')\n res = res.assign(first_incident_timestamp=None)\n res = res.assign(first_model_retrain_timestamp=None)\n res = res.assign(first_model_train_timestamp=None)\n\n # add new service calls\n scft = self._get_service_call_first_timestamps_collection()\n if len(res) > 0:\n scft.insert_many(res.to_dict('records'))\n\n def update_first_timestamps(self, field, value, service_calls=None):\n scft = self._get_service_call_first_timestamps_collection()\n scft.update({\"$or\": service_calls.to_dict(orient=\"records\")}, {\"$set\": {field: value}}, upsert=False, multi=True)\n\n def update_first_train_retrain_timestamps(self, sc_first_model, sc_second_model, current_time):\n if len(sc_first_model) > 0:\n self.update_first_timestamps(field=\"first_model_train_timestamp\",\n value=current_time,\n service_calls=sc_first_model[self._config.service_call_fields])\n\n 
if len(sc_second_model) > 0:\n self.update_first_timestamps(field=\"first_model_retrain_timestamp\",\n value=current_time,\n service_calls=sc_second_model[self._config.service_call_fields])\n\n def _aggregate_data_for_failed_request_ratio_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):\n # create connection\n clean_data = self._get_clean_data_collection()\n\n # nested fields need to be projected (select field from client if, exists, else from producer)\n project_dict = self._get_clean_data_projection_dict()\n\n filter_dict_elems = [{'correctorStatus': 'done'}]\n # conditions to filter the data before processing\n if len(ids_to_exclude) > 0:\n id_exclude_query = {'_id': {'$nin': ids_to_exclude}}\n filter_dict_elems.append(id_exclude_query)\n if start_time is not None:\n start_time_query = {self._config.timestamp_field: {\"$gte\": start_time}}\n filter_dict_elems.append(start_time_query)\n if end_time is not None:\n end_time_query = {self._config.timestamp_field: {\"$lt\": end_time}}\n filter_dict_elems.append(end_time_query)\n if len(filter_dict_elems) == 1:\n filter_dict = filter_dict_elems[0]\n elif len(filter_dict_elems) > 1:\n filter_dict = {\"$and\": filter_dict_elems}\n else:\n filter_dict = {}\n\n # set up elements to group by (service call fields and temporal aggregation window)\n group_dict = {col: \"$%s\" % col for col in self._config.service_call_fields}\n group_dict[self._config.timestamp_field] = {\n \"$subtract\": [\n \"$%s\" % self._config.timestamp_field,\n {\"$mod\": [\"$%s\" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}\n ]}\n group_dict['succeeded'] = '$succeeded'\n\n res = clean_data.aggregate([\n {'$project': project_dict},\n {'$match': filter_dict},\n {'$group': {\n \"_id\": group_dict,\n 'count': {'$sum': 1},\n \"request_ids\": {\"$push\": \"$_id\"}}}],\n allowDiskUse=True, maxTimeMS=14400000)\n\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_failed_request_ratio_model_start \")\n results = []\n for item_tmp in res:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_failed_request_ratio_model \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_failed_request_ratio_model_end \")\n # _tmp_\n\n # return self._generate_dataframe(list(res))\n return self._generate_dataframe(results)\n\n def _aggregate_data_for_duplicate_message_id_model(self, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):\n # create connection\n clean_data = self._get_clean_data_collection()\n\n # nested fields need to be projected (select field from client if, exists, else from producer)\n project_dict = self._get_clean_data_projection_dict()\n\n # conditions to filter the data before processing\n filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]\n if len(ids_to_exclude) > 0:\n id_exclude_query = {'_id': {'$nin': ids_to_exclude}}\n filter_dict_elems.append(id_exclude_query)\n if start_time is not None:\n start_time_query = {self._config.timestamp_field: {\"$gte\": start_time}}\n filter_dict_elems.append(start_time_query)\n if end_time is not None:\n end_time_query = {self._config.timestamp_field: {\"$lt\": end_time}}\n filter_dict_elems.append(end_time_query)\n if len(filter_dict_elems) == 1:\n filter_dict = filter_dict_elems[0]\n elif len(filter_dict_elems) > 1:\n filter_dict = {\"$and\": filter_dict_elems}\n\n # set up elements to group by (service call fields and temporal 
aggregation window)\n group_dict = {col: \"$%s\" % col for col in self._config.service_call_fields}\n group_dict[self._config.timestamp_field] = {\n \"$subtract\": [\n \"$%s\" % self._config.timestamp_field,\n {\"$mod\": [\"$%s\" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}\n ]}\n group_dict['messageId'] = '$messageId'\n\n res = clean_data.aggregate([\n {'$project': project_dict},\n {'$match': filter_dict},\n {'$group': {\"_id\": group_dict,\n 'message_id_count': {'$sum': 1},\n \"request_ids\": {\"$push\": \"$_id\"}}},\n {'$match': {'message_id_count': {\"$gt\": 1}}}],\n allowDiskUse=True, maxTimeMS=14400000)\n\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_duplicate_message_id_model_start \")\n results = []\n for item_tmp in res:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_duplicate_message_id_model \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_duplicate_message_id_model_end \")\n # _tmp_\n\n # return self._generate_dataframe(list(res))\n return self._generate_dataframe(results)\n\n def _aggregate_data_for_time_sync_model(self, relevant_metric, threshold, agg_minutes=60, start_time=None, end_time=None, ids_to_exclude=[]):\n # create connection\n clean_data = self._get_clean_data_collection()\n\n # nested fields need to be projected (select field from client if, exists, else from producer)\n project_dict = self._get_clean_data_projection_dict()\n\n # conditions to filter the data before processing\n filter_dict_elems = [{'succeeded': True, 'correctorStatus': 'done'}]\n if len(ids_to_exclude) > 0:\n id_exclude_query = {'_id': {'$nin': ids_to_exclude}}\n filter_dict_elems.append(id_exclude_query)\n if start_time is not None:\n start_time_query = {self._config.timestamp_field: {\"$gte\": start_time}}\n filter_dict_elems.append(start_time_query)\n if end_time is not None:\n end_time_query = {self._config.timestamp_field: {\"$lt\": end_time}}\n filter_dict_elems.append(end_time_query)\n if len(filter_dict_elems) == 1:\n filter_dict = filter_dict_elems[0]\n elif len(filter_dict_elems) > 1:\n filter_dict = {\"$and\": filter_dict_elems}\n\n # set up elements to group by (service call fields and temporal aggregation window)\n group_dict = {col: \"$%s\" % col for col in self._config.service_call_fields}\n group_dict[self._config.timestamp_field] = {\n \"$subtract\": [\n \"$%s\" % self._config.timestamp_field,\n {\"$mod\": [\"$%s\" % self._config.timestamp_field, 1000 * 60 * agg_minutes]}\n ]}\n\n res = clean_data.aggregate([\n {'$project': project_dict},\n {'$match': filter_dict},\n {'$group': {\"_id\": group_dict,\n 'request_count': {'$sum': 1},\n \"docs\": {\"$push\":\n {relevant_metric: \"$%s\" % relevant_metric,\n \"id\": \"$_id\"}}}},\n {\"$unwind\": \"$docs\"},\n {'$match': {'docs.%s' % relevant_metric: {\"$lt\": threshold}}},\n {'$group': {\"_id\": \"$_id\",\n 'erroneous_count': {'$sum': 1},\n 'avg_erroneous_diff': {'$avg': '$docs.%s' % relevant_metric},\n \"request_count\": {\"$first\": \"$request_count\"},\n \"request_ids\": {\"$push\": \"$docs.id\"}}}\n\n ], allowDiskUse=True, maxTimeMS=14400000)\n\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_time_sync_model_start \")\n results = []\n for item_tmp in res:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_time_sync_model \" + str(item_tmp))\n results.append(item_tmp)\n 
print(datetime.datetime.now().strftime('%H:%M:%s') + \" _aggregate_data_for_time_sync_model_end \")\n # _tmp_\n\n # return self._generate_dataframe(list(res))\n return self._generate_dataframe(results)\n\n def get_request_ids_from_incidents(self, incident_status=[\"new\", \"showed\", \"normal\", \"incident\", \"viewed\"],\n relevant_anomalous_metrics=None, max_incident_creation_timestamp=None):\n filter_dict = {\"incident_status\": {\"$in\": incident_status}}\n if relevant_anomalous_metrics is not None:\n filter_dict[\"anomalous_metric\"] = {\"$in\": relevant_anomalous_metrics}\n if max_incident_creation_timestamp is not None:\n filter_dict[\"incident_creation_timestamp\"] = {\"$lte\": max_incident_creation_timestamp}\n incident_collection = self._get_incident_collection()\n # request_ids = incident_collection.distinct(\"request_ids\", filter_dict)\n request_ids = [doc['_id'] for doc in incident_collection.aggregate([{'$match': filter_dict}, {'$group': {'_id': '$request_ids'}}], allowDiskUse=True)]\n return request_ids\n\n def delete_incidents(self, field=None, value=None):\n incident_collection = self._get_incident_collection()\n if field is None or value is None:\n incident_collection.delete_many({})\n else:\n incident_collection.delete_many({field: value})\n\n def insert_incidents(self, dt_incidents):\n incident_collection = self._get_incident_collection()\n incident_collection.insert_many(dt_incidents.to_dict('records'))\n\n def get_timestamp(self, ts_type, model_type):\n ts_collection = self._get_incident_timestamp_collection()\n ts = ts_collection.find_one({\"type\": ts_type, \"model\": model_type})\n if ts:\n return ts[\"timestamp\"]\n return ts\n\n def load_model(self, model_name, version=None):\n incident_model_collection = self._get_incident_model_collection()\n\n filter_dict = {\"model_name\": model_name}\n if version is not None:\n filter_dict[\"version\"] = version\n result = incident_model_collection.find(filter_dict)\n\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" load_model_start \")\n results = []\n for item_tmp in result:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" load_model \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" load_model_end \")\n # _tmp_\n\n # return pd.DataFrame(list(result)).drop(\"_id\", axis=1)\n return pd.DataFrame(results).drop(\"_id\", axis=1)\n\n def save_model(self, df, delete_old_version=True):\n incident_model_collection = self._get_incident_model_collection()\n\n df = df.to_dict('records')\n\n if delete_old_version and len(df) > 0:\n model_name = df[0][\"model_name\"]\n incident_model_collection.delete_many({\"model_name\": model_name})\n\n incident_model_collection.insert_many(df)\n\n def set_timestamp(self, ts_type, model_type, value):\n ts_collection = self._get_incident_timestamp_collection()\n ts_collection.update({\"type\": ts_type, \"model\": model_type},\n {\"type\": ts_type, \"model\": model_type, \"timestamp\": value},\n upsert=True)\n\n def get_first_timestamps_for_service_calls(self):\n scft = self._get_service_call_first_timestamps_collection()\n # results = list(scft.find())\n # _tmp_\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" get_first_timestamps_for_service_calls_start1 \")\n results = []\n results_tmp = scft.find()\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" get_first_timestamps_for_service_calls_start2 \")\n for item_tmp in results_tmp:\n # print(datetime.datetime.now().strftime('%H:%M:%s') + \" 
get_first_timestamps_for_service_calls \" + str(item_tmp))\n results.append(item_tmp)\n print(datetime.datetime.now().strftime('%H:%M:%s') + \" get_first_timestamps_for_service_calls_end \")\n # _tmp_\n if len(results) == 0:\n return pd.DataFrame()\n data = pd.DataFrame(results).drop(\"_id\", axis=1)\n for col in [\"first_request_timestamp\", \"first_model_train_timestamp\", \"first_incident_timestamp\",\n \"first_model_retrain_timestamp\"]:\n data.loc[:, col] = pd.to_datetime(data.loc[:, col])\n return data\n\n def get_service_calls_for_train_stages(self, time_first_model, time_second_model):\n first_timestamps = self.get_first_timestamps_for_service_calls()\n\n if len(first_timestamps) == 0:\n return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()\n\n first_model_to_be_trained = first_timestamps[(pd.isnull(first_timestamps.first_model_train_timestamp)) &\n (first_timestamps.first_request_timestamp <= time_first_model)]\n model_to_be_retrained = first_timestamps[(pd.isnull(first_timestamps.first_model_retrain_timestamp)) &\n (first_timestamps.first_incident_timestamp <= time_second_model)]\n first_timestamps = first_timestamps[~pd.isnull(first_timestamps.first_model_retrain_timestamp)]\n\n return first_timestamps, first_model_to_be_trained, model_to_be_retrained\n\n def get_service_calls_for_transform_stages(self):\n first_timestamps = self.get_first_timestamps_for_service_calls()\n first_incidents_to_be_reported = first_timestamps[(pd.isnull(first_timestamps.first_incident_timestamp)) &\n (~pd.isnull(first_timestamps.first_model_train_timestamp))]\n regular_service_calls = first_timestamps[~pd.isnull(first_timestamps.first_incident_timestamp)]\n return regular_service_calls, first_incidents_to_be_reported\n\n def get_data_for_train_stages(self, sc_regular, sc_first_model, sc_second_model, relevant_anomalous_metrics,\n max_incident_creation_timestamp, last_fit_timestamp, agg_minutes, max_request_timestamp):\n\n # exclude requests that are part of a \"true\" incident\n ids_to_exclude = self.get_request_ids_from_incidents(\n incident_status=[\"incident\"],\n relevant_anomalous_metrics=relevant_anomalous_metrics,\n max_incident_creation_timestamp=max_incident_creation_timestamp)\n\n # make the timestamps correspond to the millisecond format\n if max_request_timestamp is not None:\n max_request_timestamp = max_request_timestamp.timestamp() * 1000\n if last_fit_timestamp is not None:\n last_fit_timestamp = last_fit_timestamp.timestamp() * 1000\n\n data_regular = pd.DataFrame()\n data_first_train = pd.DataFrame()\n data_first_retrain = pd.DataFrame()\n\n # for the first-time training, don't exclude anything\n if len(sc_first_model) > 0:\n if len(sc_first_model) > 100:\n data_first_train = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,\n end_time=max_request_timestamp)\n if len(data_first_train) > 0:\n data_first_train = data_first_train.merge(sc_first_model[self._config.service_call_fields])\n else:\n data_first_train = self.aggregate_data_for_historic_averages_model(\n agg_minutes=agg_minutes,\n end_time=max_request_timestamp,\n service_calls=sc_first_model[self._config.service_call_fields])\n\n # for the second model, exclude queries that were marked as \"incident\" after the first training,\n # but don't limit the start time\n if len(sc_second_model) > 0:\n if len(sc_second_model) > 100:\n data_first_retrain = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,\n end_time=max_request_timestamp,\n ids_to_exclude=ids_to_exclude)\n if 
len(data_first_retrain) > 0:\n data_first_retrain = data_first_retrain.merge(sc_second_model[self._config.service_call_fields])\n else:\n data_first_retrain = self.aggregate_data_for_historic_averages_model(\n agg_minutes=agg_minutes,\n service_calls=sc_second_model[self._config.service_call_fields],\n end_time=max_request_timestamp,\n ids_to_exclude=ids_to_exclude)\n\n # for regular training, exclude the incidents and limit the start time\n if len(sc_regular) > 0:\n data_regular = self.aggregate_data_for_historic_averages_model(\n agg_minutes=agg_minutes,\n start_time=last_fit_timestamp,\n end_time=max_request_timestamp,\n ids_to_exclude=ids_to_exclude)\n if len(data_regular) > 0:\n data_regular = data_regular.merge(sc_regular[self._config.service_call_fields])\n\n return data_regular, data_first_train, data_first_retrain\n\n def get_data_for_transform_stages(self, agg_minutes, last_transform_timestamp, current_transform_timestamp,\n sc_regular, sc_first_incidents):\n\n data_regular = pd.DataFrame()\n data_first_incidents = pd.DataFrame()\n\n # retrieve all data that have appeared after the last transform time\n data = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,\n start_time=last_transform_timestamp,\n end_time=current_transform_timestamp)\n\n if len(data) > 0:\n # exclude service calls that are not past the training period\n data_regular = data.merge(sc_regular[self._config.service_call_fields])\n\n if len(sc_first_incidents) > 100:\n # for first-time incident reporting, retrieve all data for these service calls\n data_first_incidents = self.aggregate_data_for_historic_averages_model(agg_minutes=agg_minutes,\n end_time=current_transform_timestamp)\n if len(data_first_incidents) > 0:\n data_first_incidents = data_first_incidents.merge(sc_first_incidents[self._config.service_call_fields])\n\n elif len(sc_first_incidents) > 0:\n data_first_incidents = self.aggregate_data_for_historic_averages_model(\n agg_minutes=agg_minutes,\n end_time=current_transform_timestamp,\n service_calls=sc_first_incidents[self._config.service_call_fields])\n\n return pd.concat([data_regular, data_first_incidents])\n\n def _get_incident_collection(self):\n db_client = MongoClient(self._db_config.MONGODB_URI)\n db = db_client[self._db_config.MONGODB_AD]\n return db.incident\n\n def _get_incident_model_collection(self):\n db_client = MongoClient(self._db_config.MONGODB_URI)\n db = db_client[self._db_config.MONGODB_AD]\n return db.incident_model\n\n def _get_incident_timestamp_collection(self):\n db_client = MongoClient(self._db_config.MONGODB_URI)\n db = db_client[self._db_config.MONGODB_AD]\n return db.incident_timestamps\n\n def _get_service_call_first_timestamps_collection(self):\n db_client = MongoClient(self._db_config.MONGODB_URI)\n db = db_client[self._db_config.MONGODB_AD]\n return db.service_call_first_timestamps\n\n def _get_clean_data_collection(self):\n db_client = MongoClient(self._db_config.MONGODB_URI)\n db = db_client[self._db_config.MONGODB_QD]\n return db.clean_data\n\n def _get_clean_data_projection_dict(self):\n project_dict = {col: {\"$ifNull\": [\"$client.%s\" % col, \"$producer.%s\" % col]}\n for col in self._config.relevant_cols_nested}\n for col, field1, field2 in self._config.relevant_cols_general_alternative:\n project_dict[col] = {\"$ifNull\": [\"$%s\" % field1, \"$%s\" % field2]}\n for col in self._config.relevant_cols_general:\n project_dict[col] = \"$%s\" % col\n return project_dict\n\n def _generate_dataframe(self, result):\n data = pd.DataFrame(result)\n 
if len(data) > 0:\n data = pd.concat([data, pd.DataFrame(list(data[\"_id\"]))], axis=1)\n data = data.drop([\"_id\"], axis=1)\n data.loc[:, self._config.timestamp_field] = pd.to_datetime(data.loc[:, self._config.timestamp_field], unit='ms')\n\n for col in self._config.service_call_fields:\n data.loc[:, col] = data.loc[:, col].fillna(\"-\")\n\n return data\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.isnull", "pandas.concat" ] ]
BRAINSia/MONAI
[ "04e1c345fc840f5a1b6504ee5857d5a9feb27d84" ]
[ "monai/handlers/checkpoint_loader.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport torch\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.3.0\", exact_version, \"Checkpoint\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Engine\")\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler, etc.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n\n Args:\n load_path: the file path of checkpoint, it should be a PyTorch `pth` file.\n load_dict: target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n map_location: when loading the module for distributed training/evaluation,\n need to provide an appropriate map_location argument to prevent a process\n to step into others’ devices. If map_location is missing, torch.load will\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n\n \"\"\"\n\n def __init__(\n self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None,\n ) -> None:\n assert load_path is not None, \"must provide clear path to load checkpoint.\"\n self.load_path = load_path\n assert load_dict is not None and len(load_dict) > 0, \"must provide target objects to load.\"\n self.logger = logging.getLogger(name)\n for k, v in load_dict.items():\n if hasattr(v, \"module\"):\n load_dict[k] = v.module\n self.load_dict = load_dict\n self._name = name\n self.map_location = map_location\n\n def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n if len(self.load_dict) == 1:\n key = list(self.load_dict.keys())[0]\n if not (key in checkpoint):\n checkpoint = {key: checkpoint}\n\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n" ]
[ [ "torch.load" ] ]
DeniseMak/ner-neuron
[ "d7ca8a2b1f5652b42892b4bda9b07a2e4edd09db" ]
[ "main.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author: Jie\n# @Date: 2017-06-15 14:11:08\n# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com\n# @Last Modified time: 2019-02-13 12:41:44\n\nfrom __future__ import print_function\nimport time\nimport sys\nimport argparse\nimport random\nimport torch\nimport gc\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nfrom utils.metric import get_ner_fmeasure\nfrom model.seqlabel import SeqLabel\nfrom model.sentclassifier import SentClassifier\nfrom utils.data import Data\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\n\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nDEFAULT_TRAINED_FILE = 'test_data/lstmtestglove50.9.model'\n\nseed_num = 46\nrandom.seed(seed_num)\ntorch.manual_seed(seed_num)\nnp.random.seed(seed_num)\n\ndef importance_matrix(sensitivities, data,\n print_imp=True, show_table=True, tag_to_ablate=None):\n '''\n Builds a matrix of tag sensitivities\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :return:\n '''\n\n important_lists = []\n important_nps = np.zeros(50, dtype=int)\n sensitivities = sensitivities[1:] # omit padding tag\n for i in range(len(sensitivities)):\n important_list = []\n important_np = np.zeros(50, dtype=int)\n tag_sensitivity_row = sensitivities[i]\n for j in range(len(tag_sensitivity_row)):\n most_important = np.argmax(tag_sensitivity_row)\n important_list.append(most_important)\n important_np[j] = most_important\n index = [most_important]\n tag_sensitivity_row[most_important] = np.NINF\n important_lists.append(important_list)\n important_nps = np.vstack((important_nps, important_np))\n\n important_nps = np.delete(important_nps, 0, axis=0) # delete padding tag\n np.save(\"imps.npy\",important_nps) # save importance rows for other scripts to use\n\n important_nps = np.transpose(important_nps)\n if show_table:\n sns.set()\n # Smaller than normal fonts\n sns.set(font_scale=0.5)\n x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]\n del(x_tick[0])\n ax = sns.heatmap(important_nps, annot=True, xticklabels=x_tick,\n cmap=ListedColormap(['white']), cbar=False, yticklabels=False,\n linecolor='gray', linewidths=0.4)\n title = \"Importance rankings of neurons per tag\"\n plt.title(title, fontsize=18)\n ttl = ax.title\n ttl.set_position([0.5, 1.05])\n plt.show()\n\n def trim_model_dir(model_dir):\n model_dir = model_dir.replace('/','-')\n return model_dir\n ax.figure.savefig(\"ImportanceRankings-{}.png\".format(trim_model_dir(data.model_dir)))\n if print_imp:\n imp_file = open(\"Importance-{}.txt\".format(trim_model_dir(data.model_dir)), \"w+\")\n print('Neuron importance ranking for each NER tag:')\n for i, l in enumerate(important_lists):\n tags = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]\n del(tags[0]) # remove PAD tag\n print (\"\\t{}\\t{}\".format(tags[i], l))\n imp_file.write(\"{}\\t{}\\n\".format(tags[i], l))\n imp_file.write(\"\\n\")\n np.savetxt(\"Importance-{}.tsv\".format(trim_model_dir(data.model_dir)),\n important_nps, fmt='%2.0d', delimiter='\\t')\n\n return important_nps\n\ndef heatmap_sensitivity(sensitivities,\n modelname=DEFAULT_TRAINED_FILE,\n testname=\"\",\n show_pad=False,\n show_vals=True,\n disable=False):\n '''\n Shows a heatmap for the sensitivity values, saves the heatmap to a PNG file,\n and also saves the sensitivity matrix to an .npy file,\n which we use for 
calculating correlations between models later.\n :param sensitivities: This is a matrix of [num_tags, num_neurons],\n which is [10 x 50] in our experimental configuration.\n :param disable: disable is just to turn off for debugging\n :return:\n '''\n # transpose to match chart in Figure 7. of paper\n sensitivities = np.transpose(sensitivities)\n # column 0 is the padding tag\n start = 1\n if show_pad:\n start = 0\n sensitivities = sensitivities[0:50, start:10]\n sns.set()\n # Smaller than normal fonts\n sns.set(font_scale=0.5)\n x_tick = [data.label_alphabet.get_instance(tag) for tag in sorted(data.tag_counts)]\n if show_pad: x_tick[0] = 'PAD'\n else: del(x_tick[0])\n\n # change tags' order to use in downstream correlation diagrams\n sensitivities_temp = np.zeros((50, 9))\n x_tick_output = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'B-MISC', 'I-MISC', 'O']\n for i in range(len(x_tick_output)):\n sensitivities_temp[:, i] = sensitivities[:, x_tick.index(x_tick_output[i])]\n np.save(modelname+'_sensitivities.npy', sensitivities_temp)\n \n # put sensititivites in heat map\n if not disable:\n ax = sns.heatmap(sensitivities, xticklabels=x_tick, annot=show_vals, fmt=\".2g\")\n title = \"({}): \".format(testname) + modelname\n plt.title(title, fontsize=18)\n ttl = ax.title\n ttl.set_position([0.5, 1.05])\n plt.show()\n ax.figure.savefig(modelname+\"_heatmap.png\")\n\n\ndef get_sensitivity_matrix(label, debug=True):\n '''\n Given a tag like 4: (B-PER), return the sensitivity matrix\n :param label:\n :return:\n '''\n\n avg_for_label = data.tag_contributions[label]/data.tag_counts[label]\n sum_other_counts = 0\n\n # data.tag_contributions[0] is for the padding label and can be ignored\n sum_other_contributions = np.zeros((10, 50))\n for l in data.tag_counts:\n\n if l != label and l != 0: # if l != label: (to consider the padding label which is 0)\n sum_other_counts += data.tag_counts[l]\n sum_other_contributions += data.tag_contributions[l]\n avg_for_others = sum_other_contributions/sum_other_counts\n\n s_ij = avg_for_label - avg_for_others\n s_ij_label = s_ij[label]\n return s_ij_label # was return s_ij\n\n\ndef data_initialization(data):\n data.initial_feature_alphabets()\n data.build_alphabet(data.train_dir)\n data.build_alphabet(data.dev_dir)\n data.build_alphabet(data.test_dir)\n data.fix_alphabet()\n\n\ndef predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):\n \"\"\"\n input:\n pred_variable (batch_size, sent_len): pred tag result, in numpy format\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n \"\"\"\n pred = pred_variable.cpu().data.numpy()\n gold = gold_variable.cpu().data.numpy()\n mask = mask_variable.cpu().data.numpy()\n overlaped = (pred == gold)\n if sentence_classification:\n # print(overlaped)\n # print(overlaped*pred)\n right_token = np.sum(overlaped)\n total_token = overlaped.shape[0] ## =batch_size\n else:\n right_token = np.sum(overlaped * mask)\n total_token = mask.sum()\n # print(\"right: %s, total: %s\"%(right_token, total_token))\n return right_token, total_token\n\n\ndef recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):\n \"\"\"\n input:\n pred_variable (batch_size, sent_len): pred tag result\n gold_variable (batch_size, sent_len): gold result variable\n mask_variable (batch_size, sent_len): mask variable\n \"\"\"\n pred_variable = pred_variable[word_recover]\n # print(\"reordered labels: 
{}\".format(pred_variable))\n gold_variable = gold_variable[word_recover]\n mask_variable = mask_variable[word_recover]\n batch_size = gold_variable.size(0)\n if sentence_classification:\n pred_tag = pred_variable.cpu().data.numpy().tolist()\n gold_tag = gold_variable.cpu().data.numpy().tolist()\n pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]\n gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]\n else:\n seq_len = gold_variable.size(1)\n mask = mask_variable.cpu().data.numpy()\n pred_tag = pred_variable.cpu().data.numpy()\n gold_tag = gold_variable.cpu().data.numpy()\n batch_size = mask.shape[0]\n pred_label = []\n gold_label = []\n for idx in range(batch_size):\n pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]\n gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]\n assert(len(pred)==len(gold))\n pred_label.append(pred)\n gold_label.append(gold)\n return pred_label, gold_label\n\n\ndef recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):\n \"\"\"\n input:\n pred_variable (batch_size, sent_len, nbest): pred tag result\n mask_variable (batch_size, sent_len): mask variable\n word_recover (batch_size)\n output:\n nbest_pred_label list: [batch_size, nbest, each_seq_len]\n \"\"\"\n # exit(0)\n pred_variable = pred_variable[word_recover]\n mask_variable = mask_variable[word_recover]\n batch_size = pred_variable.size(0)\n seq_len = pred_variable.size(1)\n nbest = pred_variable.size(2)\n mask = mask_variable.cpu().data.numpy()\n pred_tag = pred_variable.cpu().data.numpy()\n batch_size = mask.shape[0]\n pred_label = []\n for idx in range(batch_size):\n pred = []\n for idz in range(nbest):\n each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if mask[idx][idy] != 0]\n pred.append(each_pred)\n pred_label.append(pred)\n return pred_label\n\n\ndef lr_decay(optimizer, epoch, decay_rate, init_lr):\n lr = init_lr/(1+decay_rate*epoch)\n print(\" Learning rate is set as:\", lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return optimizer\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef evaluate(data, model, name, nbest=None, print_tag_counts=False, tag_to_ablate=None):\n '''\n\n :param data:\n :param model:\n :param name:\n :param nbest:\n :param print_tag_counts:\n :param tag_to_ablate: if this is set to a tag name, like 'B-ORG', then in the LSTM layer's forward() we ablate the\n number of neurons specified by data.ablate_num\n :return:\n '''\n ablate_list_for_tag = None\n if tag_to_ablate:\n data.ablate_tag = tag_to_ablate\n ablate_list_for_tag = data.ablate_list[tag_to_ablate]\n\n print(\"\\nEVALUATE file: {}, set={}, \\n\\t ablate_num={} tag: {} \\nablate_list_for_tag={}\".format(\n data.model_dir, name, data.current_ablate_ind, tag_to_ablate, ablate_list_for_tag))\n if name == \"train\":\n instances = data.train_Ids\n elif name == \"dev\":\n instances = data.dev_Ids\n elif name == 'test':\n instances = data.test_Ids\n elif name == 'raw':\n instances = data.raw_Ids\n else:\n print(\"Error: wrong evaluate name,\", name)\n exit(1)\n right_token = 0\n whole_token = 0\n nbest_pred_results = []\n pred_scores = []\n pred_results = []\n gold_results = []\n ## set model in eval model\n model.eval()\n\n ''' Get count of model parameters '''\n # print(\"COUNT PARAMETERS: 
{}\".format(count_parameters(model)))\n\n batch_size = data.HP_batch_size\n start_time = time.time()\n train_num = len(instances)\n total_batch = train_num//batch_size+1\n for batch_id in range(total_batch):\n start = batch_id*batch_size\n end = (batch_id+1)*batch_size\n if end > train_num:\n end = train_num\n instance = instances[start:end]\n if not instance:\n continue\n batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)\n if nbest and not data.sentence_classification:\n scores, nbest_tag_seq = model.decode_nbest(batch_word,batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)\n nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)\n nbest_pred_results += nbest_pred_result\n pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()\n ## select the best sequence to evalurate\n tag_seq = nbest_tag_seq[:,:,0]\n else:\n tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)\n\n\n\n\n pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)\n pred_results += pred_label\n gold_results += gold_label\n decode_time = time.time() - start_time\n speed = len(instances)/decode_time\n acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme, data=data)\n if nbest and not data.sentence_classification:\n return speed, acc, p, r, f, nbest_pred_results, pred_scores\n\n ''' Get per-tag sensitivity '''\n ## print(\"TOTAL BATCH ITERATIONS: {}\".format(data.iteration))\n sensitivity_matrices = [] # This will hold a row for each tag's sensitivity\n for tag in sorted(data.tag_counts):\n if print_tag_counts:\n if tag == 0:\n print(\"Padding {}: {} instances.\".format('0', data.tag_counts[tag]))\n else:\n print(\"Tag {}: {} instances.\".format(data.label_alphabet.get_instance(tag), data.tag_counts[tag]))\n sensitivity_tag = get_sensitivity_matrix(tag)\n sensitivity_matrices.append(sensitivity_tag)\n\n sensitivity_combined = np.squeeze(np.stack([sensitivity_matrices]))\n # TODO: the following line would stack multiple models' sensitivity,\n # but we don't need it unless running many different models for stats\n # data.sensitivity_matrices_combined.append(sensitivity_combined)\n return speed, acc, p, r, f, pred_results, pred_scores, sensitivity_combined\n\n\n\ndef batchify_with_label(input_batch_list, gpu, if_train=True, sentence_classification=False):\n if sentence_classification:\n return batchify_sentence_classification_with_label(input_batch_list, gpu, if_train)\n else:\n return batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train)\n\n\ndef batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):\n \"\"\"\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, sent_len, feature_num)\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. 
(batch_size, sent_len)\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, max_sent_len)\n mask: (batch_size, max_sent_len)\n \"\"\"\n batch_size = len(input_batch_list)\n words = [sent[0] for sent in input_batch_list]\n features = [np.asarray(sent[1]) for sent in input_batch_list]\n feature_num = len(features[0][0])\n chars = [sent[2] for sent in input_batch_list]\n labels = [sent[3] for sent in input_batch_list]\n word_seq_lengths = torch.LongTensor(list(map(len, words)))\n max_seq_len = word_seq_lengths.max().item()\n word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()\n label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()\n feature_seq_tensors = []\n for idx in range(feature_num):\n feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())\n # '\n ''' 517 '''\n # mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()\n mask = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).bool()\n for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):\n seqlen = seqlen.item()\n word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)\n label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)\n mask[idx, :seqlen] = torch.Tensor([1]*seqlen)\n for idy in range(feature_num):\n feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])\n word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)\n word_seq_tensor = word_seq_tensor[word_perm_idx]\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]\n\n label_seq_tensor = label_seq_tensor[word_perm_idx]\n mask = mask[word_perm_idx]\n ### deal with char\n # pad_chars (batch_size, max_seq_len)\n pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]\n length_list = [list(map(len, pad_char)) for pad_char in pad_chars]\n max_word_len = max(map(max, length_list))\n char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()\n char_seq_lengths = torch.LongTensor(length_list)\n for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):\n for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):\n # print len(word), wordlen\n char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)\n\n char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)\n char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)\n char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)\n char_seq_tensor = char_seq_tensor[char_perm_idx]\n _, char_seq_recover = char_perm_idx.sort(0, descending=False)\n _, word_seq_recover = word_perm_idx.sort(0, descending=False)\n if gpu:\n word_seq_tensor = word_seq_tensor.cuda()\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()\n word_seq_lengths = word_seq_lengths.cuda()\n word_seq_recover = word_seq_recover.cuda()\n label_seq_tensor = label_seq_tensor.cuda()\n char_seq_tensor = 
char_seq_tensor.cuda()\n char_seq_recover = char_seq_recover.cuda()\n mask = mask.cuda()\n return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask\n\n\ndef batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):\n \"\"\"\n input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]\n words: word ids for one sentence. (batch_size, sent_len)\n features: features ids for one sentence. (batch_size, feature_num), each sentence has one set of feature\n chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)\n labels: label ids for one sentence. (batch_size,), each sentence has one set of feature\n\n output:\n zero padding for word and char, with their batch length\n word_seq_tensor: (batch_size, max_sent_len) Variable\n feature_seq_tensors: [(batch_size,), ... ] list of Variable\n word_seq_lengths: (batch_size,1) Tensor\n char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable\n char_seq_lengths: (batch_size*max_sent_len,1) Tensor\n char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order\n label_seq_tensor: (batch_size, )\n mask: (batch_size, max_sent_len)\n \"\"\"\n\n batch_size = len(input_batch_list)\n words = [sent[0] for sent in input_batch_list]\n features = [np.asarray(sent[1]) for sent in input_batch_list] \n feature_num = len(features[0])\n chars = [sent[2] for sent in input_batch_list]\n labels = [sent[3] for sent in input_batch_list]\n word_seq_lengths = torch.LongTensor(list(map(len, words)))\n max_seq_len = word_seq_lengths.max().item()\n word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()\n label_seq_tensor = torch.zeros((batch_size, ), requires_grad = if_train).long()\n feature_seq_tensors = []\n for idx in range(feature_num):\n feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())\n\n ''' 517 '''\n # mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()\n mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).bool()\n label_seq_tensor = torch.LongTensor(labels)\n # exit(0)\n for idx, (seq, seqlen) in enumerate(zip(words, word_seq_lengths)):\n seqlen = seqlen.item()\n word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)\n mask[idx, :seqlen] = torch.Tensor([1]*seqlen)\n for idy in range(feature_num):\n feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])\n word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)\n word_seq_tensor = word_seq_tensor[word_perm_idx]\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]\n label_seq_tensor = label_seq_tensor[word_perm_idx]\n mask = mask[word_perm_idx]\n ### deal with char\n # pad_chars (batch_size, max_seq_len)\n pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]\n length_list = [list(map(len, pad_char)) for pad_char in pad_chars]\n max_word_len = max(map(max, length_list))\n char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()\n char_seq_lengths = torch.LongTensor(length_list)\n for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):\n for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):\n # print len(word), wordlen\n char_seq_tensor[idx, idy, :wordlen] = 
torch.LongTensor(word)\n\n char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)\n char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)\n char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)\n char_seq_tensor = char_seq_tensor[char_perm_idx]\n _, char_seq_recover = char_perm_idx.sort(0, descending=False)\n _, word_seq_recover = word_perm_idx.sort(0, descending=False)\n if gpu:\n word_seq_tensor = word_seq_tensor.cuda()\n for idx in range(feature_num):\n feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()\n word_seq_lengths = word_seq_lengths.cuda()\n word_seq_recover = word_seq_recover.cuda()\n label_seq_tensor = label_seq_tensor.cuda()\n char_seq_tensor = char_seq_tensor.cuda()\n char_seq_recover = char_seq_recover.cuda()\n mask = mask.cuda()\n return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask\n\n\ndef load_model_to_test(data, train=False, dev=True, test=False, tag=None):\n '''\n Set any ONE of train, dev, test to true, in order to evaluate on that set.\n :param data:\n :param train:\n :param dev: Default set to test, because that was what the original experiment did\n :param test:\n :return:\n '''\n\n print(\"Load pretrained model...\")\n if data.sentence_classification:\n model = SentClassifier(data)\n else:\n model = SeqLabel(data)\n model.load_state_dict(torch.load(data.pretrained_model_path))\n\n\n\n '''----------------TESTING----------------'''\n if (train):\n speed, acc, p, r, f, _,_, train_sensitivities = evaluate(data, model, \"train\")\n heatmap_sensitivity(train_sensitivities, data.pretrained_model_path, testname=\"train\")\n if data.seg:\n current_score = f\n print(\"Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\"%(speed, acc, p, r, f))\n else:\n current_score = acc\n print(\"Speed: %.2fst/s; acc: %.4f\"%(speed, acc))\n\n if (dev):\n # for tag in data.ablate_list:\n speed, acc, p, r, f, _,_, sensitivities = evaluate(\n data, model, \"dev\", tag_to_ablate=tag)\n if data.seg:\n current_score = f\n print(\"Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\" % (speed, acc, p, r, f))\n else:\n current_score = acc\n print(\"Speed: %.2fst/s; acc: %.4f\" % (speed, acc))\n\n if (data.ablate_num == 0):\n heatmap_sensitivity(sensitivities, data.pretrained_model_path, testname=\"dev\")\n importance_matrix(sensitivities, data)\n\n\n\n\n if (test):\n speed, acc, p, r, f, _,_ = evaluate(data, model, \"test\")\n if data.seg:\n print(\"Speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\"%(speed, acc, p, r, f))\n else:\n print(\"Speed: %.2fst/s; acc: %.4f\"%(speed, acc))\n\n return\n\n\ndef train(data):\n print(\"Training model...\")\n data.show_data_summary()\n save_data_name = data.model_dir +\".dset\"\n data.save(save_data_name)\n if data.sentence_classification:\n model = SentClassifier(data)\n else:\n model = SeqLabel(data)\n\n if data.optimizer.lower() == \"sgd\":\n optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"adagrad\":\n optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"adadelta\":\n optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif data.optimizer.lower() == \"rmsprop\":\n optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n elif 
data.optimizer.lower() == \"adam\":\n optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)\n else:\n print(\"Optimizer illegal: %s\"%(data.optimizer))\n exit(1)\n best_dev = -10\n # data.HP_iteration = 1\n ## start training\n for idx in range(data.HP_iteration):\n epoch_start = time.time()\n temp_start = epoch_start\n print(\"Epoch: %s/%s\" %(idx,data.HP_iteration))\n if data.optimizer == \"SGD\":\n optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)\n instance_count = 0\n sample_id = 0\n sample_loss = 0\n total_loss = 0\n right_token = 0\n whole_token = 0\n random.shuffle(data.train_Ids)\n print(\"Shuffle: first input word list:\", data.train_Ids[0][0])\n ## set model in train model\n model.train()\n model.zero_grad()\n batch_size = data.HP_batch_size\n batch_id = 0\n train_num = len(data.train_Ids)\n total_batch = train_num//batch_size+1\n for batch_id in range(total_batch):\n start = batch_id*batch_size\n end = (batch_id+1)*batch_size\n if end >train_num:\n end = train_num\n instance = data.train_Ids[start:end]\n if not instance:\n continue\n batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, True, data.sentence_classification)\n instance_count += 1\n loss, tag_seq = model.calculate_loss(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, mask)\n right, whole = predict_check(tag_seq, batch_label, mask, data.sentence_classification)\n right_token += right\n whole_token += whole\n # print(\"loss:\",loss.item())\n sample_loss += loss.item()\n total_loss += loss.item()\n if end%500 == 0:\n temp_time = time.time()\n temp_cost = temp_time - temp_start\n temp_start = temp_time\n print(\" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f\"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))\n if sample_loss > 1e8 or str(sample_loss) == \"nan\":\n print(\"ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....\")\n exit(1)\n sys.stdout.flush()\n sample_loss = 0\n loss.backward()\n optimizer.step()\n model.zero_grad()\n temp_time = time.time()\n temp_cost = temp_time - temp_start\n print(\" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f\"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))\n\n epoch_finish = time.time()\n epoch_cost = epoch_finish - epoch_start\n print(\"Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s\"%(idx, epoch_cost, train_num/epoch_cost, total_loss))\n print(\"totalloss:\", total_loss)\n if total_loss > 1e8 or str(total_loss) == \"nan\":\n print(\"ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! 
EXIT....\")\n exit(1)\n # continue\n speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, \"dev\")\n\n dev_finish = time.time()\n dev_cost = dev_finish - epoch_finish\n\n if data.seg:\n current_score = f\n print(\"Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\"%(dev_cost, speed, acc, p, r, f))\n else:\n current_score = acc\n print(\"Dev: time: %.2fs speed: %.2fst/s; acc: %.4f\"%(dev_cost, speed, acc))\n\n if current_score > best_dev:\n if data.seg:\n print(\"Exceed previous best f score:\", best_dev)\n else:\n print(\"Exceed previous best acc score:\", best_dev)\n model_name = data.model_dir +'.'+ str(idx) + \".model\"\n print(\"Save current best model in file:\", model_name)\n torch.save(model.state_dict(), model_name)\n best_dev = current_score\n # ## decode test\n speed, acc, p, r, f, _,_ , sensitivities = evaluate(data, model, \"test\")\n\n\n test_finish = time.time()\n test_cost = test_finish - dev_finish\n if data.seg:\n print(\"Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\"%(test_cost, speed, acc, p, r, f))\n else:\n print(\"Test: time: %.2fs, speed: %.2fst/s; acc: %.4f\"%(test_cost, speed, acc))\n gc.collect()\n\n\ndef load_model_decode(data, name):\n print(\"Load Model from file: {}, name={}\".format(data.model_dir, name) )\n if data.sentence_classification:\n model = SentClassifier(data)\n else:\n model = SeqLabel(data)\n # model = SeqModel(data)\n ## load model need consider if the model trained in GPU and load in CPU, or vice versa\n # if not gpu:\n # model.load_state_dict(torch.load(model_dir))\n # # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)\n # # model = torch.load(model_dir, map_location=lambda storage, loc: storage)\n # else:\n # model.load_state_dict(torch.load(model_dir))\n # # model = torch.load(model_dir)\n model.load_state_dict(torch.load(data.load_model_dir))\n\n print(\"Decode %s data, nbest: %s ...\"%(name, data.nbest))\n start_time = time.time()\n speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)\n end_time = time.time()\n time_cost = end_time - start_time\n if data.seg:\n print(\"%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f\"%(name, time_cost, speed, acc, p, r, f))\n else:\n print(\"%s: time:%.2fs, speed:%.2fst/s; acc: %.4f\"%(name, time_cost, speed, acc))\n return pred_results, pred_scores\n\n\ndef load_ablation_file():\n filename = (\"Importance-\" + data.model_dir + \".txt\").replace('/','-')\n ablate_lists = {}\n ''' B-ORG\t[4, 24, 14, 15, 19, 46, 36, 22, 27, 9, 13, 20, 25, 33, 45, 0, 35, 40, 48, 42, 44, 18, 37, 21, 32, 29, 16, 26, 11, 7, 23, 49, 12, 5, 8, 38, 2, 47, 1, 43, 31, 30, 41, 6, 28, 3, 34, 39, 10, 17]'''\n with open(filename, 'r+') as file:\n lines = file.readlines()\n for line in lines:\n line = line.strip()\n if len(line) > 0:\n (tag, list) = line.split('[')[0].strip(), line.split('[')[1].strip().replace(']','')\n list = list.split(',')\n ablate_lists[tag] = [int(i) for i in list]\n return ablate_lists\n\ndef clear_sensitivity_data():\n data.iteration = 0\n data.batch_contributions = []\n data.tag_contributions = {}\n data.tag_counts = {}\n data.sensitivity_matrices = []\n data.sensitivity_matrices_combined = []\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Tuning with NCRF++')\n # parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')\n parser.add_argument('--config', help='Configuration File', 
default='None')\n parser.add_argument('--wordemb', help='Embedding for words', default='None')\n parser.add_argument('--charemb', help='Embedding for chars', default='None')\n parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')\n parser.add_argument('--savemodel', default=\"data/model/saved_model.lstmcrf.\")\n parser.add_argument('--savedset', help='Dir of saved data setting')\n parser.add_argument('--train', default=\"data/conll03/train.bmes\") \n parser.add_argument('--dev', default=\"data/conll03/dev.bmes\" ) \n parser.add_argument('--test', default=\"data/conll03/test.bmes\") \n parser.add_argument('--seg', default=\"True\") \n parser.add_argument('--raw') \n parser.add_argument('--loadmodel')\n parser.add_argument('--output')\n parser.add_argument('--loadtotest', help='Load the model just to test it')\n parser.add_argument('--pretrainedmodelpath', help='Path to a pretrained model that you just want to test',\n default=DEFAULT_TRAINED_FILE)\n parser.add_argument('--ablate', help='how many neurons to ablate', default=0) # indicate number of neurons to ablate\n # Importance.txt is generated by importance_matrix() (automatically reading this file is a TODO)\n parser.add_argument('--ablate_file', help='list of neurons to ablate')\n\n args = parser.parse_args()\n data = Data()\n data.HP_gpu = torch.cuda.is_available()\n if args.config == 'None':\n data.train_dir = args.train \n data.dev_dir = args.dev \n data.test_dir = args.test\n data.model_dir = args.savemodel\n data.dset_dir = args.savedset\n print(\"Save dset directory:\",data.dset_dir)\n save_model_dir = args.savemodel\n data.word_emb_dir = args.wordemb\n data.char_emb_dir = args.charemb\n if args.seg.lower() == 'true':\n data.seg = True\n else:\n data.seg = False\n print(\"Seed num:\",seed_num)\n else:\n data.read_config(args.config)\n\n # adding arg for pretrained model path\n data.pretrained_model_path = args.pretrainedmodelpath\n data.ablate_num = int(args.ablate)\n # data.show_data_summary()\n status = data.status.lower()\n print(\"Seed num:\",seed_num)\n\n if status == 'train':\n print(\"MODEL: train\")\n data_initialization(data) # set up alphabets\n data.generate_instance('train')\n data.generate_instance('dev')\n data.generate_instance('test')\n data.build_pretrain_emb()\n if not args.loadtotest:\n print(\"Training model, not just testing because --loadtotest is {}\".format(args.loadtotest))\n print(\"Loading ablation file even though it's just a placeholder\")\n debug_ablation = False\n if debug_ablation:\n data.ablate_list = load_ablation_file() # TODO: file not found\n tag_list = data.ablate_list.keys()\n train(data)\n else:\n if args.ablate:\n data.ablate_num = int(args.ablate)\n print(\"Loading model to test.\")\n data.ablate_list = load_ablation_file()\n tag_list = data.ablate_list.keys()\n # todo: command line arg for specific current ablate index\n # todo: command line arg for intervals\n\n for tag in tag_list:\n data.ablate_tag = tag\n data.current_ablate_ind[tag] = 0\n data.acc_chart[data.ablate_tag] = {} # clear accuracy dict of lists for the tag\n for i in range(0, data.ablate_num + 1):\n data.current_ablate_ind[tag] = i #+= 1 # todo: option to skip by different interval like every 5\n clear_sensitivity_data()\n load_model_to_test(data, tag=tag)\n\n # print out acc_chart\n #for tag in data.ablate_list:\n print ('{} ABLATION RESULTS:'.format(tag))\n degradations = {}\n for t in tag_list:\n print(\"\\tTag: {}, Decr. 
Accs: {}\".format(t, data.acc_chart[tag][t]))\n degradations[t] = \\\n [data.acc_chart[tag][t][ind] - data.acc_chart[tag][t][0] for ind in range (0, data.ablate_num+1)]\n print(\"\\t\\tDegradation={})\".format(degradations[t]))\n if (t==tag):\n # ablation tag, so use bolder symbol\n plt.plot(degradations[t], 'bs', label=t)\n else:\n plt.plot(degradations[t], label=t)\n\n plt.title(tag, fontsize=18)\n plt.legend()\n plt.savefig(\"{}_chart.png\".format(tag))\n plt.clf() # clear the plot -was plot.show()\n\n elif status == 'decode':\n print(\"MODEL: decode\")\n data.load(data.dset_dir)\n data.read_config(args.config)\n print(data.raw_dir)\n # exit(0)\n data.show_data_summary()\n data.generate_instance('raw')\n print(\"nbest: %s\"%(data.nbest))\n decode_results, pred_scores = load_model_decode(data, 'raw')\n if data.nbest and not data.sentence_classification:\n data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')\n else:\n data.write_decoded_results(decode_results, 'raw')\n else:\n print(\"Invalid argument! Please use valid arguments! (train/test/decode)\")\n\n" ]
[ [ "torch.cuda.is_available", "torch.LongTensor", "torch.load", "numpy.save", "torch.manual_seed", "numpy.transpose", "numpy.argmax", "torch.Tensor", "numpy.vstack", "torch.zeros", "numpy.delete", "numpy.zeros", "matplotlib.pyplot.title", "numpy.stack", "matplotlib.pyplot.show", "matplotlib.pyplot.clf", "numpy.asarray", "numpy.random.seed", "numpy.sum", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.colors.ListedColormap" ] ]
0reza/mne-python
[ "da02a256423404a81929d6de278bc63d3192a280", "da02a256423404a81929d6de278bc63d3192a280" ]
[ "mne/forward/tests/test_field_interpolation.py", "mne/viz/tests/test_topo.py" ]
[ "from os import path as op\n\nimport numpy as np\nfrom numpy.polynomial import legendre\nfrom numpy.testing import (assert_allclose, assert_array_equal, assert_equal,\n assert_array_almost_equal)\nfrom scipy.interpolate import interp1d\n\nimport pytest\n\nimport mne\nfrom mne.forward import _make_surface_mapping, make_field_map\nfrom mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,\n _get_legen_table, _do_cross_dots)\nfrom mne.forward._make_forward import _create_meg_coils\nfrom mne.forward._field_interpolation import _setup_dots\nfrom mne.surface import get_meg_helmet_surf, get_head_surf\nfrom mne.datasets import testing\nfrom mne import read_evokeds, pick_types, make_fixed_length_events, Epochs\nfrom mne.io import read_raw_fif\n\n\nbase_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nevoked_fname = op.join(base_dir, 'test-ave.fif')\nraw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')\n\ndata_path = testing.data_path(download=False)\ntrans_fname = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-trans.fif')\nsubjects_dir = op.join(data_path, 'subjects')\n\n\n@testing.requires_testing_data\ndef test_field_map_ctf():\n \"\"\"Test that field mapping can be done with CTF data.\"\"\"\n raw = read_raw_fif(raw_ctf_fname).crop(0, 1)\n raw.apply_gradient_compensation(3)\n events = make_fixed_length_events(raw, duration=0.5)\n evoked = Epochs(raw, events).average()\n evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster\n # smoke test\n make_field_map(evoked, trans=trans_fname, subject='sample',\n subjects_dir=subjects_dir)\n\n\ndef test_legendre_val():\n \"\"\"Test Legendre polynomial (derivative) equivalence.\"\"\"\n rng = np.random.RandomState(0)\n # check table equiv\n xs = np.linspace(-1., 1., 1000)\n n_terms = 100\n\n # True, numpy\n vals_np = legendre.legvander(xs, n_terms - 1)\n\n # Table approximation\n for nc, interp in zip([100, 50], ['nearest', 'linear']):\n lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)\n lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,\n axis=0)\n vals_i = lut_fun(xs)\n # Need a \"1:\" here because we omit the first coefficient in our table!\n assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,\n rtol=1e-2, atol=5e-3)\n\n # Now let's look at our sums\n ctheta = rng.rand(20, 30) * 2.0 - 1.0\n beta = rng.rand(20, 30) * 0.8\n c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)\n c1.shape = beta.shape\n\n # compare to numpy\n n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]\n coeffs = np.zeros((n_terms,) + beta.shape)\n coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *\n (2.0 * n + 1.0) * (2.0 * n + 1.0) / n)\n # can't use tensor=False here b/c it isn't in old numpy\n c2 = np.empty((20, 30))\n for ci1 in range(20):\n for ci2 in range(30):\n c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],\n coeffs[:, ci1, ci2])\n assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...\n\n # compare fast and slow for MEG\n ctheta = rng.rand(20 * 30) * 2.0 - 1.0\n beta = rng.rand(20 * 30) * 0.8\n lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)\n fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)\n coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)\n lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)\n fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)\n coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, 
False)\n\n\ndef test_legendre_table():\n \"\"\"Test Legendre table calculation.\"\"\"\n # double-check our table generation\n n = 10\n for ch_type in ['eeg', 'meg']:\n lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)\n lut1 = lut1[:, :n - 1].copy()\n n_fact1 = n_fact1[:n - 1].copy()\n lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)\n assert_allclose(lut1, lut2)\n assert_allclose(n_fact1, n_fact2)\n\n\n@testing.requires_testing_data\ndef test_make_field_map_eeg():\n \"\"\"Test interpolation of EEG field onto head.\"\"\"\n evoked = read_evokeds(evoked_fname, condition='Left Auditory')\n evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads\n surf = get_head_surf('sample', subjects_dir=subjects_dir)\n # we must have trans if surface is in MRI coords\n pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')\n\n evoked.pick_types(meg=False, eeg=True)\n fmd = make_field_map(evoked, trans_fname,\n subject='sample', subjects_dir=subjects_dir)\n\n # trans is necessary for EEG only\n pytest.raises(RuntimeError, make_field_map, evoked, None,\n subject='sample', subjects_dir=subjects_dir)\n\n fmd = make_field_map(evoked, trans_fname,\n subject='sample', subjects_dir=subjects_dir)\n assert len(fmd) == 1\n assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf\n assert len(fmd[0]['ch_names']) == 59\n\n\n@testing.requires_testing_data\n@pytest.mark.slowtest\ndef test_make_field_map_meg():\n \"\"\"Test interpolation of MEG field onto helmet | head.\"\"\"\n evoked = read_evokeds(evoked_fname, condition='Left Auditory')\n info = evoked.info\n surf = get_meg_helmet_surf(info)\n # let's reduce the number of channels by a bunch to speed it up\n info['bads'] = info['ch_names'][:200]\n # bad ch_type\n pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')\n # bad mode\n pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',\n mode='foo')\n # no picks\n evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)\n pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,\n surf, 'meg')\n # bad surface def\n nn = surf['nn']\n del surf['nn']\n pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')\n surf['nn'] = nn\n cf = surf['coord_frame']\n del surf['coord_frame']\n pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')\n surf['coord_frame'] = cf\n\n # now do it with make_field_map\n evoked.pick_types(meg=True, eeg=False)\n evoked.info.normalize_proj() # avoid projection warnings\n fmd = make_field_map(evoked, None,\n subject='sample', subjects_dir=subjects_dir)\n assert (len(fmd) == 1)\n assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf\n assert len(fmd[0]['ch_names']) == 106\n\n pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')\n\n # now test the make_field_map on head surf for MEG\n evoked.pick_types(meg=True, eeg=False)\n evoked.info.normalize_proj()\n fmd = make_field_map(evoked, trans_fname, meg_surf='head',\n subject='sample', subjects_dir=subjects_dir)\n assert len(fmd) == 1\n assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf\n assert len(fmd[0]['ch_names']) == 106\n\n pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',\n subjects_dir=subjects_dir, trans=trans_fname)\n\n\n@testing.requires_testing_data\ndef test_make_field_map_meeg():\n \"\"\"Test making a M/EEG field map onto helmet & head.\"\"\"\n evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]\n picks = 
pick_types(evoked.info, meg=True, eeg=True)\n picks = picks[::10]\n evoked.pick_channels([evoked.ch_names[p] for p in picks])\n evoked.info.normalize_proj()\n maps = make_field_map(evoked, trans_fname, subject='sample',\n subjects_dir=subjects_dir, verbose='debug')\n assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head\n assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet\n # reasonable ranges\n maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0)\n mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2)\n assert_equal(len(maxs), len(maps))\n for map_, max_, min_ in zip(maps, maxs, mins):\n assert_allclose(map_['data'].max(), max_, rtol=5e-2)\n assert_allclose(map_['data'].min(), min_, rtol=5e-2)\n # calculated from correct looking mapping on 2015/12/26\n assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088,\n atol=1e-3, rtol=1e-3)\n assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245,\n atol=1e-3, rtol=1e-3)\n\n\ndef _setup_args(info):\n \"\"\"Configure args for test_as_meg_type_evoked.\"\"\"\n coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])\n int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg')\n my_origin = np.array([0., 0., 0.04])\n args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,\n ch_type='meg', lut=lut_fun, n_fact=n_fact)\n return args_dict\n\n\n@testing.requires_testing_data\ndef test_as_meg_type_evoked():\n \"\"\"Test interpolation of data on to virtual channels.\"\"\"\n # validation tests\n raw = read_raw_fif(raw_fname)\n events = mne.find_events(raw)\n picks = pick_types(raw.info, meg=True, eeg=True, stim=True,\n ecg=True, eog=True, include=['STI 014'],\n exclude='bads')\n epochs = mne.Epochs(raw, events, picks=picks)\n evoked = epochs.average()\n\n with pytest.raises(ValueError, match=\"Invalid value for the 'ch_type'\"):\n evoked.as_type('meg')\n with pytest.raises(ValueError, match=\"Invalid value for the 'ch_type'\"):\n evoked.copy().pick_types(meg='grad').as_type('meg')\n\n # channel names\n ch_names = evoked.info['ch_names']\n virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])\n virt_evoked.info.normalize_proj()\n virt_evoked = virt_evoked.as_type('mag')\n assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))\n\n # pick from and to channels\n evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])\n evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])\n\n info_from, info_to = evoked_from.info, evoked_to.info\n\n # set up things\n args1, args2 = _setup_args(info_from), _setup_args(info_to)\n args1.update(coils2=args2['coils1'])\n args2.update(coils2=args1['coils1'])\n\n # test cross dots\n cross_dots1 = _do_cross_dots(**args1)\n cross_dots2 = _do_cross_dots(**args2)\n\n assert_array_almost_equal(cross_dots1, cross_dots2.T)\n\n # correlation test\n evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()\n data1 = evoked.pick_types(meg='grad').data.ravel()\n data2 = evoked.as_type('grad').data.ravel()\n assert (np.corrcoef(data1, data2)[0, 1] > 0.95)\n\n # Do it with epochs\n virt_epochs = \\\n epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])\n virt_epochs.info.normalize_proj()\n virt_epochs = virt_epochs.as_type('mag')\n assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))\n assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)\n", "# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Denis Engemann <denis.engemann@gmail.com>\n# Martin 
Luessi <mluessi@nmr.mgh.harvard.edu>\n# Eric Larson <larson.eric.d@gmail.com>\n# Robert Luke <mail@robertluke.net>\n#\n# License: Simplified BSD\n\nfrom collections import namedtuple\nimport os.path as op\n\nimport numpy as np\nimport pytest\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom mne import (read_events, Epochs, pick_channels_evoked, read_cov,\n compute_proj_evoked)\nfrom mne.channels import read_layout\nfrom mne.io import read_raw_fif\nfrom mne.time_frequency.tfr import AverageTFR\nfrom mne.utils import _record_warnings\n\nfrom mne.viz import (plot_topo_image_epochs, _get_presser,\n mne_analyze_colormap, plot_evoked_topo)\nfrom mne.viz.evoked import _line_plot_onselect\nfrom mne.viz.utils import _fake_click\n\nfrom mne.viz.topo import (_plot_update_evoked_topo_proj, iter_topography,\n _imshow_tfr)\n\nbase_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nevoked_fname = op.join(base_dir, 'test-ave.fif')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nevent_name = op.join(base_dir, 'test-eve.fif')\ncov_fname = op.join(base_dir, 'test-cov.fif')\nevent_id, tmin, tmax = 1, -0.2, 0.2\nlayout = read_layout('Vectorview-all')\n\n\ndef _get_events():\n \"\"\"Get events.\"\"\"\n return read_events(event_name)\n\n\ndef _get_picks(raw):\n \"\"\"Get picks.\"\"\"\n return [0, 1, 2, 6, 7, 8, 306, 340, 341, 342] # take a only few channels\n\n\ndef _get_epochs():\n \"\"\"Get epochs.\"\"\"\n raw = read_raw_fif(raw_fname)\n raw.add_proj([], remove_existing=True)\n events = _get_events()\n picks = _get_picks(raw)\n # bad proj warning\n epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)\n return epochs\n\n\ndef _get_epochs_delayed_ssp():\n \"\"\"Get epochs with delayed SSP.\"\"\"\n raw = read_raw_fif(raw_fname)\n events = _get_events()\n picks = _get_picks(raw)\n reject = dict(mag=4e-12)\n with pytest.warns(RuntimeWarning, match='projection'):\n epochs_delayed_ssp = Epochs(\n raw, events[:10], event_id, tmin, tmax, picks=picks,\n proj='delayed', reject=reject)\n return epochs_delayed_ssp\n\n\ndef test_plot_joint():\n \"\"\"Test joint plot.\"\"\"\n evoked = _get_epochs().average()\n evoked.plot_joint(ts_args=dict(time_unit='s'),\n topomap_args=dict(time_unit='s'))\n\n def return_inds(d): # to test function kwarg to zorder arg of evoked.plot\n return list(range(d.shape[0]))\n evoked.plot_joint(title='test', topomap_args=dict(contours=0, res=8,\n time_unit='ms'),\n ts_args=dict(spatial_colors=True, zorder=return_inds,\n time_unit='s'))\n with pytest.raises(ValueError, match='If one of `ts_args` and'):\n evoked.plot_joint(ts_args=dict(axes=True, time_unit='s'))\n\n axes = plt.subplots(nrows=3)[-1].flatten().tolist()\n evoked.plot_joint(times=[0], picks=[6, 7, 8], ts_args=dict(axes=axes[0]),\n topomap_args={\"axes\": axes[1:], \"time_unit\": \"s\"})\n with pytest.raises(ValueError, match='of length 4'):\n evoked.plot_joint(picks=[6, 7, 8], ts_args=dict(axes=axes[0]),\n topomap_args=dict(axes=axes[2:]))\n plt.close('all')\n\n # test proj options\n assert len(evoked.info['projs']) == 0\n evoked.pick_types(meg=True)\n evoked.add_proj(compute_proj_evoked(\n evoked, n_mag=1, n_grad=1, meg='combined'))\n assert len(evoked.info['projs']) == 1\n with pytest.raises(ValueError, match='must match ts_args'):\n evoked.plot_joint(ts_args=dict(proj=True),\n topomap_args=dict(proj=False))\n evoked.plot_joint(ts_args=dict(proj='reconstruct'),\n topomap_args=dict(proj='reconstruct'))\n plt.close('all')\n\n # test sEEG (gh:8733)\n evoked.del_proj().pick_types('mag') # avoid 
overlapping positions error\n mapping = {ch_name: 'seeg' for ch_name in evoked.ch_names}\n with pytest.warns(RuntimeWarning, match='The unit .* has changed from .*'):\n evoked.set_channel_types(mapping)\n evoked.plot_joint()\n\n # test DBS (gh:8739)\n evoked = _get_epochs().average().pick_types('mag')\n mapping = {ch_name: 'dbs' for ch_name in evoked.ch_names}\n with pytest.warns(RuntimeWarning, match='The unit for'):\n evoked.set_channel_types(mapping)\n evoked.plot_joint()\n plt.close('all')\n\n\ndef test_plot_topo():\n \"\"\"Test plotting of ERP topography.\"\"\"\n # Show topography\n evoked = _get_epochs().average()\n # should auto-find layout\n plot_evoked_topo([evoked, evoked], merge_grads=True,\n background_color='w')\n\n plot_evoked_topo([evoked, evoked], merge_grads=True,\n background_color='w', color='blue')\n\n with pytest.raises(ValueError, match='must be .*tuple, list, str,.*'):\n plot_evoked_topo([evoked, evoked], merge_grads=True,\n color=np.array([\"blue\", \"red\"]))\n\n picked_evoked = evoked.copy().pick_channels(evoked.ch_names[:3])\n picked_evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)\n picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])\n\n # test scaling\n for ylim in [dict(mag=[-600, 600]), None]:\n plot_evoked_topo([picked_evoked] * 2, layout, ylim=ylim)\n\n for evo in [evoked, [evoked, picked_evoked]]:\n pytest.raises(ValueError, plot_evoked_topo, evo, layout,\n color=['y', 'b'])\n\n evoked_delayed_ssp = _get_epochs_delayed_ssp().average()\n ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster\n picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,\n ch_names)\n fig = plot_evoked_topo(picked_evoked_delayed_ssp, layout,\n proj='interactive')\n func = _get_presser(fig)\n event = namedtuple('Event', ['inaxes', 'xdata', 'ydata'])\n func(event(inaxes=fig.axes[0], xdata=fig.axes[0]._mne_axs[0].pos[0],\n ydata=fig.axes[0]._mne_axs[0].pos[1]))\n func(event(inaxes=fig.axes[0], xdata=0, ydata=0))\n params = dict(evokeds=[picked_evoked_delayed_ssp],\n times=picked_evoked_delayed_ssp.times,\n fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])\n bools = [True] * len(params['projs'])\n with pytest.warns(RuntimeWarning, match='projection'):\n _plot_update_evoked_topo_proj(params, bools)\n\n # should auto-generate layout\n plot_evoked_topo(picked_evoked_eeg.copy(),\n fig_background=np.zeros((4, 3, 3)), proj=True,\n background_color='k')\n # Test RMS plot of grad pairs\n picked_evoked.plot_topo(merge_grads=True, background_color='w')\n plt.close('all')\n for ax, idx in iter_topography(evoked.info, legend=True):\n ax.plot(evoked.data[idx], color='red')\n # test status bar message\n if idx != -1:\n assert (evoked.ch_names[idx] in ax.format_coord(.5, .5))\n assert idx == -1\n plt.close('all')\n cov = read_cov(cov_fname)\n cov['projs'] = []\n evoked.pick_types(meg=True).plot_topo(noise_cov=cov)\n plt.close('all')\n\n # Test exclude parameter\n exclude = ['MEG 0112']\n fig = picked_evoked.plot_topo(exclude=exclude)\n n_axes_expected = len(picked_evoked.info['ch_names']) - len(exclude)\n n_axes_found = len(fig.axes[0].lines)\n assert n_axes_found == n_axes_expected\n\n # test plot_topo\n evoked.plot_topo() # should auto-find layout\n _line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data,\n evoked.times)\n plt.close('all')\n\n for ax, idx in iter_topography(evoked.info): # brief test with false\n ax.plot([0, 1, 2])\n break\n plt.close('all')\n\n\ndef test_plot_topo_nirs(fnirs_evoked):\n \"\"\"Test plotting of ERP 
topography for nirs data.\"\"\"\n fnirs_evoked.pick(picks='hbo')\n fig = plot_evoked_topo(fnirs_evoked)\n assert len(fig.axes) == 1\n plt.close('all')\n\n\ndef test_plot_topo_single_ch():\n \"\"\"Test single channel topoplot with time cursor.\"\"\"\n evoked = _get_epochs().average()\n evoked2 = evoked.copy()\n # test plotting several evokeds on different time grids\n evoked.crop(-.19, 0)\n evoked2.crop(.05, .19)\n fig = plot_evoked_topo([evoked, evoked2], background_color='w')\n # test status bar message\n ax = plt.gca()\n assert ('MEG 0113' in ax.format_coord(.065, .63))\n num_figures_before = len(plt.get_fignums())\n _fake_click(fig, fig.axes[0], (0.08, 0.65))\n assert num_figures_before + 1 == len(plt.get_fignums())\n fig = plt.gcf()\n ax = plt.gca()\n _fake_click(fig, ax, (.5, .5), kind='motion') # cursor should appear\n assert (isinstance(ax._cursorline, matplotlib.lines.Line2D))\n _fake_click(fig, ax, (1.5, 1.5), kind='motion') # cursor should disappear\n assert ax._cursorline is None\n plt.close('all')\n\n\ndef test_plot_topo_image_epochs():\n \"\"\"Test plotting of epochs image topography.\"\"\"\n title = 'ERF images - MNE sample data'\n epochs = _get_epochs()\n epochs.load_data()\n cmap = mne_analyze_colormap(format='matplotlib')\n data_min = epochs._data.min()\n plt.close('all')\n fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,\n colorbar=True, title=title, cmap=cmap)\n assert epochs._data.min() == data_min\n num_figures_before = len(plt.get_fignums())\n _fake_click(fig, fig.axes[0], (0.08, 0.64))\n assert num_figures_before + 1 == len(plt.get_fignums())\n # test for auto-showing a colorbar when only 1 sensor type\n ep = epochs.copy().pick_types(meg=False, eeg=True)\n fig = plot_topo_image_epochs(ep, vmin=None, vmax=None, colorbar=None,\n cmap=cmap)\n ax = [x for x in fig.get_children() if isinstance(x, matplotlib.axes.Axes)]\n # include inset axes (newer MPL)\n ax.extend(y for x in ax for y in x.get_children()\n if isinstance(y, matplotlib.axes.Axes))\n qm_cmap = [y.cmap for x in ax for y in x.get_children()\n if isinstance(y, matplotlib.collections.QuadMesh)]\n assert len(qm_cmap) >= 1\n assert qm_cmap[0] is cmap\n\n\ndef test_plot_tfr_topo():\n \"\"\"Test plotting of TFR data.\"\"\"\n epochs = _get_epochs()\n n_freqs = 3\n nave = 1\n data = np.random.RandomState(0).randn(len(epochs.ch_names),\n n_freqs, len(epochs.times))\n tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)\n plt.close('all')\n fig = tfr.plot_topo(baseline=(None, 0), mode='ratio',\n title='Average power', vmin=0., vmax=14.)\n\n # test opening tfr by clicking\n num_figures_before = len(plt.get_fignums())\n # could use np.reshape(fig.axes[-1].images[0].get_extent(), (2, 2)).mean(1)\n with pytest.warns(RuntimeWarning, match='not masking'):\n _fake_click(fig, fig.axes[0], (0.08, 0.65))\n assert num_figures_before + 1 == len(plt.get_fignums())\n plt.close('all')\n\n tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')\n pytest.raises(ValueError, tfr.plot, [4], yscale='lin', show=False)\n\n # nonuniform freqs\n freqs = np.logspace(*np.log10([3, 10]), num=3)\n tfr = AverageTFR(epochs.info, data, epochs.times, freqs, nave)\n fig = tfr.plot([4], baseline=(None, 0), mode='mean', vmax=14., show=False)\n assert fig[0].axes[0].get_yaxis().get_scale() == 'log'\n\n # one timesample\n tfr = AverageTFR(epochs.info, data[:, :, [0]], epochs.times[[1]],\n freqs, nave)\n with _record_warnings(): # matplotlib equal left/right\n tfr.plot([4], baseline=None, vmax=14., 
show=False, yscale='linear')\n\n # one frequency bin, log scale required: as it doesn't make sense\n # to plot log scale for one value, we test whether yscale is set to linear\n vmin, vmax = 0., 2.\n fig, ax = plt.subplots()\n tmin, tmax = epochs.times[0], epochs.times[-1]\n with pytest.warns(RuntimeWarning, match='not masking'):\n _imshow_tfr(ax, 3, tmin, tmax, vmin, vmax, None, tfr=data[:, [0], :],\n freq=freqs[[-1]], x_label=None, y_label=None,\n colorbar=False, cmap=('RdBu_r', True), yscale='log')\n fig = plt.gcf()\n assert fig.axes[0].get_yaxis().get_scale() == 'linear'\n\n # ValueError when freq[0] == 0 and yscale == 'log'\n these_freqs = freqs[:3].copy()\n these_freqs[0] = 0\n with pytest.warns(RuntimeWarning, match='not masking'):\n pytest.raises(ValueError, _imshow_tfr, ax, 3, tmin, tmax, vmin, vmax,\n None, tfr=data[:, :3, :], freq=these_freqs, x_label=None,\n y_label=None, colorbar=False, cmap=('RdBu_r', True),\n yscale='log')\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.empty", "numpy.cumprod", "numpy.zeros", "numpy.testing.assert_equal", "numpy.random.RandomState", "numpy.sum", "numpy.testing.assert_array_equal", "numpy.polynomial.legendre.legvander", "numpy.testing.assert_array_almost_equal", "numpy.arange", "numpy.corrcoef", "numpy.linspace", "numpy.polynomial.legendre.legval" ], [ "numpy.array", "numpy.zeros", "numpy.random.RandomState", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.get_fignums", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.log10", "matplotlib.pyplot.gca" ] ]
sczyz/radioxenon_ml
[ "73398f0060e88616c7652a72bdedf7f93ea17a20" ]
[ "ml_rxe.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 9 14:25:47 2018\n\n@author: Steven\n\"\"\"\nimport sys\nimport argparse\n\nfrom radioxenon_ml.read_in import ml_matrix_composition as mlmc\nfrom radioxenon_ml.solve import iterate\nimport numpy as np\n\"\"\"\nimport radioxenon_ml.read_in.ml_matrix_composition\nimport radioxenon_ml.solve.iterate\nimport radioxenon_ml.solve.variance\n\"\"\"\n\"\"\"the master file for the radioxenon_ml package\"\"\" \nparser = argparse.ArgumentParser(description='This is the master file for running the maximum likelihood package.')\nparser.add_argument('-o', '--offset', \n type=int,\n default=84,\n help='where to start the file selection from list of test files'\n )\nargs = parser.parse_args(sys.argv[1:])\n\nspectrum_file_location = 'radioxenon_ml/test_files/test'\noffset = args.offset\nerr = 0.01 #acceptable error in normalized activity\nscale_array = np.array([1,1,1,1]) #Should have elements equal to the number of isotopes\n#scale_array = np.array([0.561,0.584,0.9,0.372,0.489,0.489,1]) #scaling factor for each simulation file\n #currently taken from (Czyz, 2017)\nn = np.shape(scale_array)[0] #number of simulated spectra\n\nsimulation, experiment, totcount = mlmc.form_matrix(spectrum_file_location,scale_array,n,offset); #known issue: requires UTF-8 encoding\n#simulation, experiment = mlmc.scale_matrix(simulation_unscaled,experiment_unscaled,)\n\nA,J,K,q=iterate.iterate(simulation, experiment, err)\nprint(\"\\n_____________________________________\\nTotal activity percents = \" + str(A*100))" ]
[ [ "numpy.array", "numpy.shape" ] ]
JGoldstone/colour
[ "6829b363d5f0682bff0f4826995e7ceac189ff28", "6829b363d5f0682bff0f4826995e7ceac189ff28", "6829b363d5f0682bff0f4826995e7ceac189ff28" ]
[ "colour/recovery/tests/test__init__.py", "colour/geometry/tests/test_section.py", "colour/algebra/regression.py" ]
[ "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDefines the unit tests for the :mod:`colour.recovery` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.colorimetry import (\n MSDS_CMFS,\n SDS_ILLUMINANTS,\n SpectralShape,\n reshape_msds,\n reshape_sd,\n sd_to_XYZ_integration,\n)\nfrom colour.recovery import XYZ_to_sd\nfrom colour.utilities import domain_range_scale\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = [\n 'TestXYZ_to_sd',\n]\n\n\nclass TestXYZ_to_sd(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.recovery.XYZ_to_sd` definition unit tests\n methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialises common tests attributes.\n \"\"\"\n\n # pylint: disable=E1102\n self._cmfs = reshape_msds(\n MSDS_CMFS['CIE 1931 2 Degree Standard Observer'],\n SpectralShape(360, 780, 10))\n\n self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)\n\n def test_domain_range_scale_XYZ_to_sd(self):\n \"\"\"\n Tests :func:`colour.recovery.XYZ_to_sd` definition domain\n and range scale support.\n \"\"\"\n\n XYZ = np.array([0.20654008, 0.12197225, 0.05136952])\n m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018',\n 'Smits 1999')\n v = [\n sd_to_XYZ_integration(\n XYZ_to_sd(\n XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65),\n self._cmfs, self._sd_D65) for method in m\n ]\n\n d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))\n for method, value in zip(m, v):\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n sd_to_XYZ_integration(\n XYZ_to_sd(\n XYZ * factor_a,\n method,\n cmfs=self._cmfs,\n illuminant=self._sd_D65), self._cmfs,\n self._sd_D65),\n value * factor_b,\n decimal=7)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the unit tests for the :mod:`colour.geometry.section` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.geometry.section import (\n edges_to_chord,\n close_chord,\n unique_vertices,\n)\nfrom colour.geometry import primitive_cube, hull_section\nfrom colour.utilities import is_trimesh_installed\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = [\n 'TestEdgesToChord',\n 'TestCloseChord',\n 'TestUniqueVertices',\n 'TestHullSection',\n]\n\n\nclass TestEdgesToChord(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.geometry.section.edges_to_chord` definition unit\n tests methods.\n \"\"\"\n\n def test_edges_to_chord(self):\n \"\"\"\n Tests :func:`colour.geometry.section.edges_to_chord` definition.\n \"\"\"\n\n edges = np.array([\n [[0.0, -0.5, 0.0], [0.5, -0.5, 0.0]],\n [[-0.5, -0.5, 0.0], [0.0, -0.5, 0.0]],\n [[0.5, 0.5, 0.0], [0.0, 0.5, 0.0]],\n [[0.0, 0.5, 0.0], [-0.5, 0.5, 0.0]],\n [[-0.5, 0.0, 0.0], [-0.5, -0.5, 0.0]],\n [[-0.5, 0.5, 0.0], [-0.5, 0.0, 0.0]],\n [[0.5, -0.5, 0.0], [0.5, 0.0, 0.0]],\n [[0.5, 0.0, 0.0], [0.5, 0.5, 0.0]],\n ])\n\n np.testing.assert_almost_equal(\n edges_to_chord(edges),\n np.array([\n [0.0, -0.5, 0.0],\n [0.5, -0.5, 0.0],\n [0.5, -0.5, -0.0],\n 
[0.5, 0.0, -0.0],\n [0.5, 0.0, -0.0],\n [0.5, 0.5, -0.0],\n [0.5, 0.5, 0.0],\n [0.0, 0.5, 0.0],\n [0.0, 0.5, 0.0],\n [-0.5, 0.5, 0.0],\n [-0.5, 0.5, -0.0],\n [-0.5, 0.0, -0.0],\n [-0.5, 0.0, -0.0],\n [-0.5, -0.5, -0.0],\n [-0.5, -0.5, 0.0],\n [0.0, -0.5, 0.0],\n ]))\n\n np.testing.assert_almost_equal(\n edges_to_chord(edges, 5),\n np.array([\n [-0.5, 0.5, 0.0],\n [-0.5, 0.0, 0.0],\n [-0.5, 0.0, 0.0],\n [-0.5, -0.5, 0.0],\n [-0.5, -0.5, 0.0],\n [0.0, -0.5, 0.0],\n [0.0, -0.5, 0.0],\n [0.5, -0.5, 0.0],\n [0.5, -0.5, 0.0],\n [0.5, 0.0, 0.0],\n [0.5, 0.0, 0.0],\n [0.5, 0.5, 0.0],\n [0.5, 0.5, 0.0],\n [0.0, 0.5, 0.0],\n [0.0, 0.5, 0.0],\n [-0.5, 0.5, 0.0],\n ]))\n\n\nclass TestCloseChord(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.geometry.section.close_chord` definition unit tests\n methods.\n \"\"\"\n\n def test_close_chord(self):\n \"\"\"\n Tests :func:`colour.geometry.section.close_chord` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n close_chord(np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5]])),\n np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0]]))\n\n\nclass TestUniqueVertices(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.geometry.section.unique_vertices` definition unit\n tests methods.\n \"\"\"\n\n def test_unique_vertices(self):\n \"\"\"\n Tests :func:`colour.geometry.section.unique_vertices` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n unique_vertices(\n np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0]])),\n np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]))\n\n np.testing.assert_almost_equal(\n unique_vertices(\n np.array([[0.0, 0.51, 0.0], [0.0, 0.0, 0.51], [0.0, 0.52,\n 0.0]]), 1),\n np.array([[0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]))\n\n\nclass TestHullSection(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.geometry.section.hull_section` definition unit tests\n methods.\n \"\"\"\n\n def test_hull_section(self):\n \"\"\"\n Tests :func:`colour.geometry.section.hull_section` definition.\n \"\"\"\n\n if not is_trimesh_installed: # pragma: no cover\n return\n\n import trimesh\n\n vertices, faces, _outline = primitive_cube(1, 1, 1, 2, 2, 2)\n hull = trimesh.Trimesh(vertices['position'], faces, process=False)\n\n np.testing.assert_almost_equal(\n hull_section(hull, origin=0),\n np.array([\n [0.0, -0.5, 0.0],\n [0.5, -0.5, 0.0],\n [0.5, 0.0, 0.0],\n [0.5, 0.5, 0.0],\n [0.0, 0.5, 0.0],\n [-0.5, 0.5, 0.0],\n [-0.5, 0.0, 0.0],\n [-0.5, -0.5, 0.0],\n [0.0, -0.5, 0.0],\n ]))\n\n np.testing.assert_almost_equal(\n hull_section(hull, axis='+x', origin=0),\n np.array([\n [0.0, 0.0, -0.5],\n [0.0, 0.5, -0.5],\n [0.0, 0.5, 0.0],\n [0.0, 0.5, 0.5],\n [0.0, 0.0, 0.5],\n [0.0, -0.5, 0.5],\n [0.0, -0.5, 0.0],\n [0.0, -0.5, -0.5],\n [0.0, 0.0, -0.5],\n ]))\n\n np.testing.assert_almost_equal(\n hull_section(hull, axis='+y', origin=0),\n np.array([\n [0.0, 0.0, -0.5],\n [-0.5, 0.0, -0.5],\n [-0.5, 0.0, 0.0],\n [-0.5, 0.0, 0.5],\n [0.0, 0.0, 0.5],\n [0.5, 0.0, 0.5],\n [0.5, 0.0, 0.0],\n [0.5, 0.0, -0.5],\n [0.0, 0.0, -0.5],\n ]))\n\n hull.vertices = (hull.vertices + 0.5) * 2\n np.testing.assert_almost_equal(\n hull_section(hull, origin=0.5, normalise=True),\n np.array([\n [1.0, 0.0, 1.0],\n [2.0, 0.0, 1.0],\n [2.0, 1.0, 1.0],\n [2.0, 2.0, 1.0],\n [1.0, 2.0, 1.0],\n [0.0, 2.0, 1.0],\n [0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0],\n [1.0, 0.0, 1.0],\n ]))\n\n self.assertRaises(ValueError, hull_section, hull, origin=-1)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nRegression\n==========\n\nDefines various objects to perform 
regression:\n\n- :func:`colour.algebra.least_square_mapping_MoorePenrose`: *Least-squares*\n mapping using *Moore-Penrose* inverse.\n\nReferences\n----------\n- :cite:`Finlayson2015` : Finlayson, G. D., MacKiewicz, M., & Hurlbert, A.\n (2015). Color Correction Using Root-Polynomial Regression. IEEE\n Transactions on Image Processing, 24(5), 1460-1470.\n doi:10.1109/TIP.2015.2405336\n\"\"\"\n\nimport numpy as np\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = [\n 'least_square_mapping_MoorePenrose',\n]\n\n\ndef least_square_mapping_MoorePenrose(y, x):\n \"\"\"\n Computes the *least-squares* mapping from dependent variable :math:`y` to\n independent variable :math:`x` using *Moore-Penrose* inverse.\n\n Parameters\n ----------\n y : array_like\n Dependent and already known :math:`y` variable.\n x : array_like, optional\n Independent :math:`x` variable(s) values corresponding with :math:`y`\n variable.\n\n Returns\n -------\n ndarray\n *Least-squares* mapping.\n\n References\n ----------\n :cite:`Finlayson2015`\n\n Examples\n --------\n >>> prng = np.random.RandomState(2)\n >>> y = prng.random_sample((24, 3))\n >>> x = y + (prng.random_sample((24, 3)) - 0.5) * 0.5\n >>> least_square_mapping_MoorePenrose(y, x) # doctest: +ELLIPSIS\n array([[ 1.0526376..., 0.1378078..., -0.2276339...],\n [ 0.0739584..., 1.0293994..., -0.1060115...],\n [ 0.0572550..., -0.2052633..., 1.1015194...]])\n \"\"\"\n\n y = np.atleast_2d(y)\n x = np.atleast_2d(x)\n\n return np.dot(np.transpose(x), np.linalg.pinv(np.transpose(y)))\n" ]
[ [ "numpy.array" ], [ "numpy.array" ], [ "numpy.transpose", "numpy.atleast_2d" ] ]
3DAlgoLab/pyqtgraph
[ "6b4385ce0d0f9078aa22e2e27aa5307271e95ae1", "6b4385ce0d0f9078aa22e2e27aa5307271e95ae1" ]
[ "examples/VideoSpeedTest.py", "examples/multiplePlotSpeedTest.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTests the speed of image updates for an ImageItem and RawImageWidget.\nThe speed will generally depend on the type of data being shown, whether\nit is being scaled and/or converted by lookup table, and whether OpenGL\nis used by the view widget\n\"\"\"\n\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\nimport argparse\nimport sys\n\nimport numpy as np\n\nimport pyqtgraph as pg\nimport pyqtgraph.ptime as ptime\nfrom pyqtgraph.Qt import QtGui, QtCore, QT_LIB\n\npg.setConfigOption('imageAxisOrder', 'row-major')\n\nimport importlib\nui_template = importlib.import_module(f'VideoTemplate_{QT_LIB.lower()}')\n\ntry:\n import cupy as cp\n pg.setConfigOption(\"useCupy\", True)\n _has_cupy = True\nexcept ImportError:\n cp = None\n _has_cupy = False\n\ntry:\n import numba\n _has_numba = True\nexcept ImportError:\n numba = None\n _has_numba = False\n\ntry:\n from pyqtgraph.widgets.RawImageWidget import RawImageGLWidget\nexcept ImportError:\n RawImageGLWidget = None\n\nparser = argparse.ArgumentParser(description=\"Benchmark for testing video performance\")\nparser.add_argument('--cuda', default=False, action='store_true', help=\"Use CUDA to process on the GPU\", dest=\"cuda\")\nparser.add_argument('--dtype', default='uint8', choices=['uint8', 'uint16', 'float'], help=\"Image dtype (uint8, uint16, or float)\")\nparser.add_argument('--frames', default=3, type=int, help=\"Number of image frames to generate (default=3)\")\nparser.add_argument('--image-mode', default='mono', choices=['mono', 'rgb'], help=\"Image data mode (mono or rgb)\", dest='image_mode')\nparser.add_argument('--levels', default=None, type=lambda s: tuple([float(x) for x in s.split(',')]), help=\"min,max levels to scale monochromatic image dynamic range, or rmin,rmax,gmin,gmax,bmin,bmax to scale rgb\")\nparser.add_argument('--lut', default=False, action='store_true', help=\"Use color lookup table\")\nparser.add_argument('--lut-alpha', default=False, action='store_true', help=\"Use alpha color lookup table\", dest='lut_alpha')\nparser.add_argument('--size', default='512x512', type=lambda s: tuple([int(x) for x in s.split('x')]), help=\"WxH image dimensions default='512x512'\")\nargs = parser.parse_args(sys.argv[1:])\n\nif RawImageGLWidget is not None:\n # don't limit frame rate to vsync\n sfmt = QtGui.QSurfaceFormat()\n sfmt.setSwapInterval(0)\n QtGui.QSurfaceFormat.setDefaultFormat(sfmt)\n\napp = pg.mkQApp(\"Video Speed Test Example\")\n\nwin = QtGui.QMainWindow()\nwin.setWindowTitle('pyqtgraph example: VideoSpeedTest')\nui = ui_template.Ui_MainWindow()\nui.setupUi(win)\nwin.show()\n\nif RawImageGLWidget is None:\n ui.rawGLRadio.setEnabled(False)\n ui.rawGLRadio.setText(ui.rawGLRadio.text() + \" (OpenGL not available)\")\nelse:\n ui.rawGLImg = RawImageGLWidget()\n ui.stack.addWidget(ui.rawGLImg)\n\n# read in CLI args\nui.cudaCheck.setChecked(args.cuda and _has_cupy)\nui.cudaCheck.setEnabled(_has_cupy)\nui.numbaCheck.setChecked(_has_numba and pg.getConfigOption(\"useNumba\"))\nui.numbaCheck.setEnabled(_has_numba)\nui.framesSpin.setValue(args.frames)\nui.widthSpin.setValue(args.size[0])\nui.heightSpin.setValue(args.size[1])\nui.dtypeCombo.setCurrentText(args.dtype)\nui.rgbCheck.setChecked(args.image_mode=='rgb')\nui.maxSpin1.setOpts(value=255, step=1)\nui.minSpin1.setOpts(value=0, step=1)\nlevelSpins = [ui.minSpin1, ui.maxSpin1, ui.minSpin2, ui.maxSpin2, ui.minSpin3, ui.maxSpin3]\nif args.cuda and _has_cupy:\n xp = cp\nelse:\n xp = np\nif args.levels is None:\n 
ui.scaleCheck.setChecked(False)\n ui.rgbLevelsCheck.setChecked(False)\nelse:\n ui.scaleCheck.setChecked(True)\n if len(args.levels) == 2:\n ui.rgbLevelsCheck.setChecked(False)\n ui.minSpin1.setValue(args.levels[0])\n ui.maxSpin1.setValue(args.levels[1])\n elif len(args.levels) == 6:\n ui.rgbLevelsCheck.setChecked(True)\n for spin,val in zip(levelSpins, args.levels):\n spin.setValue(val)\n else:\n raise ValueError(\"levels argument must be 2 or 6 comma-separated values (got %r)\" % (args.levels,))\nui.lutCheck.setChecked(args.lut)\nui.alphaCheck.setChecked(args.lut_alpha)\n\n\n#ui.graphicsView.useOpenGL() ## buggy, but you can try it if you need extra speed.\n\nvb = pg.ViewBox()\nui.graphicsView.setCentralItem(vb)\nvb.setAspectLocked()\nimg = pg.ImageItem()\nvb.addItem(img)\n\n\n\nLUT = None\ndef updateLUT():\n global LUT, ui\n dtype = ui.dtypeCombo.currentText()\n if dtype == 'uint8':\n n = 256\n else:\n n = 4096\n LUT = ui.gradient.getLookupTable(n, alpha=ui.alphaCheck.isChecked())\n if _has_cupy and xp == cp:\n LUT = cp.asarray(LUT)\nui.gradient.sigGradientChanged.connect(updateLUT)\nupdateLUT()\n\nui.alphaCheck.toggled.connect(updateLUT)\n\ndef updateScale():\n global ui, levelSpins\n if ui.rgbLevelsCheck.isChecked():\n for s in levelSpins[2:]:\n s.setEnabled(True)\n else:\n for s in levelSpins[2:]:\n s.setEnabled(False)\n\nupdateScale()\n\nui.rgbLevelsCheck.toggled.connect(updateScale)\n\ncache = {}\ndef mkData():\n with pg.BusyCursor():\n global data, cache, ui, xp\n frames = ui.framesSpin.value()\n width = ui.widthSpin.value()\n height = ui.heightSpin.value()\n cacheKey = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked(), frames, width, height)\n if cacheKey not in cache:\n if cacheKey[0] == 'uint8':\n dt = xp.uint8\n loc = 128\n scale = 64\n mx = 255\n elif cacheKey[0] == 'uint16':\n dt = xp.uint16\n loc = 4096\n scale = 1024\n mx = 2**16 - 1\n elif cacheKey[0] == 'float':\n dt = xp.float32\n loc = 1.0\n scale = 0.1\n mx = 1.0\n else:\n raise ValueError(f\"unable to handle dtype: {cacheKey[0]}\")\n\n chan_shape = (height, width)\n if ui.rgbCheck.isChecked():\n frame_shape = chan_shape + (3,)\n else:\n frame_shape = chan_shape\n data = xp.empty((frames,) + frame_shape, dtype=dt)\n view = data.reshape((-1,) + chan_shape)\n for idx in range(view.shape[0]):\n subdata = xp.random.normal(loc=loc, scale=scale, size=chan_shape)\n # note: gaussian filtering has been removed as it slows down array\n # creation greatly.\n if cacheKey[0] != 'float':\n xp.clip(subdata, 0, mx, out=subdata)\n view[idx] = subdata\n\n data[:, 10:50, 10] = mx\n data[:, 48, 9:12] = mx\n data[:, 47, 8:13] = mx\n cache = {cacheKey: data} # clear to save memory (but keep one to prevent unnecessary regeneration)\n\n data = cache[cacheKey]\n updateLUT()\n updateSize()\n\ndef updateSize():\n global ui, vb\n frames = ui.framesSpin.value()\n width = ui.widthSpin.value()\n height = ui.heightSpin.value()\n dtype = xp.dtype(str(ui.dtypeCombo.currentText()))\n rgb = 3 if ui.rgbCheck.isChecked() else 1\n ui.sizeLabel.setText('%d MB' % (frames * width * height * rgb * dtype.itemsize / 1e6))\n vb.setRange(QtCore.QRectF(0, 0, width, height))\n\n\ndef noticeCudaCheck():\n global xp, cache\n cache = {}\n if ui.cudaCheck.isChecked():\n if _has_cupy:\n xp = cp\n else:\n xp = np\n ui.cudaCheck.setChecked(False)\n else:\n xp = np\n mkData()\n\n\ndef noticeNumbaCheck():\n pg.setConfigOption('useNumba', _has_numba and 
ui.numbaCheck.isChecked())\n\n\nmkData()\n\n\nui.dtypeCombo.currentIndexChanged.connect(mkData)\nui.rgbCheck.toggled.connect(mkData)\nui.widthSpin.editingFinished.connect(mkData)\nui.heightSpin.editingFinished.connect(mkData)\nui.framesSpin.editingFinished.connect(mkData)\n\nui.widthSpin.valueChanged.connect(updateSize)\nui.heightSpin.valueChanged.connect(updateSize)\nui.framesSpin.valueChanged.connect(updateSize)\nui.cudaCheck.toggled.connect(noticeCudaCheck)\nui.numbaCheck.toggled.connect(noticeNumbaCheck)\n\n\nptr = 0\nlastTime = ptime.time()\nfps = None\ndef update():\n global ui, ptr, lastTime, fps, LUT, img\n if ui.lutCheck.isChecked():\n useLut = LUT\n else:\n useLut = None\n\n downsample = ui.downsampleCheck.isChecked()\n\n if ui.scaleCheck.isChecked():\n if ui.rgbLevelsCheck.isChecked():\n useScale = [\n [ui.minSpin1.value(), ui.maxSpin1.value()],\n [ui.minSpin2.value(), ui.maxSpin2.value()],\n [ui.minSpin3.value(), ui.maxSpin3.value()]]\n else:\n useScale = [ui.minSpin1.value(), ui.maxSpin1.value()]\n else:\n useScale = None\n\n if ui.rawRadio.isChecked():\n ui.rawImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)\n ui.stack.setCurrentIndex(1)\n elif ui.rawGLRadio.isChecked():\n ui.rawGLImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)\n ui.stack.setCurrentIndex(2)\n else:\n img.setImage(data[ptr%data.shape[0]], autoLevels=False, levels=useScale, lut=useLut, autoDownsample=downsample)\n ui.stack.setCurrentIndex(0)\n #img.setImage(data[ptr%data.shape[0]], autoRange=False)\n\n ptr += 1\n now = ptime.time()\n dt = now - lastTime\n lastTime = now\n if fps is None:\n fps = 1.0/dt\n else:\n s = np.clip(dt*3., 0, 1)\n fps = fps * (1-s) + (1.0/dt) * s\n ui.fpsLabel.setText('%0.2f fps' % fps)\n app.processEvents() ## force complete redraw for every plot\ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n\nif __name__ == '__main__':\n pg.exec()\n", "# -*- coding: utf-8 -*-\nimport initExample ## Add path to library (just for examples; you do not need this)\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport numpy as np\n\napp = pg.mkQApp()\nplt = pg.PlotWidget()\n\napp.processEvents()\n\n## Putting this at the beginning or end does not have much effect\nplt.show() \n\n## The auto-range is recomputed after each item is added,\n## so disabling it before plotting helps\nplt.enableAutoRange(False, False)\n\ndef plot():\n start = pg.ptime.time()\n n = 15\n pts = 100\n x = np.linspace(0, 0.8, pts)\n y = np.random.random(size=pts)*0.8\n for i in range(n):\n for j in range(n):\n ## calling PlotWidget.plot() generates a PlotDataItem, which \n ## has a bit more overhead than PlotCurveItem, which is all \n ## we need here. 
This overhead adds up quickly and makes a big\n ## difference in speed.\n \n #plt.plot(x=x+i, y=y+j)\n plt.addItem(pg.PlotCurveItem(x=x+i, y=y+j))\n \n #path = pg.arrayToQPath(x+i, y+j)\n #item = QtGui.QGraphicsPathItem(path)\n #item.setPen(pg.mkPen('w'))\n #plt.addItem(item)\n \n dt = pg.ptime.time() - start\n print(\"Create plots took: %0.3fms\" % (dt*1000))\n\n## Plot and clear 5 times, printing the time it took\nfor i in range(5):\n plt.clear()\n plot()\n app.processEvents()\n plt.autoRange()\n\n\n\n\n\ndef fastPlot():\n ## Different approach: generate a single item with all data points.\n ## This runs about 20x faster.\n start = pg.ptime.time()\n n = 15\n pts = 100\n x = np.linspace(0, 0.8, pts)\n y = np.random.random(size=pts)*0.8\n xdata = np.empty((n, n, pts))\n xdata[:] = x.reshape(1,1,pts) + np.arange(n).reshape(n,1,1)\n ydata = np.empty((n, n, pts))\n ydata[:] = y.reshape(1,1,pts) + np.arange(n).reshape(1,n,1)\n conn = np.ones((n*n,pts))\n conn[:,-1] = False # make sure plots are disconnected\n path = pg.arrayToQPath(xdata.flatten(), ydata.flatten(), conn.flatten())\n item = QtGui.QGraphicsPathItem(path)\n item.setPen(pg.mkPen('w'))\n plt.addItem(item)\n \n dt = pg.ptime.time() - start\n print(\"Create plots took: %0.3fms\" % (dt*1000))\n\n\n## Plot and clear 5 times, printing the time it took\nif hasattr(pg, 'arrayToQPath'):\n for i in range(5):\n plt.clear()\n fastPlot()\n app.processEvents()\nelse:\n print(\"Skipping fast tests--arrayToQPath function is missing.\")\n\nplt.autoRange()\n\nif __name__ == '__main__':\n pg.exec()\n" ]
[ [ "numpy.clip" ], [ "numpy.empty", "numpy.ones", "numpy.arange", "numpy.random.random", "numpy.linspace" ] ]
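Note on the record above: the second file (the MultiPlotSpeedTest-style example) explains that one QGraphicsPathItem holding every curve draws about 20x faster than adding n*n separate PlotCurveItems, using a connect array so the individual curves are not joined end to end. Below is a minimal standalone sketch of that connect-array batching technique; it assumes pyqtgraph with Qt5-style bindings (so QtGui.QGraphicsPathItem resolves, as it does in the file above), and the helper name make_batched_item is illustrative, not part of the original code.

import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui

def make_batched_item(curves_x, curves_y):
    # curves_x, curves_y: arrays of shape (n_curves, n_points).
    n_curves, n_points = curves_x.shape
    # connect[i] == 1 draws a segment from point i to point i+1; zeroing the
    # last column of each curve keeps separate curves from being chained together.
    connect = np.ones((n_curves, n_points), dtype=np.ubyte)
    connect[:, -1] = 0
    path = pg.arrayToQPath(curves_x.flatten(), curves_y.flatten(), connect.flatten())
    item = QtGui.QGraphicsPathItem(path)
    item.setPen(pg.mkPen('w'))  # one pen for the whole batch: the cost of this trick
    return item

if __name__ == '__main__':
    app = pg.mkQApp()
    plt = pg.PlotWidget()
    x = np.linspace(0, 0.8, 100)
    xs = x + np.arange(5).reshape(5, 1)  # five curves offset along x
    ys = np.random.random((5, 100)) * 0.8
    plt.addItem(make_batched_item(xs, ys))
    plt.show()
    pg.exec()

The design trade-off carried over from the example: per-item scene traversal dominates at high item counts, so collapsing many curves into one path gives up per-curve styling in exchange for far less draw overhead.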
brittwitham/brexit-word-freq
[ "943c9c83aee30d2866571a6b07ad1895a73c02de" ]
[ "brexit-word-freq/app.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom pymongo import MongoClient\nimport json\nimport os\n \nclient = MongoClient(os.environ.get(\"DATABASE\"))\ndb = client.politics.brexit\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ndata = list(db.find({}, {\"_id\": 0})) \n\ndata_df = pd.json_normalize(data).set_index('timestamp')\ntop_columns = data_df.sum().sort_values(ascending=False)\ntop_10 = top_columns[0:10].index.tolist()\ntop10df = data_df[top_10].fillna(0).astype(int)\ndf = top10df[-12:]\n\ncols = list(df.columns)\n\n# Set up plot\nfig = go.Figure()\n\ncolors = {\n 'background': '#111111',\n 'text': '#7FDBFF'\n}\n\napp.layout = html.Div(children=[\n html.H1(\n children='#brexit',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }\n ),\n\n html.Div(children='Top Keywords used with the #brexit hashtag in the last 12 hours', style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n\n\n dcc.Graph(\n id='test-plot',\n figure={\n 'data': [\n go.Scatter(\n x=df.index,\n y=df[i],\n name=i.replace('words.', ''),\n line=dict(shape='spline', width=2),\n opacity=0.8\n ) for i in cols[0:10]\n ],\n 'layout': go.Layout(\n xaxis={'title': 'Time'},\n yaxis={'title': 'Frequency'},\n margin={'l': 40, 'b': 80, 't': 10, 'r': 10},\n legend={'x': 0, 'y': 1},\n hovermode='closest'\n )\n },\n ),\n dcc.Interval(\n id='interval-component',\n interval=60*1000, # in milliseconds\n n_intervals=0\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.json_normalize" ] ]
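Note on the record above: app.py declares dcc.Interval(id='interval-component') but never registers a callback against it, so the Mongo query runs once at startup and the figure never refreshes. The following is a hedged sketch, not part of the original file, of how that interval would typically be wired in Dash of this era (dash_core_components vintage); refresh_figure is an illustrative name, and it assumes the same query and top-10 reshaping from app.py are simply re-run on every tick, with app, db, pd, and go already in scope as defined there.

from dash.dependencies import Input, Output

@app.callback(Output('test-plot', 'figure'),
              [Input('interval-component', 'n_intervals')])
def refresh_figure(n_intervals):
    # Re-query Mongo on each interval tick and rebuild the traces.
    data = list(db.find({}, {"_id": 0}))
    data_df = pd.json_normalize(data).set_index('timestamp')
    top_10 = data_df.sum().sort_values(ascending=False)[0:10].index.tolist()
    df = data_df[top_10].fillna(0).astype(int)[-12:]
    traces = [go.Scatter(x=df.index, y=df[c], name=c.replace('words.', ''),
                         line=dict(shape='spline', width=2), opacity=0.8)
              for c in df.columns]
    return {'data': traces,
            'layout': go.Layout(xaxis={'title': 'Time'}, yaxis={'title': 'Frequency'},
                                margin={'l': 40, 'b': 80, 't': 10, 'r': 10},
                                legend={'x': 0, 'y': 1}, hovermode='closest')}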
okotaku/pet_finder
[ "380e4f19172e06e92b5b752f59e2902efa6aee1f" ]
[ "code/exp/v18.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nfeature: v1, 2, 3, 4, 10, 11\nfeature: v1, 2, 3, 4, 11, 13, 14, 17, 18, 19, 22, 23\nmodel: v10\n'''\nimport itertools\nimport json\nimport gc\nimport glob\nimport os\nimport time\nimport cv2\nimport re\nimport nltk\nimport torch\nimport imagehash\nimport lightgbm as lgb\nimport xgboost as xgb\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport scipy as sp\nfrom scipy.stats import rankdata\nfrom PIL import Image\nfrom pymagnitude import Magnitude\nfrom gensim.models import word2vec, KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom itertools import combinations\nfrom logging import getLogger, Formatter, StreamHandler, FileHandler, INFO\nfrom keras.applications.densenet import preprocess_input as preprocess_input_dense\nfrom keras.applications.densenet import DenseNet121\nfrom keras.applications.inception_resnet_v2 import preprocess_input as preprocess_input_incep\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras import backend as K\nfrom keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D\nfrom keras.models import Model\nfrom keras.preprocessing.text import text_to_word_sequence\nfrom sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD, NMF\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import cohen_kappa_score, mean_squared_error\nfrom sklearn.model_selection import GroupKFold, StratifiedKFold, train_test_split\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.feature_extraction.text import _document_frequency\n\n# ===============\n# Constants\n# ===============\nCOMPETITION_NAME = 'petfinder-adoption-prediction'\nMODEL_NAME = 'v001'\nlogger = getLogger(COMPETITION_NAME)\nLOGFORMAT = '%(asctime)s %(levelname)s %(message)s'\n\ntarget = 'AdoptionSpeed'\nlen_train = 14993\nlen_test = 3948\n\nT_flag = True\nK_flag = True\nG_flag = True\ndebug = False\n\n# ===============\n# Params\n# ===============\nseed = 777\nkaeru_seed = 1337\nn_splits = 5\nnp.random.seed(seed)\n\n# feature engineering\nn_components = 5\nn_components_gege_img = 32\nn_components_gege_txt = 16\nimg_size = 256\nbatch_size = 256\n\n# model\nMODEL_PARAMS = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': 'rmse',\n 'learning_rate': 0.01,\n 'num_leaves': 63,\n 'subsample': 0.9,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.6,\n 'max_depth': 9,\n 'max_bin': 127,\n 'reg_alpha': 0.11,\n 'reg_lambda': 0.01,\n 'min_child_weight': 0.2,\n 'min_child_samples': 20,\n 'min_gain_to_split': 0.02,\n 'min_data_in_bin': 3,\n 'bin_construct_sample_cnt': 5000,\n 'cat_l2': 10,\n 'verbose': -1,\n 'nthread': -1,\n 'seed': 777,\n}\nKAERU_PARAMS = {'application': 'regression',\n 'boosting': 'gbdt',\n 'metric': 'rmse',\n 'num_leaves': 70,\n 'max_depth': 9,\n 'learning_rate': 0.01,\n 'max_bin': 32,\n 'bagging_freq': 2,\n 'bagging_fraction': 0.85,\n 'feature_fraction': 0.8,\n 'min_split_gain': 0.02,\n 'min_child_samples': 150,\n 'min_child_weight': 0.02,\n 'lambda_l2': 0.0475,\n 'verbosity': -1,\n 'seed': kaeru_seed}\nADV_PARAMS = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 
'objective': 'binary',\n 'metric': 'auc',\n 'num_leaves': 64,\n 'learning_rate': 0.02,\n 'verbose': 0,\n 'lambda_l1': 0.1,\n 'seed': 1213\n}\nMODEL_PARAMS_XGB = {\n 'eval_metric': 'rmse',\n 'seed': 1337,\n 'eta': 0.01,\n 'subsample': 0.8,\n 'colsample_bytree': 0.85,\n 'tree_method': 'gpu_hist',\n 'device': 'gpu',\n 'silent': 1,\n}\nFIT_PARAMS = {\n 'num_boost_round': 5000,\n 'early_stopping_rounds': 100,\n 'verbose_eval': 5000,\n}\n\n# define\nmaxvalue_dict = {}\ncategorical_features = [\n 'Breed1',\n 'Breed2',\n 'Color1',\n 'Color2',\n 'Color3',\n 'Dewormed',\n 'FurLength',\n 'Gender',\n 'Health',\n 'MaturitySize',\n 'State',\n 'Sterilized',\n 'Type',\n 'Vaccinated',\n 'Type_main_breed',\n 'BreedName_main_breed',\n 'Type_second_breed',\n 'BreedName_second_breed',\n 'BreedName_main_breed_all',\n]\ncontraction_mapping = {u\"ain’t\": u\"is not\", u\"aren’t\": u\"are not\", u\"can’t\": u\"cannot\", u\"’cause\": u\"because\",\n u\"could’ve\": u\"could have\", u\"couldn’t\": u\"could not\", u\"didn’t\": u\"did not\",\n u\"doesn’t\": u\"does not\", u\"don’t\": u\"do not\", u\"hadn’t\": u\"had not\",\n u\"hasn’t\": u\"has not\", u\"haven’t\": u\"have not\", u\"he’d\": u\"he would\",\n u\"he’ll\": u\"he will\", u\"he’s\": u\"he is\", u\"how’d\": u\"how did\", u\"how’d’y\": u\"how do you\",\n u\"how’ll\": u\"how will\", u\"how’s\": u\"how is\", u\"I’d\": u\"I would\",\n u\"I’d’ve\": u\"I would have\", u\"I’ll\": u\"I will\", u\"I’ll’ve\": u\"I will have\",\n u\"I’m\": u\"I am\", u\"I’ve\": u\"I have\", u\"i’d\": u\"i would\", u\"i’d’ve\": u\"i would have\",\n u\"i’ll\": u\"i will\", u\"i’ll’ve\": u\"i will have\", u\"i’m\": u\"i am\", u\"i’ve\": u\"i have\",\n u\"isn’t\": u\"is not\", u\"it’d\": u\"it would\", u\"it’d’ve\": u\"it would have\",\n u\"it’ll\": u\"it will\", u\"it’ll’ve\": u\"it will have\", u\"it’s\": u\"it is\",\n u\"let’s\": u\"let us\", u\"ma’am\": u\"madam\", u\"mayn’t\": u\"may not\",\n u\"might’ve\": u\"might have\", u\"mightn’t\": u\"might not\", u\"mightn’t’ve\": u\"might not have\",\n u\"must’ve\": u\"must have\", u\"mustn’t\": u\"must not\", u\"mustn’t’ve\": u\"must not have\",\n u\"needn’t\": u\"need not\", u\"needn’t’ve\": u\"need not have\", u\"o’clock\": u\"of the clock\",\n u\"oughtn’t\": u\"ought not\", u\"oughtn’t’ve\": u\"ought not have\", u\"shan’t\": u\"shall not\",\n u\"sha’n’t\": u\"shall not\", u\"shan’t’ve\": u\"shall not have\", u\"she’d\": u\"she would\",\n u\"she’d’ve\": u\"she would have\", u\"she’ll\": u\"she will\", u\"she’ll’ve\": u\"she will have\",\n u\"she’s\": u\"she is\", u\"should’ve\": u\"should have\", u\"shouldn’t\": u\"should not\",\n u\"shouldn’t’ve\": u\"should not have\", u\"so’ve\": u\"so have\", u\"so’s\": u\"so as\",\n u\"this’s\": u\"this is\", u\"that’d\": u\"that would\", u\"that’d’ve\": u\"that would have\",\n u\"that’s\": u\"that is\", u\"there’d\": u\"there would\", u\"there’d’ve\": u\"there would have\",\n u\"there’s\": u\"there is\", u\"here’s\": u\"here is\", u\"they’d\": u\"they would\",\n u\"they’d’ve\": u\"they would have\", u\"they’ll\": u\"they will\",\n u\"they’ll’ve\": u\"they will have\", u\"they’re\": u\"they are\", u\"they’ve\": u\"they have\",\n u\"to’ve\": u\"to have\", u\"wasn’t\": u\"was not\", u\"we’d\": u\"we would\",\n u\"we’d’ve\": u\"we would have\", u\"we’ll\": u\"we will\", u\"we’ll’ve\": u\"we will have\",\n u\"we’re\": u\"we are\", u\"we’ve\": u\"we have\", u\"weren’t\": u\"were not\",\n u\"what’ll\": u\"what will\", u\"what’ll’ve\": u\"what will have\", u\"what’re\": u\"what are\",\n u\"what’s\": u\"what 
is\", u\"what’ve\": u\"what have\", u\"when’s\": u\"when is\",\n u\"when’ve\": u\"when have\", u\"where’d\": u\"where did\", u\"where’s\": u\"where is\",\n u\"where’ve\": u\"where have\", u\"who’ll\": u\"who will\", u\"who’ll’ve\": u\"who will have\",\n u\"who’s\": u\"who is\", u\"who’ve\": u\"who have\", u\"why’s\": u\"why is\", u\"why’ve\": u\"why have\",\n u\"will’ve\": u\"will have\", u\"won’t\": u\"will not\", u\"won’t’ve\": u\"will not have\",\n u\"would’ve\": u\"would have\", u\"wouldn’t\": u\"would not\", u\"wouldn’t’ve\": u\"would not have\",\n u\"y’all\": u\"you all\", u\"y’all’d\": u\"you all would\", u\"y’all’d’ve\": u\"you all would have\",\n u\"y’all’re\": u\"you all are\", u\"y’all’ve\": u\"you all have\", u\"you’d\": u\"you would\",\n u\"you’d’ve\": u\"you would have\", u\"you’ll\": u\"you will\", u\"you’ll’ve\": u\"you will have\",\n u\"you’re\": u\"you are\", u\"you’ve\": u\"you have\", u\"cat’s\": u\"cat is\", u\" whatapp \": u\" whatapps \",\n u\" whatssapp \": u\" whatapps \", u\" whatssap \": u\" whatapps \", u\" whatspp \": u\" whatapps \",\n u\" whastapp \": u\" whatapps \", u\" whatsap \": u\" whatapps \", u\" whassap \": u\" whatapps \",\n u\" watapps \": u\" whatapps \", u\"wetfood\": u\"wet food\", u\"intetested\": u\"interested\",\n u\"领养条件,\": u\"领养条件\", u\"谢谢。\": u\"谢谢\",\n u\"别打我,记住,我有反抗的牙齿,但我不会咬你。remember\": u\"别打我,记住,我有反抗的牙齿,但我不会咬你。\",\n u\"有你。do\": u\"有你。\", u\"名字name\": u\"名字\", u\"year,\": u\"year\", u\"work,your\": u\"work your\",\n u\"too,will\": u\"too will\", u\"timtams\": u\"timtam\", u\"spay。\": u\"spay\", u\"shoulder,a\": u\"shoulder a\",\n u\"sherpherd\": u\"shepherd\", u\"sherphed\": u\"shepherd\", u\"sherperd\": u\"shepherd\",\n u\"sherpard\": u\"shepherd\", u\"serious。\": u\"serious\", u\"remember,i\": u\"remember i\",\n u\"recover,\": u\"recover\", u\"refundable指定期限内结扎后会全数奉还\": u\"refundable\",\n u\"puchong区,有没有人有增添家庭成员?\": u\"puchong\", u\"puchong救的\": u\"puchong\",\n u\"puchong,\": u\"puchong\", u\"month。\": u\"month\", u\"month,\": u\"month\",\n u\"microchip(做狗牌一定要有主人的电话号码)\": u\"microchip\", u\"maju。\": u\"maju\", u\"maincoone\": u\"maincoon\",\n u\"lumpur。\": u\"lumpur\", u\"location:阿里玛,大山脚\": u\"location\", u\"life🐾🐾\": u\"life\",\n u\"kibble,\": u\"kibble\", u\"home…\": u\"home\", u\"hand,but\": u\"hand but\", u\"hair,a\": u\"hair a\",\n u\"grey、brown\": u\"grey brown\", u\"gray,\": u\"gray\", u\"free免费\": u\"free\", u\"food,or\": u\"food or\",\n u\"dog/dog\": u\"dog\", u\"dijumpa\": u\"dijumpai\", u\"dibela\": u\"dibelai\",\n u\"beauuuuuuuuutiful\": u\"beautiful\", u\"adopt🙏\": u\"adopt\", u\"addopt\": u\"adopt\",\n u\"enxiety\": u\"anxiety\", u\"vaksin\": u\"vaccine\"}\nnumerical_features = []\ntext_features = ['Name', 'Description', 'Description_Emb', 'Description_bow']\nmeta_text = ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text',\n 'annots_top_desc_pick', 'sentiment_entities']\nremove = ['index', 'seq_text', 'PetID', 'Name', 'Description', 'RescuerID', 'StateName', 'annots_top_desc',\n 'sentiment_text',\n 'sentiment_entities', 'Description_Emb', 'Description_bow', 'annots_top_desc_pick']\nkaeru_drop_cols = [\"2017GDPperCapita\", \"Bumiputra\", \"Chinese\", \"HDI\", \"Indian\", \"Latitude\", \"Longitude\",\n 'color_red_score_mean_mean', 'color_red_score_mean_sum', 'color_blue_score_mean_mean',\n 'color_blue_score_mean_sum', 'color_green_score_mean_mean', 'color_green_score_mean_sum',\n 'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum', 'dog_cat_topics_mean_mean',\n 
'dog_cat_topics_mean_sum', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',\n 'len_text_mean_mean', 'len_text_mean_sum', 'StateID']\ngege_drop_cols = ['2017GDPperCapita', 'Breed1_equals_Breed2', 'Bumiputra', 'Chinese',\n 'HDI', 'Indian', 'Latitude', 'Longitude', 'Pop_density', 'Urban_pop', 'Breed1_equals_Breed2',\n 'fix_Breed1', 'fix_Breed2', 'single_Breed', 'color_red_score_mean_mean', 'color_red_score_mean_sum',\n 'color_red_score_mean_var', 'color_blue_score_mean_mean', 'color_blue_score_mean_sum',\n 'color_blue_score_mean_var', 'color_green_score_mean_mean', 'color_green_score_mean_sum',\n 'color_green_score_mean_var', 'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum',\n 'dog_cat_scores_mean_var', 'dog_cat_topics_mean_mean', 'dog_cat_topics_mean_sum',\n 'dog_cat_topics_mean_var', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',\n 'is_dog_or_cat_mean_var', 'len_text_mean_mean', 'len_text_mean_sum', 'len_text_mean_var']\nuse_cols = pd.read_csv(\"../input/pet-usecols/importance10.csv\")\n# use_cols = pd.read_csv(\"importance9.csv\")\nuse_cols[\"gain\"] = use_cols[\"gain\"] / use_cols[\"gain\"].sum()\nuse_cols = list(use_cols[use_cols.gain > 0.0002].feature.values)\n\nps = nltk.stem.PorterStemmer()\nlc = nltk.stem.lancaster.LancasterStemmer()\nsb = nltk.stem.snowball.SnowballStemmer('english')\n\n\n# ===============\n# Utility Functions\n# ===============\ndef to_category(train, cat=None):\n if cat is None:\n cat = [col for col in train.columns if train[col].dtype == 'object']\n for c in cat:\n train[c], uniques = pd.factorize(train[c])\n maxvalue_dict[c] = train[c].max() + 1\n return train\n\n\ndef init_logger():\n # Add handlers\n handler = StreamHandler()\n handler.setLevel(INFO)\n handler.setFormatter(Formatter(LOGFORMAT))\n fh_handler = FileHandler('{}.log'.format(MODEL_NAME))\n fh_handler.setFormatter(Formatter(LOGFORMAT))\n logger.setLevel(INFO)\n logger.addHandler(handler)\n logger.addHandler(fh_handler)\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n logger.info(f'[{name}] done in {time.time() - t0:.0f} s')\n\n\ndef load_image_and_hash(paths):\n funcs = [\n imagehash.average_hash,\n imagehash.phash,\n imagehash.dhash,\n imagehash.whash,\n # lambda x: imagehash.whash(x, mode='db4'),\n ]\n\n petids = []\n hashes = []\n for path in paths:\n image = Image.open(path)\n imageid = path.split('/')[-1].split('.')[0][:-2]\n\n petids.append(imageid)\n hashes.append(np.array([f(image).hash for f in funcs]).reshape(256))\n return petids, np.array(hashes).astype(np.int32)\n\n\ndef find_duplicates_all():\n train_paths = glob.glob('../input/petfinder-adoption-prediction/train_images/*-1.jpg')\n train_paths += glob.glob('../input/petfinder-adoption-prediction/train_images/*-2.jpg')\n test_paths = glob.glob('../input/petfinder-adoption-prediction/test_images/*-1.jpg')\n test_paths += glob.glob('../input/petfinder-adoption-prediction/test_images/*-2.jpg')\n\n train_petids, train_hashes = load_image_and_hash(train_paths)\n test_petids, test_hashes = load_image_and_hash(test_paths)\n\n # sims = np.array([(train_hashes[i] == test_hashes).sum(axis=1)/256 for i in range(train_hashes.shape[0])])\n train_hashes = torch.Tensor(train_hashes).cuda()\n test_hashes = torch.Tensor(test_hashes).cuda()\n sims = np.array(\n [(train_hashes[i] == test_hashes).sum(dim=1).cpu().numpy() / 256 for i in range(train_hashes.shape[0])])\n indices1 = np.where(sims > 0.9)\n indices2 = np.where(indices1[0] != indices1[1])\n petids1 = [train_petids[i] for i in indices1[0][indices2]]\n petids2 = 
[test_petids[i] for i in indices1[1][indices2]]\n    dups = {tuple(sorted([petid1, petid2])): True for petid1, petid2 in zip(petids1, petids2)}\n    logger.info('found %d duplicates' % len(dups))\n\n    return dups\n\n\ndef submission_with_postprocess(y_pred):\n    df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')\n    train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')\n    df_sub[\"AdoptionSpeed\"] = y_pred\n    # postprocess\n    duplicated = find_duplicates_all()\n    duplicated = pd.DataFrame(duplicated, index=range(0)).T.reset_index()\n    duplicated.columns = ['pet_id_0', 'pet_id_1']\n\n    duplicated_0 = duplicated.merge(train[['PetID', 'AdoptionSpeed']], how='left', left_on='pet_id_0',\n                                    right_on='PetID').dropna()\n    df_sub = df_sub.merge(duplicated_0[['pet_id_1', 'AdoptionSpeed']],\n                          how='left', left_on='PetID', right_on='pet_id_1', suffixes=('_original', ''))\n    df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)\n    df_sub = df_sub[['PetID', 'AdoptionSpeed']]\n\n    duplicated_1 = duplicated.merge(train[['PetID', 'AdoptionSpeed']],\n                                    how='left', left_on='pet_id_1', right_on='PetID').dropna()\n    df_sub = df_sub.merge(duplicated_1[['pet_id_0', 'AdoptionSpeed']],\n                          how='left', left_on='PetID', right_on='pet_id_0', suffixes=('_original', ''))\n    df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)\n    df_sub = df_sub[['PetID', 'AdoptionSpeed']]\n    df_sub['AdoptionSpeed'] = df_sub['AdoptionSpeed'].astype('int32')\n    # submission\n    df_sub.to_csv('submission.csv', index=False)\n\n\ndef submission(y_pred):\n    logger.info('making submission file...')\n    df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')\n    df_sub[target] = y_pred\n    df_sub.to_csv('submission.csv', index=False)\n\n\ndef analyzer_bow(text):\n    stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',\n                  'in', 'on']\n    text = text.lower() # lowercase everything\n    text = text.replace('\\n', '') # strip newlines\n    text = text.replace('\\t', '') # strip tabs\n    puncts = r',.\":)(-!?|;\\'$&/[]>%=#*+\\\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'\n    for punct in puncts:\n        text = text.replace(punct, f' {punct} ')\n    for bad_word in contraction_mapping:\n        if bad_word in text:\n            text = text.replace(bad_word, contraction_mapping[bad_word])\n    text = text.split(' ') # split on spaces\n    text = [sb.stem(t) for t in text]\n\n    words = []\n    for word in text:\n        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None): # split tokens that contain digits\n            for w in re.findall(r'(\\d+|\\D+)', word):\n                words.append(w)\n            continue\n        if word in stop_words: # drop stopwords\n            continue\n        if len(word) < 2: # drop one-char and empty tokens\n            continue\n        words.append(word)\n\n    return \" \".join(words)\n\n\ndef analyzer_embed(text):\n    text = text.lower() # lowercase everything\n    text = text.replace('\\n', '') # strip newlines\n    text = text.replace('\\t', '') # strip tabs\n    puncts = r',.\":)(-!?|;\\'$&/[]>%=#*+\\\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'\n    for punct in puncts:\n        text = text.replace(punct, f' {punct} ')\n    for bad_word in contraction_mapping:\n        if bad_word in text:\n            text = text.replace(bad_word, contraction_mapping[bad_word])\n    text = text.split(' ') # split on spaces\n\n    words = []\n    for word in text:\n        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None): # split tokens that contain digits\n            for w in re.findall(r'(\\d+|\\D+)', word):\n                words.append(w)\n            continue\n        
if len(word) < 1: # drop empty tokens\n            continue\n        words.append(word)\n\n    return \" \".join(words)\n\n\ndef analyzer_k(text):\n    stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',\n                  'in', 'on']\n    text = text.lower() # lowercase everything\n    text = text.replace('\\n', '') # strip newlines\n    text = text.replace('\\t', '') # strip tabs\n    text = re.sub(re.compile(r'[!-\\/:-@[-`{-~]'), ' ', text) # replace ASCII symbols with spaces\n    text = text.split(' ') # split on spaces\n\n    words = []\n    for word in text:\n        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None): # drop tokens that contain digits\n            continue\n        if word in stop_words: # drop stopwords\n            continue\n        if len(word) < 2: # drop one-char and empty tokens\n            continue\n        words.append(word)\n\n    return words\n\n\n# ===============\n# Feature Engineering\n# ===============\nclass GroupbyTransformer():\n    def __init__(self, param_dict=None):\n        self.param_dict = param_dict\n\n    def _get_params(self, p_dict):\n        key = p_dict['key']\n        if 'var' in p_dict.keys():\n            var = p_dict['var']\n        else:\n            var = self.var\n        if 'agg' in p_dict.keys():\n            agg = p_dict['agg']\n        else:\n            agg = self.agg\n        if 'on' in p_dict.keys():\n            on = p_dict['on']\n        else:\n            on = key\n        return key, var, agg, on\n\n    def _aggregate(self, dataframe):\n        self.features = []\n        for param_dict in self.param_dict:\n            key, var, agg, on = self._get_params(param_dict)\n            all_features = list(set(key + var))\n            new_features = self._get_feature_names(key, var, agg)\n            features = dataframe[all_features].groupby(key)[\n                var].agg(agg).reset_index()\n            features.columns = key + new_features\n            self.features.append(features)\n        return self\n\n    def _merge(self, dataframe, merge=True):\n        for param_dict, features in zip(self.param_dict, self.features):\n            key, var, agg, on = self._get_params(param_dict)\n            if merge:\n                dataframe = dataframe.merge(features, how='left', on=on)\n            else:\n                new_features = self._get_feature_names(key, var, agg)\n                dataframe = pd.concat([dataframe, features[new_features]], axis=1)\n        return dataframe\n\n    def transform(self, dataframe):\n        self._aggregate(dataframe)\n        return self._merge(dataframe, merge=True)\n\n    def _get_feature_names(self, key, var, agg):\n        _agg = []\n        for a in agg:\n            if not isinstance(a, str):\n                _agg.append(a.__name__)\n            else:\n                _agg.append(a)\n        return ['_'.join([a, v, 'groupby'] + key) for v in var for a in _agg]\n\n    def get_feature_names(self):\n        self.feature_names = []\n        for param_dict in self.param_dict:\n            key, var, agg, on = self._get_params(param_dict)\n            self.feature_names += self._get_feature_names(key, var, agg)\n        return self.feature_names\n\n    def get_numerical_features(self):\n        return self.get_feature_names()\n\n\nclass DiffGroupbyTransformer(GroupbyTransformer):\n    def _aggregate(self):\n        raise NotImplementedError\n\n    def _merge(self):\n        raise NotImplementedError\n\n    def transform(self, dataframe):\n        for param_dict in self.param_dict:\n            key, var, agg, on = self._get_params(param_dict)\n            for a in agg:\n                for v in var:\n                    new_feature = '_'.join(['diff', a, v, 'groupby'] + key)\n                    base_feature = '_'.join([a, v, 'groupby'] + key)\n                    dataframe[new_feature] = dataframe[base_feature] - dataframe[v]\n        return dataframe\n\n    def _get_feature_names(self, key, var, agg):\n        _agg = []\n        for a in agg:\n            if not isinstance(a, str):\n                _agg.append(a.__name__)\n            else:\n                _agg.append(a)\n        return ['_'.join(['diff', a, v, 'groupby'] + key) for v in var for a in _agg]\n\n\nclass RatioGroupbyTransformer(GroupbyTransformer):\n    def _aggregate(self):\n        raise NotImplementedError\n\n    def _merge(self):\n        raise 
NotImplementedError\n\n def transform(self, dataframe):\n for param_dict in self.param_dict:\n key, var, agg, on = self._get_params(param_dict)\n for a in agg:\n for v in var:\n new_feature = '_'.join(['ratio', a, v, 'groupby'] + key)\n base_feature = '_'.join([a, v, 'groupby'] + key)\n dataframe[new_feature] = dataframe[v] / dataframe[base_feature]\n return dataframe\n\n def _get_feature_names(self, key, var, agg):\n _agg = []\n for a in agg:\n if not isinstance(a, str):\n _agg.append(a.__name__)\n else:\n _agg.append(a)\n return ['_'.join(['ratio', a, v, 'groupby'] + key) for v in var for a in _agg]\n\n\nclass CategoryVectorizer():\n def __init__(self, categorical_columns, n_components,\n vectorizer=CountVectorizer(),\n transformer=LatentDirichletAllocation(),\n name='CountLDA'):\n self.categorical_columns = categorical_columns\n self.n_components = n_components\n self.vectorizer = vectorizer\n self.transformer = transformer\n self.name = name + str(self.n_components)\n\n def transform(self, dataframe):\n features = []\n for (col1, col2) in self.get_column_pairs():\n try:\n sentence = self.create_word_list(dataframe, col1, col2)\n sentence = self.vectorizer.fit_transform(sentence)\n feature = self.transformer.fit_transform(sentence)\n feature = self.get_feature(dataframe, col1, col2, feature, name=self.name)\n features.append(feature)\n except:\n pass\n features = pd.concat(features, axis=1)\n return features\n\n def create_word_list(self, dataframe, col1, col2):\n col1_size = int(dataframe[col1].values.max() + 1)\n col2_list = [[] for _ in range(col1_size)]\n for val1, val2 in zip(dataframe[col1].values, dataframe[col2].values):\n col2_list[int(val1)].append(col2 + str(val2))\n return [' '.join(map(str, ls)) for ls in col2_list]\n\n def get_feature(self, dataframe, col1, col2, latent_vector, name=''):\n features = np.zeros(\n shape=(len(dataframe), self.n_components), dtype=np.float32)\n self.columns = ['_'.join([name, col1, col2, str(i)])\n for i in range(self.n_components)]\n for i, val1 in enumerate(dataframe[col1]):\n features[i, :self.n_components] = latent_vector[val1]\n\n return pd.DataFrame(data=features, columns=self.columns)\n\n def get_column_pairs(self):\n return [(col1, col2) for col1, col2 in itertools.product(self.categorical_columns, repeat=2) if col1 != col2]\n\n def get_numerical_features(self):\n return self.columns\n\n\nclass BM25Transformer(BaseEstimator, TransformerMixin):\n \"\"\"\n Parameters\n ----------\n use_idf : boolean, optional (default=True)\n k1 : float, optional (default=2.0)\n b : float, optional (default=0.75)\n References\n ----------\n Okapi BM25: a non-binary model - Introduction to Information Retrieval\n http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html\n \"\"\"\n\n def __init__(self, use_idf=True, k1=2.0, b=0.75):\n self.use_idf = use_idf\n self.k1 = k1\n self.b = b\n\n def fit(self, X):\n \"\"\"\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n \"\"\"\n if not sp.sparse.issparse(X):\n X = sp.sparse.csc_matrix(X)\n if self.use_idf:\n n_samples, n_features = X.shape\n df = _document_frequency(X)\n idf = np.log((n_samples - df + 0.5) / (df + 0.5))\n self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)\n\n doc_len = X.sum(axis=1)\n self._average_document_len = np.average(doc_len)\n\n return self\n\n def transform(self, X, copy=True):\n \"\"\"\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n copy : 
boolean, optional (default=True)\n \"\"\"\n if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):\n # preserve float family dtype\n X = sp.sparse.csr_matrix(X, copy=copy)\n else:\n # convert counts or binary occurrences to floats\n X = sp.sparse.csr_matrix(X, dtype=np.float, copy=copy)\n\n n_samples, n_features = X.shape\n\n # Document length (number of terms) in each row\n # Shape is (n_samples, 1)\n doc_len = X.sum(axis=1)\n # Number of non-zero elements in each row\n # Shape is (n_samples, )\n sz = X.indptr[1:] - X.indptr[0:-1]\n\n # In each row, repeat `doc_len` for `sz` times\n # Shape is (sum(sz), )\n # Example\n # -------\n # dl = [4, 5, 6]\n # sz = [1, 2, 3]\n # rep = [4, 5, 5, 6, 6, 6]\n rep = np.repeat(np.asarray(doc_len), sz)\n\n # Compute BM25 score only for non-zero elements\n nom = self.k1 + 1\n denom = X.data + self.k1 * (1 - self.b + self.b * rep / self._average_document_len)\n data = X.data * nom / denom\n\n X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)\n\n if self.use_idf:\n check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')\n\n expected_n_features = self._idf_diag.shape[0]\n if n_features != expected_n_features:\n raise ValueError(\"Input has n_features=%d while the model\"\n \" has been trained with n_features=%d\" % (\n n_features, expected_n_features))\n X = X * self._idf_diag\n\n return X\n\n\n# ===============\n# For pet\n# ===============\ndef merge_state_info(train):\n states = pd.read_csv('../input/petfinder-adoption-prediction/state_labels.csv')\n state_info = pd.read_csv('../input/state-info/state_info.csv')\n state_info.rename(columns={\n 'Area (km2)': 'Area',\n 'Pop. density': 'Pop_density',\n 'Urban pop.(%)': 'Urban_pop',\n 'Bumiputra (%)': 'Bumiputra',\n 'Chinese (%)': 'Chinese',\n 'Indian (%)': 'Indian'\n }, inplace=True)\n state_info['Population'] = state_info['Population'].str.replace(',', '').astype('int32')\n state_info['Area'] = state_info['Area'].str.replace(',', '').astype('int32')\n state_info['Pop_density'] = state_info['Pop_density'].str.replace(',', '').astype('int32')\n state_info['2017GDPperCapita'] = state_info['2017GDPperCapita'].str.replace(',', '').astype('float32')\n state_info['StateName'] = state_info['StateName'].str.replace('FT ', '')\n state_info['StateName'] = state_info['StateName'].str.replace('Malacca', 'Melaka')\n state_info['StateName'] = state_info['StateName'].str.replace('Penang', 'Pulau Pinang')\n\n states = states.merge(state_info, how='left', on='StateName')\n train = train.merge(states, how='left', left_on='State', right_on='StateID')\n\n return train\n\n\ndef merge_breed_name(train):\n breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')\n with open(\"../input/cat-and-dog-breeds-parameters/rating.json\", 'r', encoding='utf-8') as f:\n breed_data = json.load(f)\n cat_breed = pd.DataFrame.from_dict(breed_data['cat_breeds']).T\n dog_breed = pd.DataFrame.from_dict(breed_data['dog_breeds']).T\n df = pd.concat([dog_breed, cat_breed], axis=0).reset_index().rename(columns={'index': 'BreedName'})\n df.BreedName.replace(\n {\n 'Siamese Cat': 'Siamese',\n 'Chinese Crested': 'Chinese Crested Dog',\n 'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',\n 'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',\n 'Pembroke Welsh Corgi': 'Welsh Corgi',\n 'Sphynx': 'Sphynx (hairless cat)',\n 'Plott': 'Plott Hound',\n 'Korean Jindo Dog': 'Jindo',\n 'Anatolian Shepherd Dog': 'Anatolian Shepherd',\n 'Belgian Malinois': 'Belgian Shepherd Malinois',\n 'Belgian Sheepdog': 
'Belgian Shepherd Dog Sheepdog',\n            'Belgian Tervuren': 'Belgian Shepherd Tervuren',\n            'Bengal Cats': 'Bengal',\n            'Bouvier des Flandres': 'Bouvier des Flanders',\n            'Brittany': 'Brittany Spaniel',\n            'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',\n            'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',\n            'Bulldog': 'English Bulldog',\n            'American English Coonhound': 'English Coonhound',\n            'Small Munsterlander Pointer': 'Munsterlander',\n            'Entlebucher Mountain Dog': 'Entlebucher',\n            'Exotic': 'Exotic Shorthair',\n            'Flat-Coated Retriever': 'Flat-coated Retriever',\n            'English Foxhound': 'Foxhound',\n            'Alaskan Klee Kai': 'Klee Kai',\n            'Newfoundland': 'Newfoundland Dog',\n            'Norwegian Forest': 'Norwegian Forest Cat',\n            'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',\n            'American Pit Bull Terrier': 'Pit Bull Terrier',\n            'Ragdoll Cats': 'Ragdoll',\n            'Standard Schnauzer': 'Schnauzer',\n            'Scottish Terrier': 'Scottish Terrier Scottie',\n            'Chinese Shar-Pei': 'Shar Pei',\n            'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',\n            'West Highland White Terrier': 'West Highland White Terrier Westie',\n            'Soft Coated Wheaten Terrier': 'Wheaten Terrier',\n            'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',\n            'Xoloitzcuintli': 'Wirehaired Terrier',\n            'Cane Corso': 'Cane Corso Mastiff',\n            'Havana Brown': 'Havana',\n        }, inplace=True\n    )\n    breeds = breeds.merge(df, how='left', on='BreedName')\n\n    breeds1_dic, breeds2_dic = {}, {}\n    for c in breeds.columns:\n        if c == \"BreedID\":\n            continue\n        breeds1_dic[c] = c + \"_main_breed_all\"\n        breeds2_dic[c] = c + \"_second_breed_all\"\n    train = train.merge(breeds.rename(columns=breeds1_dic), how='left', left_on='Breed1', right_on='BreedID')\n    train.drop(['BreedID'], axis=1, inplace=True)\n    train = train.merge(breeds.rename(columns=breeds2_dic), how='left', left_on='Breed2', right_on='BreedID')\n    train.drop(['BreedID'], axis=1, inplace=True)\n\n    return train\n\n\ndef merge_breed_name_sub(train):\n    breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')\n    df = pd.read_json('../input/cat-and-dog-breeds-parameters/rating.json')\n    cat_df = df.cat_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})\n    dog_df = df.dog_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})\n\n    cat = cat_df['cat_breeds'].apply(lambda x: pd.Series(x))\n    cat_df = pd.concat([cat_df, cat], axis=1).drop(['cat_breeds'], axis=1)\n    dog = dog_df['dog_breeds'].apply(lambda x: pd.Series(x))\n    dog_df = pd.concat([dog_df, dog], axis=1).drop(['dog_breeds'], axis=1)\n\n    df = pd.concat([dog_df, cat_df])\n    df.BreedName.replace(\n        {\n            'Siamese Cat': 'Siamese',\n            'Chinese Crested': 'Chinese Crested Dog',\n            'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',\n            'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',\n            'Pembroke Welsh Corgi': 'Welsh Corgi',\n            'Sphynx': 'Sphynx (hairless cat)',\n            'Plott': 'Plott Hound',\n            'Korean Jindo Dog': 'Jindo',\n            'Anatolian Shepherd Dog': 'Anatolian Shepherd',\n            'Belgian Malinois': 'Belgian Shepherd Malinois',\n            'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',\n            'Belgian Tervuren': 'Belgian Shepherd Tervuren',\n            'Bengal Cats': 'Bengal',\n            'Bouvier des Flandres': 'Bouvier des Flanders',\n            'Brittany': 'Brittany Spaniel',\n            'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',\n            'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',\n            'Bulldog': 'English Bulldog',\n            'American English Coonhound': 'English Coonhound',\n            'Small Munsterlander Pointer': 
'Munsterlander',\n 'Entlebucher Mountain Dog': 'Entlebucher',\n 'Exotic': 'Exotic Shorthair',\n 'Flat-Coated Retriever': 'Flat-coated Retriever',\n 'English Foxhound': 'Foxhound',\n 'Alaskan Klee Kai': 'Klee Kai',\n 'Newfoundland': 'Newfoundland Dog',\n 'Norwegian Forest': 'Norwegian Forest Cat',\n 'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',\n 'American Pit Bull Terrier': 'Pit Bull Terrier',\n 'Ragdoll Cats': 'Ragdoll',\n 'Standard Schnauzer': 'Schnauzer',\n 'Scottish Terrier': 'Scottish Terrier Scottie',\n 'Chinese Shar-Pei': 'Shar Pei',\n 'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',\n 'West Highland White Terrier': 'West Highland White Terrier Westie',\n 'Soft Coated Wheaten Terrier': 'Wheaten Terrier',\n 'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',\n 'Xoloitzcuintli': 'Wirehaired Terrier',\n 'Cane Corso': 'Cane Corso Mastiff',\n 'Havana Brown': 'Havana',\n }, inplace=True\n )\n breeds = breeds.merge(df, how='left', on='BreedName')\n\n train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_main_breed'}), how='left', left_on='Breed1',\n right_on='BreedID', suffixes=('', '_main_breed'))\n train.drop(['BreedID'], axis=1, inplace=True)\n train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_second_breed'}), how='left', left_on='Breed2',\n right_on='BreedID', suffixes=('', '_second_breed'))\n train.drop(['BreedID'], axis=1, inplace=True)\n\n return train\n\n\ndef merge_breed_ranking(train):\n breeds = pd.read_csv('../input/breed-labels-with-ranks/breed_labels_with_ranks.csv').drop(\"BreedName\", axis=1)\n train = train.merge(breeds, how=\"left\", left_on=\"fix_Breed1\", right_on=\"BreedID\")\n train = train.rename(columns={\"BreedCatRank\": \"BreedCatRank_main\", \"BreedDogRank\": \"BreedDogRank_main\"})\n train = train.merge(breeds, how=\"left\", left_on=\"fix_Breed2\", right_on=\"BreedID\")\n train = train.rename(columns={\"BreedCatRank\": \"BreedCatRank_second\", \"BreedDogRank\": \"BreedDogRank_second\"})\n\n return train\n\n\ndef breed_mismatch(train):\n breed_labels = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')\n dog_breed_labels_set = list(breed_labels[breed_labels['Type'] == 1]['BreedID'])\n dog_breed_labels_set.remove(307)\n train['breeds_mismatch'] = list((train['Type'] == 2) & (\n (train['fix_Breed1'].isin(dog_breed_labels_set)) | (train['fix_Breed2'].isin(dog_breed_labels_set))))\n train['breeds_mismatch'] = train['breeds_mismatch'].astype(int)\n\n return train\n\n\ndef breed_mismatch_desc(train):\n train['desc_contain_dog'] = train['Description'].str.lower().str.contains(' dog | dogs ')\n train['desc_contain_cat'] = train['Description'].str.lower().str.contains(' cat | cats ')\n train['desc_miss_match'] = list((train['Type'] == 1) & (train['desc_contain_cat']))\n train['desc_miss_match'] = train['desc_miss_match'].astype(int)\n\n return train\n\n\ndef breed_mismatch_meta(train):\n train['annot_contain_dog'] = train['annots_top_desc'].str.lower().str.contains(' dog | dogs ')\n train['annot_contain_cat'] = train['annots_top_desc'].str.lower().str.contains(' cat | cats ')\n train['annot_miss_match'] = list((train['Type'] == 1) & (train['annot_contain_cat']))\n train['annot_miss_match'] = train['annot_miss_match'].astype(int)\n\n return train\n\n\ndef extract_emojis(text, emoji_list):\n return ' '.join(c for c in text if c in emoji_list)\n\n\ndef merge_emoji(train):\n emoji = pd.read_csv('../input/emoji-sentiment-data/Emoji_Sentiment_Data_v1.0.csv')\n emoji2 = 
pd.read_csv('../input/emoji-sentiment-data/Emojitracker_20150604.csv')\n emoji = emoji.merge(emoji2, how='left', on='Emoji', suffixes=('', '_tracker'))\n\n emoji_list = emoji['Emoji'].values\n train_emoji = train['Description'].apply(extract_emojis, emoji_list=emoji_list)\n train_emoji = pd.DataFrame([train['PetID'], train_emoji]).T.set_index('PetID')\n train_emoji = train_emoji['Description'].str.extractall('(' + ')|('.join(emoji_list) + ')')\n train_emoji = train_emoji.fillna(method='bfill', axis=1).iloc[:, 0].reset_index().rename(columns={0: 'Emoji'})\n train_emoji = train_emoji.merge(emoji, how='left', on='Emoji')\n\n emoji_columns = ['Occurrences', 'Position', 'Negative', 'Neutral', 'Positive', 'Occurrences_tracker']\n stats = ['mean', 'max', 'min', 'median', 'std']\n g = train_emoji.groupby('PetID')[emoji_columns].agg(stats)\n g.columns = [c + '_' + stat for c in emoji_columns for stat in stats]\n train = train.merge(g, how='left', on='PetID')\n\n return train\n\n\ndef get_interactions(train):\n interaction_features = ['Age', 'Quantity']\n for (c1, c2) in combinations(interaction_features, 2):\n train[c1 + '_mul_' + c2] = train[c1] * train[c2]\n train[c1 + '_div_' + c2] = train[c1] / train[c2]\n return train\n\n\ndef get_text_features(train):\n train['Length_Description'] = train['Description'].map(len)\n train['Length_annots_top_desc'] = train['annots_top_desc'].map(len)\n train['Lengths_sentiment_text'] = train['sentiment_text'].map(len)\n train['Lengths_sentiment_entities'] = train['sentiment_entities'].map(len)\n\n return train\n\n\ndef get_name_features(train):\n train['num_name_chars'] = train['Name'].apply(len)\n train['num_name_capitals'] = train['Name'].apply(lambda x: sum(1 for c in x if c.isupper()))\n train['name_caps_vs_length'] = train.apply(lambda row: row['num_name_capitals'] / (row['num_name_chars'] + 1e-5),\n axis=1)\n train['num_name_exclamation_marks'] = train['Name'].apply(lambda x: x.count('!'))\n train['num_name_question_marks'] = train['Name'].apply(lambda x: x.count('?'))\n train['num_name_punctuation'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '.,;:'))\n train['num_name_symbols'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '*&$%'))\n train['num_name_words'] = train['Name'].apply(lambda x: len(x.split()))\n return train\n\n\nclass MetaDataParser(object):\n def __init__(self):\n # sentiment files\n train_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_sentiment/*.json'))\n test_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_sentiment/*.json'))\n sentiment_files = train_sentiment_files + test_sentiment_files\n self.sentiment_files = pd.DataFrame(sentiment_files, columns=['sentiment_filename'])\n self.sentiment_files['PetID'] = self.sentiment_files['sentiment_filename'].apply(\n lambda x: x.split('/')[-1].split('.')[0])\n\n # metadata files\n train_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_metadata/*.json'))\n test_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_metadata/*.json'))\n metadata_files = train_metadata_files + test_metadata_files\n self.metadata_files = pd.DataFrame(metadata_files, columns=['metadata_filename'])\n self.metadata_files['PetID'] = self.metadata_files['metadata_filename'].apply(\n lambda x: x.split('/')[-1].split('-')[0])\n\n def open_json_file(self, filename):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n metadata_file = json.load(f)\n return metadata_file\n\n def 
get_stats(self, array, name):\n        stats = [np.mean, np.max, np.min, np.sum, np.var]\n        result = {}\n        if len(array):\n            for stat in stats:\n                result[name + '_' + stat.__name__] = stat(array)\n        else:\n            for stat in stats:\n                result[name + '_' + stat.__name__] = 0\n        return result\n\n    def parse_sentiment_file(self, file):\n        file_sentiment = file['documentSentiment']\n        file_entities = [x['name'] for x in file['entities']]\n        file_entities = ' '.join(file_entities)\n\n        file_sentences_text = [x['text']['content'] for x in file['sentences']]\n        file_sentences_text = ' '.join(file_sentences_text)\n        file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\n\n        file_sentences_sentiment_sum = pd.DataFrame.from_dict(\n            file_sentences_sentiment, orient='columns').sum()\n        file_sentences_sentiment_sum = file_sentences_sentiment_sum.add_prefix('document_sum_').to_dict()\n\n        file_sentences_sentiment_mean = pd.DataFrame.from_dict(\n            file_sentences_sentiment, orient='columns').mean()\n        file_sentences_sentiment_mean = file_sentences_sentiment_mean.add_prefix('document_mean_').to_dict()\n\n        file_sentences_sentiment_var = pd.DataFrame.from_dict(\n            file_sentences_sentiment, orient='columns').var()\n        file_sentences_sentiment_var = file_sentences_sentiment_var.add_prefix('document_var_').to_dict()\n\n        file_sentiment.update(file_sentences_sentiment_mean)\n        file_sentiment.update(file_sentences_sentiment_sum)\n        file_sentiment.update(file_sentences_sentiment_var)\n        file_sentiment.update({\"sentiment_text\": file_sentences_text})\n        file_sentiment.update({\"sentiment_entities\": file_entities})\n\n        return pd.Series(file_sentiment)\n\n    def parse_metadata(self, file):\n        file_keys = list(file.keys())\n\n        if 'labelAnnotations' in file_keys:\n            label_annotations = file['labelAnnotations']\n            file_top_score = [x['score'] for x in label_annotations]\n            pick_value = int(len(label_annotations) * 0.3)\n            if pick_value == 0: pick_value = 1\n            file_top_score_pick = [x['score'] for x in label_annotations[:pick_value]]\n            file_top_desc = [x['description'] for x in label_annotations]\n            file_top_desc_pick = [x['description'] for x in label_annotations[:pick_value]]\n            dog_cat_scores = []\n            dog_cat_topics = []\n            is_dog_or_cat = []\n            for label in label_annotations:\n                if label['description'] == 'dog' or label['description'] == 'cat':\n                    dog_cat_scores.append(label['score'])\n                    dog_cat_topics.append(label['topicality'])\n                    is_dog_or_cat.append(1)\n                else:\n                    is_dog_or_cat.append(0)\n        else:\n            file_top_score = []\n            file_top_desc = []\n            dog_cat_scores = []\n            dog_cat_topics = []\n            is_dog_or_cat = []\n            file_top_score_pick = []\n            file_top_desc_pick = []\n\n        if 'faceAnnotations' in file_keys:\n            file_face = file['faceAnnotations']\n            n_faces = len(file_face)\n        else:\n            n_faces = 0\n\n        if 'textAnnotations' in file_keys:\n            text_annotations = file['textAnnotations']\n            file_n_text_annotations = len(text_annotations)\n            file_len_text = [len(text['description']) for text in text_annotations]\n        else:\n            file_n_text_annotations = 0\n            file_len_text = []\n\n        file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n        file_crops = file['cropHintsAnnotation']['cropHints']\n\n        file_color_score = [x['score'] for x in file_colors]\n        file_color_pixelfrac = [x['pixelFraction'] for x in file_colors]\n        file_color_red = [x['color']['red'] if 'red' in x['color'].keys() else 0 for x in file_colors]\n        file_color_blue = [x['color']['blue'] if 'blue' in x['color'].keys() else 0 for x in file_colors]\n        file_color_green = [x['color']['green'] if 'green' in 
x['color'].keys() else 0 for x in file_colors]\n file_crop_conf = np.mean([x['confidence'] for x in file_crops])\n file_crop_x = np.mean([x['boundingPoly']['vertices'][1]['x'] for x in file_crops])\n file_crop_y = np.mean([x['boundingPoly']['vertices'][3]['y'] for x in file_crops])\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.mean([x['importanceFraction'] for x in file_crops])\n else:\n file_crop_importance = 0\n\n metadata = {\n 'annots_top_desc': ' '.join(file_top_desc),\n 'annots_top_desc_pick': ' '.join(file_top_desc_pick),\n 'annots_score_pick_mean': np.mean(file_top_score_pick),\n 'n_faces': n_faces,\n 'n_text_annotations': file_n_text_annotations,\n 'crop_conf': file_crop_conf,\n 'crop_x': file_crop_x,\n 'crop_y': file_crop_y,\n 'crop_importance': file_crop_importance,\n }\n metadata.update(self.get_stats(file_top_score, 'annots_score_normal'))\n metadata.update(self.get_stats(file_color_score, 'color_score'))\n metadata.update(self.get_stats(file_color_pixelfrac, 'color_pixel_score'))\n metadata.update(self.get_stats(file_color_red, 'color_red_score'))\n metadata.update(self.get_stats(file_color_blue, 'color_blue_score'))\n metadata.update(self.get_stats(file_color_green, 'color_green_score'))\n metadata.update(self.get_stats(dog_cat_scores, 'dog_cat_scores'))\n metadata.update(self.get_stats(dog_cat_topics, 'dog_cat_topics'))\n metadata.update(self.get_stats(is_dog_or_cat, 'is_dog_or_cat'))\n metadata.update(self.get_stats(file_len_text, 'len_text'))\n metadata.update({\"color_red_score_first\": file_color_red[0] if len(file_color_red) > 0 else -1})\n metadata.update({\"color_blue_score_first\": file_color_blue[0] if len(file_color_blue) > 0 else -1})\n metadata.update({\"color_green_score_first\": file_color_green[0] if len(file_color_green) > 0 else -1})\n metadata.update({\"color_pixel_score_first\": file_color_pixelfrac[0] if len(file_color_pixelfrac) > 0 else -1})\n metadata.update({\"color_score_first\": file_color_score[0] if len(file_color_score) > 0 else -1})\n metadata.update({\"label_score_first\": file_top_score[0] if len(file_top_score) > 0 else -1})\n\n return pd.Series(metadata)\n\n def _transform(self, path, sentiment=True):\n file = self.open_json_file(path)\n if sentiment:\n result = self.parse_sentiment_file(file)\n else:\n result = self.parse_metadata(file)\n return result\n\n\ndef pretrained_w2v(train_text, model, name):\n train_corpus = [text_to_word_sequence(text) for text in train_text]\n\n result = []\n for text in train_corpus:\n n_skip = 0\n vec = np.zeros(model.vector_size)\n for n_w, word in enumerate(text):\n if word in model: # 0.9906\n vec = vec + model.wv[word]\n continue\n word_ = word.upper()\n if word_ in model: # 0.9909\n vec = vec + model.wv[word_]\n continue\n word_ = word.capitalize()\n if word_ in model: # 0.9925\n vec = vec + model.wv[word_]\n continue\n word_ = ps.stem(word)\n if word_ in model: # 0.9927\n vec = vec + model.wv[word_]\n continue\n word_ = lc.stem(word)\n if word_ in model: # 0.9932\n vec = vec + model.wv[word_]\n continue\n word_ = sb.stem(word)\n if word_ in model: # 0.9933\n vec = vec + model.wv[word_]\n continue\n else:\n n_skip += 1\n continue\n vec = vec / (n_w - n_skip + 1)\n result.append(vec)\n\n w2v_cols = [\"{}{}\".format(name, i) for i in range(1, model.vector_size + 1)]\n result = pd.DataFrame(result)\n result.columns = w2v_cols\n\n return result\n\n\ndef w2v_pymagnitude(train_text, model, name):\n train_corpus = [text_to_word_sequence(text) for text in train_text]\n\n result 
= []\n for text in train_corpus:\n vec = np.zeros(model.dim)\n for n_w, word in enumerate(text):\n if word in model: # 0.9906\n vec = vec + model.query(word)\n continue\n word_ = word.upper()\n if word_ in model: # 0.9909\n vec = vec + model.query(word_)\n continue\n word_ = word.capitalize()\n if word_ in model: # 0.9925\n vec = vec + model.query(word_)\n continue\n word_ = ps.stem(word)\n if word_ in model: # 0.9927\n vec = vec + model.query(word_)\n continue\n word_ = lc.stem(word)\n if word_ in model: # 0.9932\n vec = vec + model.query(word_)\n continue\n word_ = sb.stem(word)\n if word_ in model: # 0.9933\n vec = vec + model.query(word_)\n continue\n vec = vec + model.query(word)\n\n vec = vec / (n_w + 1)\n result.append(vec)\n\n w2v_cols = [\"{}{}\".format(name, i) for i in range(1, model.dim + 1)]\n result = pd.DataFrame(result)\n result.columns = w2v_cols\n\n return result\n\n\ndef doc2vec(description_k, d2v_param):\n corpus = [TaggedDocument(words=analyzer_k(text), tags=[i]) for i, text in enumerate(description_k)]\n doc2vecs = Doc2Vec(\n documents=corpus, dm=1,\n **d2v_param\n ) # dm == 1 -> dmpv, dm != 1 -> DBoW\n doc2vecs = np.array([doc2vecs.infer_vector(analyzer_k(text)) for text in description_k])\n\n doc2vec_df = pd.DataFrame()\n doc2vec_df['d2v_mean'] = np.mean(doc2vecs, axis=1)\n doc2vec_df['d2v_sum'] = np.sum(doc2vecs, axis=1)\n doc2vec_df['d2v_max'] = np.max(doc2vecs, axis=1)\n doc2vec_df['d2v_min'] = np.min(doc2vecs, axis=1)\n doc2vec_df['d2v_median'] = np.median(doc2vecs, axis=1)\n doc2vec_df['d2v_var'] = np.var(doc2vecs, axis=1)\n\n return doc2vec_df\n\n\ndef resize_to_square(im):\n old_size = im.shape[:2] # old_size is in (height, width) format\n ratio = float(img_size) / max(old_size)\n new_size = tuple([int(x * ratio) for x in old_size])\n # new_size should be in (width, height) format\n im = cv2.resize(im, (new_size[1], new_size[0]))\n delta_w = img_size - new_size[1]\n delta_h = img_size - new_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n color = [0, 0, 0]\n new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)\n return new_im\n\n\ndef load_image(path, preprocesssing):\n image = cv2.imread(path)\n new_image = resize_to_square(image)\n new_image = preprocesssing(new_image)\n return new_image\n\n\ndef get_age_feats(df):\n df[\"Age_year\"] = (df[\"Age\"] / 12).astype(np.int32)\n over_1year_flag = df[\"Age\"] / 12 >= 1\n df.loc[over_1year_flag, \"over_1year\"] = 1\n df.loc[~over_1year_flag, \"over_1year\"] = 0\n return df\n\n\ndef freq_encoding(df, freq_cols):\n for c in freq_cols:\n count_df = df.groupby([c])['PetID'].count().reset_index()\n count_df.columns = [c, '{}_freq'.format(c)]\n df = df.merge(count_df, how='left', on=c)\n\n return df\n\n\ndef getSize(filename):\n st = os.stat(filename)\n return st.st_size\n\n\ndef getDimensions(filename):\n img_size = Image.open(filename).size\n return img_size\n\n\ndef is_zh(in_str):\n \"\"\"\n SJISに変換して文字数が減れば簡体字があるので中国語\n \"\"\"\n return (set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([])\n\n\n# ===============\n# Model\n# ===============\ndef get_score(y_true, y_pred):\n return cohen_kappa_score(y_true, y_pred, weights='quadratic')\n\n\ndef get_y():\n return pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv', usecols=[target]).values.flatten()\n\n\ndef run_model(X_train, y_train, X_valid, y_valid, X_test,\n categorical_features,\n predictors, maxvalue_dict, fold_id, 
params, model_name):\n train = lgb.Dataset(X_train, y_train,\n categorical_feature=categorical_features,\n feature_name=predictors)\n valid = lgb.Dataset(X_valid, y_valid,\n categorical_feature=categorical_features,\n feature_name=predictors)\n evals_result = {}\n model = lgb.train(\n params,\n train,\n valid_sets=[valid],\n valid_names=['valid'],\n evals_result=evals_result,\n **FIT_PARAMS\n )\n logger.info(f'Best Iteration: {model.best_iteration}')\n\n # train score\n y_pred_train = model.predict(X_train)\n train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))\n\n # validation score\n y_pred_valid = model.predict(X_valid)\n valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))\n y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)\n\n # save model\n model.save_model(f'{model_name}_fold{fold_id}.txt')\n\n # predict test\n y_pred_test = model.predict(X_test)\n y_pred_test = rankdata(y_pred_test) / len(y_pred_test)\n\n # save predictions\n np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)\n np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)\n\n return y_pred_valid, y_pred_test, train_rmse, valid_rmse\n\n\ndef run_xgb_model(X_train, y_train, X_valid, y_valid, X_test,\n predictors, maxvalue_dict, fold_id, params, model_name):\n d_train = xgb.DMatrix(data=X_train, label=y_train, feature_names=predictors)\n d_valid = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=predictors)\n\n watchlist = [(d_train, 'train'), (d_valid, 'valid')]\n model = xgb.train(dtrain=d_train, evals=watchlist, params=params, **FIT_PARAMS)\n\n # train score\n y_pred_train = model.predict(d_train, ntree_limit=model.best_ntree_limit)\n train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))\n\n # validation score\n y_pred_valid = model.predict(d_valid, ntree_limit=model.best_ntree_limit)\n valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))\n y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)\n\n # save model\n model.save_model(f'{model_name}_fold{fold_id}.txt')\n\n # predict test\n y_pred_test = model.predict(xgb.DMatrix(data=X_test, feature_names=predictors), ntree_limit=model.best_ntree_limit)\n y_pred_test = rankdata(y_pred_test) / len(y_pred_test)\n\n # save predictions\n np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)\n np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)\n\n return y_pred_valid, y_pred_test, train_rmse, valid_rmse\n\n\ndef plot_mean_feature_importances(feature_importances, max_num=50, importance_type='gain', path=None):\n mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean()\n feature_importances['mean_' + importance_type] = feature_importances['feature'].map(mean_gain[importance_type])\n\n if path is not None:\n data = feature_importances.sort_values('mean_' + importance_type, ascending=False).iloc[:max_num, :]\n plt.clf()\n plt.figure(figsize=(16, 8))\n sns.barplot(x=importance_type, y='feature', data=data)\n plt.tight_layout()\n plt.savefig(path)\n\n return feature_importances\n\n\ndef to_bins(x, borders):\n for i in range(len(borders)):\n if x <= borders[i]:\n return i\n return len(borders)\n\n\nclass OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _loss(self, coef, X, y, idx):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n ll = -get_score(y, X_p)\n return ll\n\n def fit(self, X, y):\n coef = [0.2, 0.4, 0.6, 0.8]\n golden1 = 0.618\n golden2 = 1 - golden1\n ab_start = [(0.01, 0.3), (0.15, 0.56), (0.35, 0.75), (0.6, 0.9)]\n for 
it1 in range(10):\n            for idx in range(4):\n                # golden section search\n                a, b = ab_start[idx]\n                # calc losses\n                coef[idx] = a\n                la = self._loss(coef, X, y, idx)\n                coef[idx] = b\n                lb = self._loss(coef, X, y, idx)\n                for it in range(20):\n                    # choose value\n                    if la > lb:\n                        a = b - (b - a) * golden1\n                        coef[idx] = a\n                        la = self._loss(coef, X, y, idx)\n                    else:\n                        b = b - (b - a) * golden2\n                        coef[idx] = b\n                        lb = self._loss(coef, X, y, idx)\n        self.coef_ = {'x': coef}\n\n    def predict(self, X, coef):\n        X_p = np.array([to_bins(pred, coef) for pred in X])\n        return X_p\n\n    def coefficients(self):\n        return self.coef_['x']\n\n\nclass StratifiedGroupKFold():\n    def __init__(self, n_splits=5):\n        self.n_splits = n_splits\n\n    def split(self, X, y=None, groups=None):\n        fold = pd.DataFrame([X, y, groups]).T\n        fold.columns = ['X', 'y', 'groups']\n        fold['y'] = fold['y'].astype(int)\n        g = fold.groupby('groups')['y'].agg('mean').reset_index()\n        fold = fold.merge(g, how='left', on='groups', suffixes=('', '_mean'))\n        fold['y_mean'] = fold['y_mean'].apply(np.round)\n        fold['fold_id'] = 0\n        for unique_y in fold['y_mean'].unique():\n            mask = fold.y_mean == unique_y\n            selected = fold[mask].reset_index(drop=True)\n            cv = GroupKFold(n_splits=self.n_splits)\n            for i, (train_index, valid_index) in enumerate(\n                    cv.split(range(len(selected)), y=None, groups=selected['groups'])):\n                selected.loc[valid_index, 'fold_id'] = i\n            fold.loc[mask, 'fold_id'] = selected['fold_id'].values\n\n        for i in range(self.n_splits):\n            indices = np.arange(len(fold))\n            train_index = indices[fold['fold_id'] != i]\n            valid_index = indices[fold['fold_id'] == i]\n            yield train_index, valid_index\n\n\nif __name__ == '__main__':\n    init_logger()\n    t_cols, k_cols, g_cols = [], [], []\n\n    # load\n    train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')\n    test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')\n    train = pd.concat([train, test], sort=True)\n    train[['Description', 'Name']] = train[['Description', 'Name']].astype(str)\n    train[\"Description_Emb\"] = [analyzer_embed(text) for text in train[\"Description\"]]\n    train[\"Description_bow\"] = [analyzer_bow(text) for text in train[\"Description\"]]\n    train['fix_Breed1'] = train['Breed1']\n    train['fix_Breed2'] = train['Breed2']\n    train.loc[train['Breed1'] == 0, 'fix_Breed1'] = train[train['Breed1'] == 0]['Breed2']\n    train.loc[train['Breed1'] == 0, 'fix_Breed2'] = train[train['Breed1'] == 0]['Breed1']\n    train['Breed1_equals_Breed2'] = (train['Breed1'] == train['Breed2']).astype(int)\n    train['single_Breed'] = (train['Breed1'] * train['Breed2'] == 0).astype(int)\n    train.drop([\"Breed1\", \"Breed2\"], axis=1, inplace=True)\n    train.rename(columns={\"fix_Breed1\": \"Breed1\", \"fix_Breed2\": \"Breed2\"}, inplace=True)\n    logger.info(f'DataFrame shape: {train.shape}')\n\n    with timer('common features'):\n        with timer('merge additional state files'):\n            train = merge_state_info(train)\n\n        common_cols = list(train.columns)\n\n        with timer('merge additional breed rating files'):\n            orig_cols = list(train.columns)\n            train = merge_breed_name_sub(train)\n            t_cols += [c for c in train.columns if c not in orig_cols]\n            k_cols += [c for c in train.columns if c not in orig_cols]\n\n            orig_cols = list(train.columns)\n            train = merge_breed_name(train)\n            g_cols += [c for c in train.columns if c not in orig_cols and \"_main_breed_all\" in c] + [\n                \"Type_second_breed\"]\n\n        with timer('preprocess category features'):\n            train = to_category(train, cat=categorical_features)\n\n        train[text_features] = train[text_features].fillna('missing')\n        with timer('preprocess metadata'):  # the columns used differ between kaeru and takuoko: kaeru uses all of the 'first' aggregates, takuoko does not\n            # TODO: parallelization\n            meta_parser = MetaDataParser()\n            sentiment_features = meta_parser.sentiment_files['sentiment_filename'].apply(\n                lambda x: meta_parser._transform(x, sentiment=True))\n            meta_parser.sentiment_files = pd.concat([meta_parser.sentiment_files, sentiment_features], axis=1,\n                                                    sort=False)\n            meta_features = meta_parser.metadata_files['metadata_filename'].apply(\n                lambda x: meta_parser._transform(x, sentiment=False))\n            meta_parser.metadata_files = pd.concat([meta_parser.metadata_files, meta_features], axis=1, sort=False)\n\n            stats = ['mean']\n            columns = [c for c in sentiment_features.columns if c not in ['sentiment_text', 'sentiment_entities']]\n            g = meta_parser.sentiment_files[list(sentiment_features.columns) + ['PetID']].groupby('PetID').agg(stats)\n            g.columns = [c + '_' + stat for c in columns for stat in stats]\n            train = train.merge(g, how='left', on='PetID')\n            k_cols += [c for c in g.columns if re.match(\"\\w*_mean_\\w*mean\", c)] + [\"magnitude_mean\", \"score_mean\"]\n            t_cols += [c for c in g.columns if re.match(\"\\w*_sum_\\w*mean\", c)] + [\"magnitude_mean\", \"score_mean\"]\n            g_cols += list(g.columns)\n\n            stats = ['mean', 'min', 'max', 'median', 'var', 'sum', 'first']\n            columns = [c for c in meta_features.columns if c not in ['annots_top_desc', 'annots_top_desc_pick']]\n            g = meta_parser.metadata_files[columns + ['PetID']].groupby('PetID').agg(stats)\n            g.columns = [c + '_' + stat for c in columns for stat in stats]\n            train = train.merge(g, how='left', on='PetID')\n            k_cols += [c for c in g.columns if\n                       (\"mean_mean\" in c or \"mean_sum\" in c or \"first_first\" in c) and \"annots_score_normal\" not in c] + \\\n                      ['crop_conf_first', 'crop_x_first', 'crop_y_first', 'crop_importance_first', 'crop_conf_mean',\n                       'crop_conf_sum', 'crop_importance_mean', 'crop_importance_sum']\n            t_cols += [c for c in g.columns if ((re.match(\"\\w*_sum_\\w*(?<!sum)$\", c) and \"first\" not in c) \\\n                                                or (\n                                                        \"sum\" not in c and \"first\" not in c)) and \"annots_score_pick\" not in c]\n            g_cols += [c for c in g.columns if\n                       \"mean_mean\" in c or \"mean_sum\" in c or \"mean_var\" in c and \"annots_score_pick\" not in c] + \\\n                      ['crop_conf_mean', 'crop_conf_sum', 'crop_conf_var', 'crop_importance_mean',\n                       'crop_importance_sum', 'crop_importance_var']\n\n        with timer('preprocess metatext'):\n            meta_features = meta_parser.metadata_files[['PetID', 'annots_top_desc', 'annots_top_desc_pick']]\n            meta_features_all = meta_features.groupby('PetID')['annots_top_desc'].apply(\n                lambda x: \" \".join(x)).reset_index()\n            train = train.merge(meta_features_all, how='left', on='PetID')\n\n            meta_features_pick = meta_features.groupby('PetID')['annots_top_desc_pick'].apply(\n                lambda x: \" \".join(x)).reset_index()\n            train = train.merge(meta_features_pick, how='left', on='PetID')\n\n            sentiment_features = meta_parser.sentiment_files[['PetID', 'sentiment_text', 'sentiment_entities']]\n            sentiment_features_txt = sentiment_features.groupby('PetID')['sentiment_text'].apply(\n                lambda x: \" \".join(x)).reset_index()\n            train = train.merge(sentiment_features_txt, how='left', on='PetID')\n\n            sentiment_features_entities = sentiment_features.groupby('PetID')['sentiment_entities'].apply(\n                lambda x: \" \".join(x)).reset_index()\n            train = train.merge(sentiment_features_entities, how='left', on='PetID')\n\n            train[meta_text] = train[meta_text].astype(str)\n            train[meta_text] = train[meta_text].fillna(\"missing\")\n            del meta_features_all, 
meta_features_pick, meta_features, sentiment_features;\n gc.collect()\n\n with timer('make image features'):\n train_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_images/*.jpg'))\n test_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_images/*.jpg'))\n image_files = train_image_files + test_image_files\n train_images = pd.DataFrame(image_files, columns=['image_filename'])\n train_images['PetID'] = train_images['image_filename'].apply(lambda x: x.split('/')[-1].split('-')[0])\n\n with timer('breed mismatch features'):\n train = breed_mismatch(train)\n train = breed_mismatch_desc(train)\n train = breed_mismatch_meta(train)\n t_cols += ['breeds_mismatch', 'desc_contain_dog', 'desc_contain_cat', 'desc_miss_match',\n 'annot_contain_dog', 'annot_contain_cat', 'annot_miss_match']\n k_cols += ['breeds_mismatch', 'desc_miss_match', 'annot_miss_match']\n\n with timer('preprocess densenet'):\n if debug:\n import feather\n\n X = feather.read_dataframe(\"feature/dense121_2_X.feather\")\n gp_img = X.groupby(\"PetID\").mean().reset_index()\n train = pd.merge(train, gp_img, how=\"left\", on=\"PetID\")\n gp_dense_first = X.groupby(\"PetID\").first().reset_index()\n t_cols += list(gp_img.drop(\"PetID\", axis=1).columns)\n del gp_img;\n gc.collect()\n else:\n pet_ids = train_images['PetID'].values\n img_pathes = train_images['image_filename'].values\n n_batches = len(pet_ids) // batch_size + 1\n\n inp = Input((256, 256, 3))\n backbone = DenseNet121(input_tensor=inp,\n weights='../input/densenet121weights/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',\n include_top=False)\n x = backbone.output\n x = GlobalAveragePooling2D()(x)\n x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)\n x = AveragePooling1D(4)(x)\n out = Lambda(lambda x: x[:, :, 0])(x)\n m = Model(inp, out)\n\n features = []\n for b in range(n_batches):\n start = b * batch_size\n end = (b + 1) * batch_size\n batch_pets = pet_ids[start: end]\n batch_path = img_pathes[start: end]\n batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))\n for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):\n try:\n batch_images[i] = load_image(path, preprocess_input_dense)\n except:\n try:\n batch_images[i] = load_image(path, preprocess_input_dense)\n except:\n pass\n batch_preds = m.predict(batch_images)\n for i, pet_id in enumerate(batch_pets):\n features.append([pet_id] + list(batch_preds[i]))\n X = pd.DataFrame(features,\n columns=[\"PetID\"] + [\"dense121_2_{}\".format(i) for i in range(batch_preds.shape[1])])\n gp_img = X.groupby(\"PetID\").mean().reset_index()\n train = pd.merge(train, gp_img, how=\"left\", on=\"PetID\")\n gp_dense_first = X.groupby(\"PetID\").first().reset_index()\n t_cols += list(gp_img.drop(\"PetID\", axis=1).columns)\n del m, gp_img;\n gc.collect();\n K.clear_session()\n\n if T_flag:\n with timer('takuoko features'):\n orig_cols = train.columns\n with timer('merge emoji files'):\n train = merge_emoji(train)\n\n with timer('preprocess breed files'):\n train = merge_breed_ranking(train)\n\n with timer('preprocess and simple features'):\n train = get_interactions(train)\n\n with timer('tfidf + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n TfidfVectorizer(),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = 
vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['tfidf_svd_{}'.format(i) for i in range(n_components)]\n + ['tfidf_nmf_{}'.format(i) for i in range(n_components)]\n + ['tfidf_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('count + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n CountVectorizer(),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['count_svd_{}'.format(i) for i in range(n_components)]\n + ['count_nmf_{}'.format(i) for i in range(n_components)]\n + ['count_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('tfidf2 + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n TfidfVectorizer(min_df=2, max_features=20000,\n strip_accents='unicode', analyzer='word', token_pattern=r'\\w{1,}',\n ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['tfidf2_svd_{}'.format(i) for i in range(n_components)]\n + ['tfidf2_nmf_{}'.format(i) for i in range(n_components)]\n + ['tfidf2_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('count2 + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n CountVectorizer(min_df=2, max_features=20000,\n strip_accents='unicode', analyzer='word', token_pattern=r'\\w{1,}',\n ngram_range=(1, 3), stop_words='english'),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['count2_svd_{}'.format(i) for i in range(n_components)]\n + ['count2_nmf_{}'.format(i) for i in range(n_components)]\n + ['count2_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('tfidf3 + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n TfidfVectorizer(min_df=30, max_features=50000, binary=True,\n strip_accents='unicode', analyzer='char', token_pattern=r'\\w{1,}',\n ngram_range=(3, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['tfidf3_svd_{}'.format(i) for i in range(n_components)]\n + 
['tfidf3_nmf_{}'.format(i) for i in range(n_components)]\n + ['tfidf3_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('count3 + svd / nmf / bm25'):\n vectorizer = make_pipeline(\n CountVectorizer(min_df=30, max_features=50000, binary=True,\n strip_accents='unicode', analyzer='char', token_pattern=r'\\w{1,}',\n ngram_range=(3, 3), stop_words='english'),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['Description_bow'])\n X = pd.DataFrame(X, columns=['count3_svd_{}'.format(i) for i in range(n_components)]\n + ['count3_nmf_{}'.format(i) for i in range(n_components)]\n + ['count3_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('meta text bow/tfidf->svd / nmf / bm25'):\n train['desc'] = ''\n for c in ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text']:\n train['desc'] += ' ' + train[c].astype(str)\n\n train[\"desc_bow\"] = [analyzer_bow(text) for text in train[\"desc\"]]\n\n vectorizer = make_pipeline(\n TfidfVectorizer(),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['desc_bow'])\n X = pd.DataFrame(X, columns=['meta_desc_tfidf_svd_{}'.format(i) for i in range(n_components)]\n + ['meta_desc_tfidf_nmf_{}'.format(i) for i in range(n_components)]\n + ['meta_desc_tfidf_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n\n vectorizer = make_pipeline(\n CountVectorizer(),\n make_union(\n TruncatedSVD(n_components=n_components, random_state=seed),\n NMF(n_components=n_components, random_state=seed),\n make_pipeline(\n BM25Transformer(use_idf=True, k1=2.0, b=0.75),\n TruncatedSVD(n_components=n_components, random_state=seed)\n ),\n n_jobs=1,\n ),\n )\n X = vectorizer.fit_transform(train['desc_bow'])\n X = pd.DataFrame(X, columns=['meta_desc_count_svd_{}'.format(i) for i in range(n_components)]\n + ['meta_desc_count_nmf_{}'.format(i) for i in range(n_components)]\n + ['meta_desc_count_bm25_{}'.format(i) for i in range(n_components)])\n train = pd.concat([train, X], axis=1)\n train.drop(['desc_bow', 'desc'], axis=1, inplace=True)\n\n with timer('description fasttext'):\n embedding = '../input/quora-embedding/GoogleNews-vectors-negative300.bin'\n model = KeyedVectors.load_word2vec_format(embedding, binary=True)\n X = pretrained_w2v(train[\"Description_Emb\"], model, name=\"gnvec\")\n train = pd.concat([train, X], axis=1)\n del model;\n gc.collect()\n\n with timer('description glove'):\n embedding = \"../input/pymagnitude-data/glove.840B.300d.magnitude\"\n model = Magnitude(embedding)\n X = w2v_pymagnitude(train[\"Description_Emb\"], model, name=\"glove_mag\")\n train = pd.concat([train, X], axis=1)\n del model;\n gc.collect()\n\n with timer('image features'):\n train['num_images'] = train['PetID'].apply(lambda x: sum(train_images.PetID == x))\n train['num_images_per_pet'] = train['num_images'] / train['Quantity']\n\n 
with timer('make inception resnet features'):\n if debug:\n import feather\n\n X = feather.read_dataframe(\"feature/inception_resnet.feather\")\n train = pd.concat((train, X), axis=1)\n else:\n pet_ids = train_images['PetID'].values\n img_pathes = train_images['image_filename'].values\n n_batches = len(pet_ids) // batch_size + 1\n\n inp = Input((256, 256, 3))\n backbone = InceptionResNetV2(input_tensor=inp,\n weights='../input/inceptionresnetv2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',\n include_top=False)\n x = backbone.output\n x = GlobalAveragePooling2D()(x)\n x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)\n x = AveragePooling1D(4)(x)\n out = Lambda(lambda x: x[:, :, 0])(x)\n m = Model(inp, out)\n\n features = []\n for b in range(n_batches):\n start = b * batch_size\n end = (b + 1) * batch_size\n batch_pets = pet_ids[start: end]\n batch_path = img_pathes[start: end]\n batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))\n for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):\n try:\n batch_images[i] = load_image(path, preprocess_input_incep)\n except:\n try:\n batch_images[i] = load_image(path, preprocess_input_incep)\n except:\n pass\n batch_preds = m.predict(batch_images)\n for i, pet_id in enumerate(batch_pets):\n features.append([pet_id] + list(batch_preds[i]))\n X = pd.DataFrame(features, columns=[\"PetID\"] + [\"inception_resnet_{}\".format(i) for i in\n range(batch_preds.shape[1])])\n gp_img = X.groupby(\"PetID\").mean().reset_index()\n train = pd.merge(train, gp_img, how=\"left\", on=\"PetID\")\n del m, gp_img;\n gc.collect();\n K.clear_session()\n\n with timer('aggregation'):\n stats = ['mean', 'sum', 'median', 'min', 'max', 'var']\n groupby_dict = [\n {\n 'key': ['Name'],\n 'var': ['Age'],\n 'agg': ['count']\n },\n {\n 'key': ['RescuerID'],\n 'var': ['Age'],\n 'agg': ['count']\n },\n {\n 'key': ['RescuerID', 'State'],\n 'var': ['Age'],\n 'agg': ['count']\n },\n {\n 'key': ['RescuerID', 'Type'],\n 'var': ['Age'],\n 'agg': ['count']\n },\n {\n 'key': ['RescuerID'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['RescuerID', 'State'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['RescuerID', 'Type'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['Type', 'Breed1', 'Breed2'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['Type', 'Breed1'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['State'],\n 'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n {\n 'key': ['MaturitySize'],\n 'var': ['Age', 'Quantity', 'Sterilized', 'Fee'],\n 'agg': stats\n },\n ]\n\n nunique_dict = [\n {\n 'key': ['State'],\n 'var': ['RescuerID'],\n 'agg': ['nunique']\n },\n {\n 'key': ['Dewormed'],\n 'var': ['RescuerID'],\n 'agg': ['nunique']\n },\n {\n 'key': ['Type'],\n 'var': ['RescuerID'],\n 'agg': ['nunique']\n },\n {\n 'key': ['Type', 'Breed1'],\n 'var': ['RescuerID'],\n 'agg': ['nunique']\n },\n ]\n\n groupby = GroupbyTransformer(param_dict=nunique_dict)\n train = groupby.transform(train)\n groupby = GroupbyTransformer(param_dict=groupby_dict)\n train = groupby.transform(train)\n diff = DiffGroupbyTransformer(param_dict=groupby_dict)\n train = diff.transform(train)\n ratio = RatioGroupbyTransformer(param_dict=groupby_dict)\n train = 
ratio.transform(train)\n\n            with timer('category embedding'):\n                train[['BreedName_main_breed', 'BreedName_second_breed']] = \\\n                    train[['BreedName_main_breed', 'BreedName_second_breed']].astype(\"int32\")\n                for c in categorical_features:\n                    train[c] = train[c].fillna(train[c].max() + 1)\n\n                cv = CategoryVectorizer(categorical_features, n_components,\n                                        vectorizer=CountVectorizer(),\n                                        transformer=LatentDirichletAllocation(n_components=n_components, n_jobs=-1,\n                                                                              learning_method='online',\n                                                                              random_state=777),\n                                        name='CountLDA')\n                features1 = cv.transform(train).astype(np.float32)\n\n                cv = CategoryVectorizer(categorical_features, n_components,\n                                        vectorizer=CountVectorizer(),\n                                        transformer=TruncatedSVD(n_components=n_components, random_state=777),\n                                        name='CountSVD')\n                features2 = cv.transform(train).astype(np.float32)\n                train = pd.concat([train, features1, features2], axis=1)\n\n            t_cols += [c for c in train.columns if c not in orig_cols]\n\n    if K_flag or G_flag:\n        with timer('kaeru and gege features'):\n            with timer('text stats features'):\n                train = get_text_features(train)\n                k_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text']\n                g_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_entities']\n\n    if K_flag:\n        with timer('kaeru features'):\n            orig_cols = train.columns\n            with timer('engineering age'):\n                train = get_age_feats(train)\n\n            with timer('frequency encoding'):\n                freq_cols = ['BreedName_main_breed', 'BreedName_second_breed']\n                train = freq_encoding(train, freq_cols)\n\n            with timer('kanji feature'):\n                train['in_kanji'] = train.Description.apply(lambda x: is_zh(x))\n\n            with timer('tfidf + svd / nmf'):\n                vectorizer = make_pipeline(TfidfVectorizer(),\n                                           TruncatedSVD(n_components=n_components, random_state=kaeru_seed))\n                X = vectorizer.fit_transform(train['Description'])\n                X = pd.DataFrame(X, columns=['tfidf_k_svd_{}'.format(i) for i in range(n_components)])\n                train = pd.concat([train, X], axis=1)\n                del vectorizer;\n                gc.collect()\n\n            with timer('description doc2vec'):\n                d2v_param = {\n                    \"features_num\": 300,\n                    \"min_word_count\": 10,\n                    \"context\": 5,\n                    \"downsampling\": 1e-3,\n                    \"epoch_num\": 10\n                }\n                X = doc2vec(train[\"Description\"], d2v_param)\n                train = pd.concat([train, X], axis=1)\n\n            with timer('annots_top_desc + svd / nmf'):\n                vectorizer = make_pipeline(TfidfVectorizer(),\n                                           TruncatedSVD(n_components=n_components, random_state=kaeru_seed))\n                X = vectorizer.fit_transform(train['annots_top_desc_pick'])\n                X = pd.DataFrame(X, columns=['annots_top_desc_k_svd_{}'.format(i) for i in range(n_components)])\n                train = pd.concat([train, X], axis=1)\n                del vectorizer;\n                gc.collect()\n\n            with timer('densenet features'):\n                vectorizer = TruncatedSVD(n_components=n_components, random_state=kaeru_seed)\n                X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))\n                X = pd.DataFrame(X, columns=['densenet121_svd_{}'.format(i) for i in range(n_components)])\n                X[\"PetID\"] = gp_dense_first[\"PetID\"]\n                train = pd.merge(train, X, how=\"left\", on=\"PetID\")\n                del vectorizer;\n                gc.collect()\n\n            with timer('aggregation'):\n                stats = ['mean', 'sum', 'min', 'max']\n                var = ['Age_k', 'MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k']\n                for c in ['Age', 'MaturitySize', 'FurLength', 'Fee', 'Health']:\n                    train[c + \"_k\"] = train[c]\n                groupby_dict = [\n                    {\n                        'key': ['RescuerID'],\n                        'var': ['Age_k'],\n                        'agg': ['count']\n                    },\n                    {\n                        'key': ['RescuerID'],\n                        'var': ['Age_k', 'Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text'],\n                        'agg': stats + [\"var\"]\n                    },\n                    {\n                        'key': ['RescuerID'],\n                        'var': ['MaturitySize_k', 
'FurLength_k', 'Fee_k', 'Health_k'],\n 'agg': stats\n }\n ]\n\n groupby = GroupbyTransformer(param_dict=groupby_dict)\n train = groupby.transform(train)\n train.drop(var, axis=1, inplace=True)\n\n k_cols += [c for c in train.columns if c not in orig_cols if c not in kaeru_drop_cols]\n\n if G_flag:\n with timer('gege features'):\n orig_cols = train.columns\n with timer('densenet features'):\n vectorizer = TruncatedSVD(n_components=n_components_gege_img, random_state=kaeru_seed)\n X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))\n X = pd.DataFrame(X, columns=['densenet121_g_svd_{}'.format(i) for i in range(n_components_gege_img)])\n X[\"PetID\"] = gp_dense_first[\"PetID\"]\n train = pd.merge(train, X, how=\"left\", on=\"PetID\")\n del vectorizer, gp_dense_first;\n gc.collect()\n\n with timer('frequency encoding'):\n freq_cols = ['RescuerID', 'Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'State']\n train = freq_encoding(train, freq_cols)\n\n with timer('tfidf + svd'):\n vectorizer = make_pipeline(\n TfidfVectorizer(min_df=2, max_features=None,\n strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),\n TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)\n )\n X = vectorizer.fit_transform(train['Description'])\n X = pd.DataFrame(X, columns=['tfidf_g_svd_{}'.format(i) for i in range(n_components_gege_txt)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('annots tfidf + svd'):\n vectorizer = make_pipeline(\n TfidfVectorizer(min_df=2, max_features=None,\n strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),\n TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)\n )\n X = vectorizer.fit_transform(train['annots_top_desc'])\n X = pd.DataFrame(X, columns=['annots_top_desc_tfidf_g_svd_{}'.format(i) for i in\n range(n_components_gege_txt)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('sentiment entities tfidf + svd'):\n vectorizer = make_pipeline(\n TfidfVectorizer(min_df=2, max_features=None,\n strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),\n TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)\n )\n X = vectorizer.fit_transform(train['sentiment_entities'])\n X = pd.DataFrame(X, columns=['sentiment_entities_tfidf_g_svd_{}'.format(i) for i in\n range(n_components_gege_txt)])\n train = pd.concat([train, X], axis=1)\n del vectorizer;\n gc.collect()\n\n with timer('image basic features'):\n train_images['image_size'] = train_images['image_filename'].apply(getSize)\n train_images['temp_size'] = train_images['image_filename'].apply(getDimensions)\n train_images['width'] = train_images['temp_size'].apply(lambda x: x[0])\n train_images['height'] = train_images['temp_size'].apply(lambda x: x[1])\n train_images = train_images.drop(['temp_size'], axis=1)\n\n aggs = {\n 'image_size': ['sum', 'mean', 'var'],\n 'width': ['sum', 'mean', 'var'],\n 'height': ['sum', 'mean', 'var'],\n }\n\n gp = train_images.groupby('PetID').agg(aggs)\n new_columns = [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]\n gp.columns = new_columns\n train = train.merge(gp.reset_index(), how=\"left\", on=\"PetID\")\n\n g_cols += [c for c in train.columns if c not in orig_cols]\n\n dtype_cols = 
['BreedName_main_breed', 'BreedName_second_breed', 'BreedName_main_breed_all']\n train[dtype_cols] = train[dtype_cols].astype(\"int32\")\n\n logger.info(train.head())\n\n train.to_feather(\"all_data.feather\")\n np.save(\"common_cols.npy\", np.array(common_cols))\n np.save(\"t_cols.npy\", np.array(t_cols))\n np.save(\"k_cols.npy\", np.array(k_cols))\n np.save(\"g_cols.npy\", np.array(g_cols))\n\n if T_flag:\n with timer('takuoko feature info'):\n categorical_features_t = list(set(categorical_features) - set(remove))\n predictors = list(set(common_cols + t_cols + categorical_features_t) - set([target] + remove))\n predictors = [c for c in predictors if c in use_cols]\n categorical_features_t = [c for c in categorical_features_t if c in predictors]\n logger.info(f'predictors / use_cols = {len(predictors)} / {len(use_cols)}')\n\n train = train.loc[:, ~train.columns.duplicated()]\n\n X = train.loc[:, predictors]\n y = train.loc[:, target]\n rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]\n X_test = X[len_train:]\n X = X[:len_train]\n y = y[:len_train]\n X.to_feather(\"X_train_t.feather\")\n X_test.reset_index(drop=True).to_feather(\"X_test_t.feather\")\n\n with timer('takuoko modeling'):\n y_pred_t = np.empty(len_train, )\n y_test_t = []\n train_losses, valid_losses = [], []\n\n # cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)\n # for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):\n # cv = GroupKFold(n_splits=n_splits)\n # for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=None, groups=rescuer_id)):\n cv = StratifiedGroupKFold(n_splits=n_splits)\n for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):\n X_train = X.loc[train_index, :]\n X_valid = X.loc[valid_index, :]\n y_train = y[train_index]\n y_valid = y[valid_index]\n\n pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,\n categorical_features_t, predictors,\n maxvalue_dict, fold_id, MODEL_PARAMS,\n MODEL_NAME + \"_t\")\n y_pred_t[valid_index] = pred_val\n y_test_t.append(pred_test)\n train_losses.append(train_rmse)\n valid_losses.append(valid_rmse)\n\n y_test_t = np.mean(y_test_t, axis=0)\n logger.info(f'train RMSE = {np.mean(train_losses)}')\n logger.info(f'valid RMSE = {np.mean(valid_losses)}')\n\n np.save(\"y_test_t.npy\", y_test_t)\n np.save(\"y_oof_t.npy\", y_pred_t)\n\n if K_flag:\n with timer('kaeru feature info'):\n kaeru_cat_cols = None\n predictors = list(set(common_cols + k_cols) - set([target] + remove + kaeru_drop_cols))\n\n X = train.loc[:, predictors]\n y = train.loc[:, target]\n rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]\n X_test = X[len_train:]\n X = X[:len_train]\n y = y[:len_train]\n X.to_feather(\"X_train_k.feather\")\n X_test.reset_index(drop=True).to_feather(\"X_test_k.feather\")\n\n with timer('kaeru modeling'):\n y_pred_k = np.empty(len_train, )\n y_test_k = []\n train_losses, valid_losses = [], []\n\n # cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)\n # for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):\n cv = StratifiedGroupKFold(n_splits=n_splits)\n for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):\n X_train = X.loc[train_index, :]\n X_valid = X.loc[valid_index, :]\n y_train = y[train_index]\n y_valid = y[valid_index]\n\n pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, 
y_valid, X_test,\n kaeru_cat_cols, predictors, maxvalue_dict,\n fold_id, KAERU_PARAMS, MODEL_NAME + \"_k\")\n y_pred_k[valid_index] = pred_val\n y_test_k.append(pred_test)\n train_losses.append(train_rmse)\n valid_losses.append(valid_rmse)\n\n y_test_k = np.mean(y_test_k, axis=0)\n logger.info(f'train RMSE = {np.mean(train_losses)}')\n logger.info(f'valid RMSE = {np.mean(valid_losses)}')\n\n np.save(\"y_test_k.npy\", y_test_k)\n np.save(\"y_oof_k.npy\", y_pred_k)\n\n if G_flag:\n with timer('gege feature info'):\n predictors = list(set(common_cols + g_cols) - set([target] + remove + gege_drop_cols))\n categorical_features_g = [c for c in categorical_features if c in predictors]\n\n X = train.loc[:, predictors]\n y = train.loc[:, target]\n rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]\n X_test = X[len_train:]\n X = X[:len_train]\n y = y[:len_train]\n X.to_feather(\"X_train_g.feather\")\n X_test.reset_index(drop=True).to_feather(\"X_test_g.feather\")\n\n with timer('gege adversarial validation'):\n train_idx = range(0, len_train)\n X_adv = train.loc[:, predictors]\n y_adv = np.array([0 for i in range(len(X))] + [1 for i in range(len(X_test))])\n\n X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst = train_test_split(X_adv, y_adv, test_size=0.20, shuffle=True,\n random_state=42)\n\n lgtrain = lgb.Dataset(X_adv_tr, y_adv_tr,\n categorical_feature=categorical_features_g,\n feature_name=predictors)\n lgvalid = lgb.Dataset(X_adv_tst, y_adv_tst,\n categorical_feature=categorical_features_g,\n feature_name=predictors)\n\n lgb_adv = lgb.train(\n ADV_PARAMS,\n lgtrain,\n num_boost_round=20000,\n valid_sets=[lgtrain, lgvalid],\n valid_names=['train', 'valid'],\n early_stopping_rounds=500,\n verbose_eval=20000\n )\n\n train_preds = lgb_adv.predict(X_adv.iloc[train_idx])\n extract_idx = np.argsort(-train_preds)[:int(len(train_idx) * 0.85)]\n\n del X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst, X_adv, y_adv, lgb_adv;\n gc.collect()\n\n with timer('gege modeling'):\n X = X.iloc[extract_idx].reset_index(drop=True)\n y = y[extract_idx].reset_index(drop=True)\n rescuer_id = rescuer_id[extract_idx].reset_index(drop=True)\n y_pred_g = np.empty(len(extract_idx), )\n y_test_g = []\n train_losses, valid_losses = [], []\n\n cv = StratifiedGroupKFold(n_splits=n_splits)\n for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):\n X_train = X.loc[train_index, :]\n X_valid = X.loc[valid_index, :]\n y_train = y[train_index]\n y_valid = y[valid_index]\n\n pred_val, pred_test, train_rmse, valid_rmse = run_xgb_model(X_train, y_train,\n X_valid, y_valid, X_test, predictors,\n maxvalue_dict,\n fold_id, MODEL_PARAMS_XGB,\n MODEL_NAME + \"_g\")\n y_pred_g[valid_index] = pred_val\n y_test_g.append(pred_test)\n train_losses.append(train_rmse)\n valid_losses.append(valid_rmse)\n\n y_test_g = np.mean(y_test_g, axis=0)\n logger.info(f'train RMSE = {np.mean(train_losses)}')\n logger.info(f'valid RMSE = {np.mean(valid_losses)}')\n\n np.save(\"y_test_g.npy\", y_test_g)\n np.save(\"y_oof_g.npy\", y_pred_g)\n np.save(\"extract_idx.npy\", extract_idx)\n\n if T_flag and K_flag and G_flag:\n y_pred = (y_pred_t[extract_idx] + y_pred_k[extract_idx] + y_pred_g) / 3\n y_test = (y_test_t + y_test_k + y_test_g) / 3\n elif T_flag and K_flag:\n y_pred = y_pred_t * 0.5 + y_pred_k * 0.5\n y_test = y_test_t * 0.5 + y_test_k * 0.5\n elif T_flag and G_flag:\n y_pred = y_pred_t[extract_idx] * 0.5 + y_pred_g * 0.5\n y_test = y_test_t * 0.5 + y_test_g * 0.5\n elif G_flag and K_flag:\n y_pred = y_pred_g * 0.5 + 
y_pred_k[extract_idx] * 0.5\n y_test = y_test_g * 0.5 + y_test_k * 0.5\n elif T_flag:\n y_pred = y_pred_t\n y_test = y_test_t\n elif K_flag:\n y_pred = y_pred_k\n y_test = y_test_k\n elif G_flag:\n y_pred = y_pred_g\n y_test = y_test_g\n\n with timer('optimize threshold'):\n optR = OptimizedRounder()\n optR.fit(y_pred, y)\n coefficients = optR.coefficients()\n y_pred = optR.predict(y_pred, coefficients)\n score = get_score(y, y_pred)\n logger.info(f'Coefficients = {coefficients}')\n logger.info(f'QWK = {score}')\n y_test = optR.predict(y_test, coefficients).astype(int)\n\n with timer('postprocess'):\n submission_with_postprocess(y_test)\n" ]
[ [ "numpy.median", "numpy.min", "numpy.mean", "numpy.where", "sklearn.feature_extraction.text.CountVectorizer", "pandas.concat", "pandas.read_csv", "scipy.stats.rankdata", "numpy.issubdtype", "numpy.max", "sklearn.feature_extraction.text._document_frequency", "numpy.empty", "numpy.log", "pandas.merge", "pandas.DataFrame", "matplotlib.pyplot.savefig", "numpy.save", "sklearn.decomposition.NMF", "matplotlib.pyplot.tight_layout", "sklearn.decomposition.TruncatedSVD", "scipy.sparse.csr_matrix", "torch.Tensor", "matplotlib.use", "scipy.sparse.issparse", "numpy.array", "sklearn.utils.validation.check_is_fitted", "numpy.zeros", "scipy.sparse.csc_matrix", "sklearn.model_selection.GroupKFold", "matplotlib.pyplot.figure", "pandas.read_json", "sklearn.decomposition.LatentDirichletAllocation", "numpy.argsort", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.clf", "sklearn.metrics.cohen_kappa_score", "sklearn.metrics.mean_squared_error", "numpy.asarray", "scipy.sparse.spdiags", "numpy.random.seed", "numpy.sum", "pandas.DataFrame.from_dict", "pandas.factorize", "pandas.Series", "numpy.average", "numpy.var" ] ]
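A minimal usage sketch for the OptimizedRounder / to_bins pair in the script above. Everything here is illustrative: the synthetic labels and noise level are invented, and quadratic-weighted kappa via sklearn.metrics.cohen_kappa_score stands in for the script's get_score helper (which the apis list suggests it wraps); only the starting thresholds [0.2, 0.4, 0.6, 0.8], the rank-scaling of predictions, and the to_bins contract are taken from the source.

import numpy as np
from scipy.stats import rankdata
from sklearn.metrics import cohen_kappa_score

def to_bins(x, borders):
    # same contract as the script's to_bins: index of the first threshold x falls under
    for i, border in enumerate(borders):
        if x <= border:
            return i
    return len(borders)

y_true = np.random.randint(0, 5, size=1000)              # hypothetical 5-class ordinal target
noisy = y_true + np.random.normal(0, 0.7, size=1000)     # hypothetical raw regression output
y_raw = rankdata(noisy) / len(noisy)                     # rank-scaled to (0, 1], as the script does
coef = [0.2, 0.4, 0.6, 0.8]                              # the script's starting thresholds
y_binned = np.array([to_bins(p, coef) for p in y_raw])
print(cohen_kappa_score(y_true, y_binned, weights='quadratic'))  # QWK before threshold optimization

The golden-section search in OptimizedRounder.fit then perturbs one threshold at a time inside the ab_start brackets, keeping whichever move lowers the negative kappa.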
navivokaj/segmentation_models.pytorch
[ "5dbb5f6733515097cecc93f078c09e59ccbeb0c0" ]
[ "segmentation_models_pytorch/decoders/unet/decoder.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom segmentation_models_pytorch.base import modules as md\n\n\nclass DecoderBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n skip_channels,\n out_channels,\n use_batchnorm=True,\n attention_type=None,\n ):\n super().__init__()\n self.conv1 = md.Conv2dReLU(\n in_channels + skip_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n use_batchnorm=use_batchnorm,\n )\n self.attention1 = md.Attention(attention_type, in_channels=in_channels + skip_channels)\n self.conv2 = md.Conv2dReLU(\n out_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n use_batchnorm=use_batchnorm,\n )\n self.attention2 = md.Attention(attention_type, in_channels=out_channels)\n\n def forward(self, x, skip=None):\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n if skip is not None:\n if skip.shape[-1] != x.shape[-1]:\n skip = F.interpolate(skip, scale_factor=2, mode=\"nearest\")\n x = torch.cat([x, skip], dim=1)\n x = self.attention1(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.attention2(x)\n return x\n\n\nclass CenterBlock(nn.Sequential):\n def __init__(self, in_channels, out_channels, use_batchnorm=True):\n conv1 = md.Conv2dReLU(\n in_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n use_batchnorm=use_batchnorm,\n )\n conv2 = md.Conv2dReLU(\n out_channels,\n out_channels,\n kernel_size=3,\n padding=1,\n use_batchnorm=use_batchnorm,\n )\n super().__init__(conv1, conv2)\n\n\nclass UnetDecoder(nn.Module):\n def __init__(\n self,\n encoder_channels,\n decoder_channels,\n n_blocks=5,\n use_batchnorm=True,\n attention_type=None,\n center=False,\n ):\n super().__init__()\n\n if n_blocks != len(decoder_channels):\n raise ValueError(\n \"Model depth is {}, but you provide `decoder_channels` for {} blocks.\".format(\n n_blocks, len(decoder_channels)\n )\n )\n\n # remove first skip with same spatial resolution\n encoder_channels = encoder_channels[1:]\n # reverse channels to start from head of encoder\n encoder_channels = encoder_channels[::-1]\n\n # computing blocks input and output channels\n head_channels = encoder_channels[0]\n in_channels = [head_channels] + list(decoder_channels[:-1])\n skip_channels = list(encoder_channels[1:]) + [0]\n out_channels = decoder_channels\n\n if center:\n self.center = CenterBlock(head_channels, head_channels, use_batchnorm=use_batchnorm)\n else:\n self.center = nn.Identity()\n\n # combine decoder keyword arguments\n kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)\n blocks = [\n DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)\n for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)\n ]\n self.blocks = nn.ModuleList(blocks)\n\n def forward(self, *features):\n\n features = features[1:] # remove first skip with same spatial resolution\n features = features[::-1] # reverse channels to start from head of encoder\n\n head = features[0]\n skips = features[1:]\n\n x = self.center(head)\n for i, decoder_block in enumerate(self.blocks):\n skip = skips[i] if i < len(skips) else None\n x = decoder_block(x, skip)\n\n return x\n" ]
[ [ "torch.nn.functional.interpolate", "torch.cat", "torch.nn.Identity", "torch.nn.ModuleList" ] ]
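A small pure-Python sketch of the channel bookkeeping in UnetDecoder.__init__ above, since that is the least obvious part of the file. The encoder_channels tuple is a hypothetical ResNet-style feature pyramid and the decoder widths are an assumed common default; neither is taken from this record.

# Reproduces the in/skip/out channel computation from UnetDecoder.__init__.
encoder_channels = (3, 64, 64, 128, 256, 512)   # hypothetical encoder pyramid (shallow -> deep)
decoder_channels = (256, 128, 64, 32, 16)       # assumed decoder widths

channels = encoder_channels[1:][::-1]           # drop the same-resolution skip, start from the head
head_channels = channels[0]                     # 512: input to the first decoder block
in_channels = [head_channels] + list(decoder_channels[:-1])
skip_channels = list(channels[1:]) + [0]        # the last block upsamples without a skip
for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, decoder_channels):
    # each DecoderBlock's conv1 sees in_ch + skip_ch channels after torch.cat
    print(f"conv1 in: {in_ch + skip_ch:4d} -> block out: {out_ch}")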
DidierRLopes/timeseries-cv
[ "c886670ba0c8c347b12639ec4a6fb549457c1ef1" ]
[ "tsxv/splitTrainVal.py" ]
[ "\"\"\"\nForward Chaining, K-Fold and Group K-Fold algorithms to split a given training dataset into train (X, y) and validation (Xcv, ycv) sets\n\"\"\"\n\nimport numpy as np\n\ndef split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):\n    \"\"\" Returns sets to train and cross-validate a model using the forward chaining technique\n    \n    Parameters:\n        sequence (array) : Full training dataset\n        numInputs (int) : Number of inputs X and Xcv used at each training and validation\n        numOutputs (int) : Number of outputs y and ycv used at each training and validation\n        numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n    Returns:\n        X (2D array) : Array of numInputs arrays used for training\n        y (2D array) : Array of numOutputs arrays used for training\n        Xcv (2D array) : Array of numInputs arrays used for cross-validation\n        ycv (2D array) : Array of numOutputs arrays used for cross-validation\n    \n    \"\"\"\n    \n    X, y, Xcv, ycv = dict(), dict(), dict(), dict()\n    j=2; # Tracks index of CV set at each train/val split\n    \n    # Iterate through all train/val splits\n    while 1:\n        start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;\n        X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()\n        i=0; # Index of individual training set at each train/val split\n        \n        # Iterate while the index of the individual training set is smaller than the index of the cv set\n        while (i < j):\n            ## TRAINING DATA\n            start_ix = numJumps*i;\n            end_ix = start_ix + numInputs;\n            \n            seq_x = sequence[start_ix:end_ix] \n            X_it.append(seq_x)\n            seq_y = sequence[end_ix:end_ix+numOutputs]\n            y_it.append(seq_y)\n            \n            i+=1;\n        \n        # Once val data crosses time series length return \n        if (((end_ix+numInputs)+numOutputs) > len(sequence)):\n            break\n        \n        ## CROSS-VALIDATION DATA\n        startCv_ix = end_ix;\n        endCv_ix = end_ix + numInputs;\n        \n        seq_xcv = sequence[startCv_ix:endCv_ix] \n        Xcv_it.append(seq_xcv)\n        seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]\n        ycv_it.append(seq_ycv) \n        \n        ## Add another train/val split \n        X[j-2] = np.array(X_it)\n        y[j-2] = np.array(y_it)\n        Xcv[j-2] = np.array(Xcv_it)\n        ycv[j-2] = np.array(ycv_it)\n        \n        j+=1;\n    \n    if (len(X)==0 or len(Xcv)==0):\n        print(\"The sequence provided is not long enough to populate the return arrays\")\n    \n    return X, y, Xcv, ycv\n\n\ndef split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):\n    \"\"\" Returns sets to train and cross-validate a model using the K-Fold technique\n    \n    Parameters:\n        sequence (array) : Full training dataset\n        numInputs (int) : Number of inputs X and Xcv used at each training\n        numOutputs (int) : Number of outputs y and ycv used at each training\n        numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n    Returns:\n        X (2D array) : Array of numInputs arrays used for training\n        y (2D array) : Array of numOutputs arrays used for training\n        Xcv (2D array) : Array of numInputs arrays used for cross-validation\n        ycv (2D array) : Array of numOutputs arrays used for cross-validation\n    \n    \"\"\"\n    \n    X, y, Xcv, ycv = dict(), dict(), dict(), dict()\n    j=2; # Tracks index of CV set at each train/val split\n    theEnd = 0; # Flag to terminate function\n    \n    # Iterate until val set falls outside time series length\n    while 1:\n        start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;\n        X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()\n        i=0; # Index of individual training set at each train/val split\n        n=0; # Number of numJumps\n        \n        # Iterate through all train/val splits\n        while 1:\n            if (i != j): \n                ## TRAINING DATA\n                start_ix = endCv_ix + numJumps*n;\n                end_ix = start_ix + numInputs;\n                n +=1;\n\n                # Leave train/val split loop once training data crosses time series length\n                if end_ix+numOutputs > len(sequence):\n                    break;\n\n                seq_x = sequence[start_ix:end_ix] \n                X_it.append(seq_x)\n                seq_y = sequence[end_ix:end_ix+numOutputs]\n                y_it.append(seq_y)\n            else:\n                ## CROSS-VALIDATION DATA\n                startCv_ix = end_ix;\n                endCv_ix = end_ix + numInputs;\n                n = 0;\n                \n                # Once val data crosses time series length exit train/val split loop and return\n                if endCv_ix+numOutputs > len(sequence):\n                    theEnd = 1;\n                    break;\n\n                seq_xcv = sequence[startCv_ix:endCv_ix] \n                Xcv_it.append(seq_xcv)\n                seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]\n                ycv_it.append(seq_ycv)\n            i+=1;\n        \n        # Only add a train/val split if the time series length has not been crossed\n        if (theEnd == 1):\n            break\n        \n        ## Add another train/val split \n        X[j-2] = np.array(X_it)\n        y[j-2] = np.array(y_it)\n        Xcv[j-2] = np.array(Xcv_it)\n        ycv[j-2] = np.array(ycv_it)\n        \n        j+=1;\n    \n    if (len(X)==0 or len(Xcv)==0):\n        print(\"The sequence provided is not long enough to populate the return arrays\")\n    \n    return X, y, Xcv, ycv\n\n\ndef split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):\n    \"\"\" Returns sets to train and cross-validate a model using the group K-Fold technique\n    \n    Parameters:\n        sequence (array) : Full training dataset\n        numInputs (int) : Number of inputs X and Xcv used at each training\n        numOutputs (int) : Number of outputs y and ycv used at each training\n        numJumps (int) : Number of sequence samples to be ignored between (X,y) sets\n\n    Returns:\n        X (2D array) : Array of numInputs arrays used for training\n        y (2D array) : Array of numOutputs arrays used for training\n        Xcv (2D array) : Array of numInputs arrays used for cross-validation\n        ycv (2D array) : Array of numOutputs arrays used for cross-validation\n    \n    \"\"\"\n    \n    X, y, Xcv, ycv = dict(), dict(), dict(), dict()\n    \n    # Iterate through 5 train/val splits\n    for j in np.arange(5):\n        start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;\n        X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()\n        i=0; # Index of individual training set at each train/val split\n        n=0; # Number of numJumps\n        \n        while 1: \n            if ((i+1+j)%(5) != 0):\n                # TRAINING DATA\n                start_ix = endCv_ix + numJumps*n;\n                end_ix = start_ix + numInputs;\n                n+=1;\n\n                # Leave train/val split loop once training data crosses time series length\n                if end_ix+numOutputs > len(sequence)-1:\n                    break \n\n                seq_x = sequence[start_ix:end_ix] \n                X_it.append(seq_x)\n                seq_y = sequence[end_ix:end_ix+numOutputs]\n                y_it.append(seq_y)\n            else:\n                # CROSS-VALIDATION DATA\n                startCv_ix = end_ix;\n                endCv_ix = end_ix + numInputs;\n                n=0;\n\n                # Once val data crosses time series length return \n                if ((endCv_ix+numOutputs) > len(sequence)):\n                    break\n\n                seq_xcv = sequence[startCv_ix:endCv_ix] \n                Xcv_it.append(seq_xcv)\n                seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]\n                ycv_it.append(seq_ycv) \n            \n            i+=1;\n        \n        ## Add another train/val split \n        X[j] = np.array(X_it)\n        y[j] = np.array(y_it)\n        Xcv[j] = np.array(Xcv_it)\n        ycv[j] = np.array(ycv_it)\n    \n    if (len(X)==0 or len(Xcv)==0):\n        print(\"The sequence provided is not long enough to populate the return arrays\")\n    \n    return X, y, Xcv, ycv\n" ]
[ [ "numpy.array", "numpy.arange" ] ]
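A quick usage sketch for the splitters above. The toy sequence and window sizes are arbitrary, and the import path assumes the package is installed so that tsxv/splitTrainVal.py is importable as shown.

import numpy as np
from tsxv.splitTrainVal import split_train_val_forwardChaining

sequence = np.arange(20)  # toy series of 20 samples
X, y, Xcv, ycv = split_train_val_forwardChaining(sequence, numInputs=4, numOutputs=2, numJumps=2)
for split_id in X:
    # each successive split adds one more training window ahead of the validation block
    print(split_id, X[split_id].shape, y[split_id].shape, Xcv[split_id].shape)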
aricsanders/pyMez3
[ "13e2b9900af2287db0cc42a0190d31da165ce174" ]
[ "Code/DataHandlers/GraphModels.py" ]
[ "#-----------------------------------------------------------------------------\n# Name: GraphModels\n# Purpose: To store graphs used in network translations\n# Author: Aric Sanders\n# Created: 4/6/2016\n# License: MIT License\n#-----------------------------------------------------------------------------\n\"\"\"\nGraph Models stores sub classes of graphs that define data translations. All edges\nor the functions that define translations from one format to another\nare found in <a href=\"./Translations.m.html\">`pyMez.Code.DataHandlers.Translations`</a>.\nCurrently, the module networkx is used to display the graph.\n\nExamples\n--------\n #!python\n >>from pyMez import *\n >>image_graph=ImageGraph()\n >>image_graph.set_state('png','my_png.png')\n >>image_graph.move_to_node('EmbeddedHtml')\n >>output=image_graph.data\n >>print output\n\n\n<h3><a href=\"../../../Examples/Html/GraphModels_Example.html\">GraphModels Example</a></h3>\n\nRequirements\n------------\n+ [sys](https://docs.python.org/2/library/sys.html)\n+ [os](https://docs.python.org/2/library/os.html?highlight=os#module-os)\n+ [networkx](http://networkx.github.io/)\n+ [numpy](http://www.numpy.org/)\n+ [pyMez](https://github.com/aricsanders/pyMez)\n\nHelp\n---------------\n<a href=\"./index.html\">`pyMez.Code.DataHandlers`</a>\n<div>\n<a href=\"../../../pyMez_Documentation.html\">Documentation Home</a> |\n<a href=\"../../index.html\">API Documentation Home</a> |\n<a href=\"../../../Examples/html/Examples_Home.html\">Examples Home</a> |\n<a href=\"../../../Reference_Index.html\">Index</a>\n</div>\n \"\"\"\n\n#-----------------------------------------------------------------------------\n# Standard Imports\nimport re\nimport datetime\nimport sys\nimport os\n\n#-----------------------------------------------------------------------------\n# Third Party Imports\nsys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))\ntry:\n from Code.Utils.Alias import *\n METHOD_ALIASES=1\nexcept:\n print(\"The module pyMez.Code.Utils.Alias was not found\")\n METHOD_ALIASES=0\n pass\ntry:\n from Code.DataHandlers.GeneralModels import *\nexcept:\n print(\"The module pyMez.Code.DataHandlers.GeneralModels was not found,\"\n \"please put it on the python path\")\n raise ImportError\ntry:\n from Code.DataHandlers.TouchstoneModels import *\nexcept:\n print(\"The module pyMez.Code.DataHandlers.TouchstoneModels was not found,\"\n \"please put it on the python path\")\n raise ImportError\ntry:\n from Code.DataHandlers.Translations import *\nexcept:\n print(\"The module pyMez.Code.DataHandlers.Translations was not found or had an error,\"\n \"please put it on the python path or resolve the error\")\n raise ImportError\ntry:\n import numpy as np\nexcept:\n print(\"The module numpy was not found,\"\n \"please put it on the python path\")\n raise ImportError\ntry:\n import networkx\nexcept:\n print(\"The module networkx was not found,\"\n \"please put it on the python path\")\n raise ImportError\n\n#-----------------------------------------------------------------------------\n# Module Constants\n\n#-----------------------------------------------------------------------------\n# Module Functions\n\n# as an example these functions are left.\n#todo: Change the names\ndef edge_1_to_2(in_string):\n \"A Test function for an edge for a Graph\"\n return in_string.splitlines()\n\ndef edge_2_to_1(string_list):\n \"\"\"A test function for an edge in a Graph\"\"\"\n return string_list_collapse(string_list)\n\ndef visit_all_nodes(graph):\n \"\"\"Visit all 
nodes visits each node on a graph\"\"\"\n nodes=graph.node_names\n for node in nodes:\n graph.move_to_node(node)\n\ndef visit_and_print_all_nodes(graph):\n \"\"\"Visits all the nodes in graph and prints graph.data after each move\"\"\"\n nodes=graph.node_names\n for node in nodes:\n graph.move_to_node(node)\n print((graph.data))\n\n\ndef to_node_name(node_data):\n \"\"\"Creates a node name given an input object, does a bit of silly type selecting and name rearranging. This matches for 75%\n of the cases. There are a lot of user defined nodes without a clear path to generate a name. For instance the DataTableGraph\n node HpFile, does not save with a .hp extension so it would be auto named TxtFile if was only selected by the path name.\n If it is auto selected it returns StringList because it is of the format [\"file_path\",\"schema_path\"] \"\"\"\n\n # we retrieve the text version of the class name\n class_name = node_data.__class__.__name__\n node_name = class_name\n # now for dict and list types we want to inspect the first Element to see what it is\n if re.match('list', class_name):\n node_name = \"List\"\n try:\n element_class_name = node_data[0].__class__.__name__\n node_name = element_class_name + node_name\n except:\n pass\n elif re.match('dict', class_name):\n node_name = \"Dictionary\"\n try:\n element_class_name = list(node_data.values())[0].__class__.__name__\n node_name = element_class_name + node_name\n except:\n pass\n elif re.match('str', class_name):\n node_name = \"String\"\n # Now we have to check if it is an existing file name\n if os.path.isfile(node_data):\n node_name = \"File\"\n extension = \"\"\n try:\n if re.search(\"\\.\", node_data):\n extension = node_data.split(\".\")[-1]\n node_name = extension.title() + node_name\n except:\n pass\n elif fnmatch.fnmatch(node_data, \"*.*\"):\n node_name = \"File\"\n try:\n if re.search(\"\\.\", node_data):\n extension = node_data.split(\".\")[-1]\n node_name = extension.title() + node_name\n except:\n pass\n node_name = node_name.replace(\"str\", \"String\").replace(\"dict\", \"Dictionary\")\n return (node_name)\n\n\ndef TableGraph_to_Links(table_graph, **options):\n \"\"\"Converts a table graph to a set of download links with embedded data in them\"\"\"\n defaults = {\"base_name\": None,\n \"nodes\": ['XmlFile', 'CsvFile', 'ExcelFile', 'OdsFile', 'MatFile', 'HtmlFile', 'JsonFile'],\n \"extensions\": ['xml', 'csv', 'xlsx', 'ods', 'mat', 'html', 'json'],\n \"mime_types\": ['application/xml', 'text/plain',\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'application/vnd.oasis.opendocument.spreadsheet',\n 'application/x-matlab-data', 'text/html', 'application/json']}\n conversion_options = {}\n for key, value in defaults.items():\n conversion_options[key] = value\n for key, value in options.items():\n conversion_options[key] = value\n if conversion_options[\"base_name\"] is None:\n base_name = 'test.txt'\n else:\n base_name = conversion_options[\"base_name\"]\n\n nodes = conversion_options[\"nodes\"]\n extensions = conversion_options[\"extensions\"]\n mime_types = conversion_options[\"mime_types\"]\n\n out_links = \"\"\n for node_index, node in enumerate(nodes):\n table_graph.move_to_node(node)\n file_path = table_graph.data\n in_file = open(file_path, 'rb')\n content_string = in_file.read()\n link = String_to_DownloadLink(content_string,\n suggested_name=change_extension(base_name, extensions[node_index]),\n mime_type=mime_types[node_index],\n text=extensions[node_index])\n if node_index == len(nodes) - 
1:\n            out_links = out_links + link\n        else:\n            out_links = out_links + link + \" | \"\n    return out_links\n\n\ndef remove_circular_paths(path):\n    \"\"\"Removes pieces of the path that just end on the same node\"\"\"\n    # Todo: Track the error that leaves out a needed path sometimes\n    # See http://localhost:8888/notebooks/Two_Port_Matrix_Parameters_Debug_20170105_001.ipynb\n\n    edge_pattern=re.compile(\"edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)\")\n    past_locations=[]\n\n    for index,edge in enumerate(path):\n        match=re.match(edge_pattern,edge)\n        begin_node=match.groupdict()[\"begin_node\"]\n        end_node=match.groupdict()[\"end_node\"]\n        past_locations.append(begin_node)\n    #print(\"{0} is {1}\".format(\"past_locations\",past_locations))\n    new_path=[]\n    node_index=0\n    between_list=[False for item in past_locations]\n    while(node_index<len(past_locations)):\n        node=past_locations[node_index]\n        old_path=new_path\n        new_path=[]\n        # if you visit a location more than once\n        number_of_visits=past_locations.count(node)\n        if number_of_visits>1:\n            #print(\"{0} is {1}\".format(\"node\",node))\n            #print(\"{0} is {1}\".format(\"past_locations\",past_locations))\n            # Now find all the visits to that location\n            equality_list=[x==node for x in past_locations]\n            print((\"{0} is {1}\".format(\"equality_list\",equality_list)))\n            # You are initially not between visits\n            between=False\n\n            # every time you cross that node you flip between, as long as there are\n            visit_number=0\n            for index,equality in enumerate(equality_list):\n                if equality:\n                    # add one to the visit number\n                    visit_number+=1\n                    # Flip the between truth value if it is the first or last\n                    # visits only\n                    if visit_number==1 or visit_number==number_of_visits:\n                        between=not between\n                        between_list[index]=between or between_list[index]\n                    else:\n                        between_list[index]=between or between_list[index]\n                else:\n                    between_list[index]=between or between_list[index]\n            #print(\"{0} is {1}\".format(\"between_list\",between_list))\n        for index,item in enumerate(between_list):\n            if not item:\n                new_path.append(path[index])\n        node_index+=1\n    if new_path in [[]]:\n        new_path=path\n    return new_path\n#-----------------------------------------------------------------------------\n# Module Classes\n\n# getting around to adding a breadth first graph solver to Graph class\n# modify the find_path method\nclass Graph(object):\n    \"\"\"The Graph class creates a content graph that has as nodes different formats. As\n    a format is added via graph.add_node() by specifying a node name and a function from an\n    existing node into the new one, and one exiting the node. Once a series of nodes exists,\n    enter the graph at a node using graph.set_state(); the current data representing the\n    state is in the attribute graph.data. To move among the formats use graph.move_to_node('NodeName').\n    The find_path method needs to be recoded using a shortest path algorithm like\n    [Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm).\n    \"\"\"\n\n    def __init__(self, **options):\n        \"\"\"Initializes the graph. The first 2 nodes and two edges forming a bijection between them are required\"\"\"\n        defaults = {\"graph_name\": \"Graph\",\n                    \"node_names\": ['n1', 'n2'],\n                    \"node_descriptions\": [\"A plain string\",\n                                          \"A list of strings with no \\n, created with string.splitlines()\"],\n                    \"current_node\": 'n1',\n                    \"state\": [1, 0],\n                    \"data\": \"This is a test string\\n it has to have multiple lines \\n and many characters 34%6\\n^\",\n                    \"edge_2_to_1\": edge_2_to_1,\n                    \"edge_1_to_2\": edge_1_to_2\n                    }\n        self.options = {}\n        for key, value in defaults.items():\n            self.options[key] = value\n        for key, value in options.items():\n            self.options[key] = value\n        self.elements = ['graph_name', 'node_names', 'node_descriptions', 'current_node', 'state', 'data']\n        for element in self.elements:\n            self.__dict__[element] = self.options[element]\n        self.edges = []\n        self.edge_matrices = []\n        self.state_matrix = np.matrix(self.state).T\n        # Add the first 2 edges, required to initialize the graph properly\n        self.display_graph = networkx.DiGraph()\n\n        self.add_edge(self.node_names[0], self.node_names[1], self.options[\"edge_1_to_2\"])\n        self.add_edge(self.node_names[1], self.node_names[0], self.options[\"edge_2_to_1\"])\n        self.jumps = []\n        self.external_node_names = []\n        self.external_node_descriptions = []\n        self.display_layout = networkx.spring_layout(self.display_graph)\n\n    def get_description_dictionary(self):\n        \"returns a dictionary of the form {NodeName: Node Description} for all of the current nodes\"\n        dictionary = {node_name: self.node_descriptions[index] for index, node_name in enumerate(self.node_names)}\n        return dictionary\n\n    def set_state(self, node_name, node_data):\n        \"\"\"Sets the graph state to be the state specified by node_name, and node_data\"\"\"\n        try:\n            current_node_state_position = self.node_names.index(node_name)\n            self.current_node = node_name\n            self.data = node_data\n            self.state = [0 for i in range(len(self.node_names))]\n            self.state[current_node_state_position] = 1\n            self.state_matrix = np.matrix(self.state).T\n        except:\n            print((\"Could not set the state of graph: {0}\".format(self.graph_name)))\n            raise\n\n    def add_edge(self, begin_node=None, end_node=None, edge_function=None):\n        \"\"\"Adds an edge mapping one node to another, required input is begin_node (its name)\n        end_node, and the edge function\"\"\"\n        # check to see if edge is defined if it is increment a number\n        edge_match = re.compile(\"edge_{0}_{1}\".format(begin_node, end_node))\n        keys = list(self.__dict__.keys())\n        # print keys\n        iterator = 0\n        for key in keys:\n            if re.match(edge_match, key):\n                iterator += 1\n        edge_name = \"edge_{0}_{1}_{2:0>3d}\".format(begin_node, end_node, iterator)\n        self.__dict__[edge_name] = edge_function\n        self.edges.append(edge_name)\n        edge_matrix = np.zeros((len(self.state), len(self.state)))\n        begin_position = self.node_names.index(begin_node)\n        end_position = self.node_names.index(end_node)\n        edge_matrix[end_position][begin_position] = 1\n        edge_matrix = np.matrix(edge_matrix)\n        self.edge_matrices.append(edge_matrix)\n        self.display_graph.add_edge(begin_node, end_node)\n        self.display_layout = networkx.spring_layout(self.display_graph)\n\n    def add_jump(self, begin_node=None, end_node=None, jump_function=None):\n        \"\"\"Adds a jump mapping one internal node to an external node, required input is begin_node (its name)\n        end_node, and the edge function\"\"\"\n        # check to see if edge is defined if it is increment a number\n        jump_match = re.compile(\"jump_{0}_{1}\".format(begin_node, end_node))\n        keys = 
list(self.__dict__.keys())\n # print keys\n iterator = 0\n for key in keys:\n if re.match(jump_match, key):\n iterator += 1\n jump_name = \"jump_{0}_{1}_{2:0>3d}\".format(begin_node, end_node, iterator)\n self.__dict__[jump_name] = jump_function\n self.jumps.append(jump_name)\n self.display_graph.add_edge(begin_node, end_node)\n self.display_layout = networkx.spring_layout(self.display_graph)\n\n def move_to(self, path, **options):\n \"\"\"Changes the state of the graph by moving along the path specified\"\"\"\n defaults = {\"debug\": False, \"verbose\": False}\n move_options = {}\n for key, value in defaults.items():\n move_options[key] = value\n for key, value in options.items():\n move_options[key] = value\n\n if move_options[\"debug\"]:\n print(path)\n for index, edge in enumerate(path):\n # print edge\n edge_pattern = 'edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)'\n match = re.match(edge_pattern, edge)\n begin_node = match.groupdict()['begin_node']\n end_node = match.groupdict()['end_node']\n if move_options[\"verbose\"]:\n print((\"moving {0} -> {1}\".format(begin_node, end_node)))\n # print self.data\n self.data = self.__dict__[edge](self.data)\n # print self.data\n self.current_node = match.groupdict()['end_node']\n self.state = [0 for i in range(len(self.node_names))]\n position = self.node_names.index(self.current_node)\n self.state[position] = 1\n self.state_matrix = np.matrix(self.state).T\n # print self.state\n # print self.current_node\n\n def virtual_move_to(self, path):\n \"\"\"virtual_move_to simulates moving but does not change the state of the graph\"\"\"\n # print path\n temp_state = self.state\n temp_data = self.data\n temp_current_node = self.current_node\n temp_node_names = self.node_names\n for index, edge in enumerate(path):\n # print edge\n edge_pattern = 'edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)'\n match = re.match(edge_pattern, edge)\n begin_node = match.groupdict()['begin_node']\n end_node = match.groupdict()['end_node']\n # print(\"moving {0} -> {1}\".format(begin_node,end_node))\n # print self.data\n temp_data = self.__dict__[edge](temp_data)\n # print self.data\n temp_current_node = match.groupdict()['end_node']\n temp_state = [0 for i in range(len(temp_node_names))]\n position = temp_node_names.index(temp_current_node)\n temp_state[position] = 1\n # print temp_state\n # print self.state\n # print self.current_node\n\n def __str__(self):\n return str(self.data)\n\n def add_node(self, node_name, edge_into_node_begin, edge_into_node_function, edge_out_node_end,\n edge_out_node_function, node_description=None):\n \"\"\"Adds a node to the graph. 
Required input is node_name (a string with no spaces),\n a reference to an entering node, the function mapping the entering node to the new node,\n a reference to an exiting node, and the function mapping the\n new node to the exiting node.\"\"\"\n # first check that the entering and exiting nodes are good\n self.node_names.append(node_name)\n self.state.append(0)\n self.state_matrix = np.matrix(self.state).T\n for index, matrix in enumerate(self.edge_matrices):\n pad_row = np.zeros((1, len(matrix)))\n new_matrix = np.concatenate((matrix, pad_row), axis=0)\n pad_column = np.zeros((1, len(self.node_names)))\n new_matrix = np.concatenate((new_matrix, pad_column.T), axis=1)\n # print(\"New matrix is :\\n{0}\".format(new_matrix))\n self.edge_matrices[index] = new_matrix\n self.add_edge(begin_node=node_name, end_node=edge_out_node_end, edge_function=edge_out_node_function)\n self.add_edge(begin_node=edge_into_node_begin, end_node=node_name, edge_function=edge_into_node_function)\n if node_description:\n self.node_descriptions.append(node_description)\n self.display_graph.add_node(node_name)\n self.display_graph.add_edge(node_name, edge_out_node_end)\n self.display_graph.add_edge(edge_into_node_begin, node_name)\n self.display_layout = networkx.spring_layout(self.display_graph)\n\n def add_external_node(self, external_node_name, jump_into_node_begin,\n jump_into_node_function, external_node_description=None):\n \"\"\"Adds an external node to the graph. Required input is node_name (a string with no spaces),\n a reference to an entering node, and the function mapping the entering node to the new external node\"\"\"\n # first check that the entering node is good\n self.external_node_names.append(external_node_name)\n self.add_jump(begin_node=jump_into_node_begin, end_node=external_node_name,\n jump_function=jump_into_node_function)\n if external_node_description:\n self.external_node_descriptions.append(external_node_description)\n self.display_graph.add_node(external_node_name)\n self.display_graph.add_edge(jump_into_node_begin, external_node_name)\n self.display_layout = networkx.spring_layout(self.display_graph)\n\n def jump_to_external_node(self, external_node_name, **options):\n \"\"\"Returns the result of the jump; the graph is left in the node that is the beginning of the jump\"\"\"\n end_node = external_node_name\n jump_pattern = 'jump_(?P<begin_node>\\w+)_{0}_(?P<iterator>\\w+)'.format(end_node)\n jump_to_use = None\n for jump in self.jumps[:]:\n jump_match = re.match(jump_pattern, jump, re.IGNORECASE)\n if jump_match:\n jump_to_use = jump\n begin_node = jump_match.groupdict()[\"begin_node\"]\n # guard against an undefined jump, which would otherwise raise a NameError below\n if jump_to_use is None:\n raise ValueError(\"No jump into the external node {0} was found\".format(external_node_name))\n self.move_to_node(begin_node)\n return self.__dict__[jump_to_use](self.data, **options)\n\n def path_length(self, path, num_repeats=10):\n \"\"\"Determines the length of a given path; currently the metric is the mean time it takes to move along it.\"\"\"\n begin_time = datetime.datetime.now()\n # num_repeats=100\n for i in range(num_repeats):\n self.virtual_move_to(path)\n end_time = datetime.datetime.now()\n delta_t = end_time - begin_time\n path_length = delta_t.total_seconds() / float(num_repeats)\n if path_length == 0.0:\n print(\"Warning: the path length is less than 1 microsecond, \"\n \"make sure num_repeats is high enough to measure it.\")\n return path_length\n\n def is_path_valid(self, path):\n \"\"\"Returns True if the path is valid from the current node position or False otherwise\"\"\"\n null_state = [0 for i in range(len(self.node_names))]\n null_state_matrix = np.matrix(null_state).T\n new_state = np.matrix(self.state).T\n 
for index, edge in enumerate(path):\n # print index\n # print edge\n edge_position = self.edges.index(edge)\n move_matrix = self.edge_matrices[edge_position]\n # print move_matrix\n new_state = move_matrix * new_state\n # if the state becomes the null state, the edge did not leave from the current node\n if (new_state == null_state_matrix).all():\n # print new_state\n # print null_state_matrix\n return False\n return True\n\n def get_entering_nodes(self, node):\n \"\"\"Returns all nodes that have an edge that enters the specified node\"\"\"\n enter_edge_pattern = re.compile('edge_(?P<begin_node>\\w+)_{0}_(?P<iterator>\\w+)'.format(node))\n enter_nodes = []\n for index, edge in enumerate(self.edges):\n enter_match = re.match(enter_edge_pattern, edge)\n if enter_match:\n enter_node = enter_match.groupdict()['begin_node']\n enter_nodes.append(enter_node)\n return enter_nodes\n\n def get_entering_edges(self, node):\n \"\"\"Returns all edges that enter the specified node\"\"\"\n enter_edge_pattern = re.compile('edge_(?P<begin_node>\\w+)_{0}_(?P<iterator>\\w+)'.format(node))\n enter_edges = []\n for index, edge in enumerate(self.edges):\n if re.match(enter_edge_pattern, edge):\n enter_edges.append(edge)\n return enter_edges\n\n def get_exiting_edges(self, node):\n \"\"\"Returns all edges that exit the specified node\"\"\"\n exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\\w+)_(?P<iterator>\\w+)'.format(node))\n exit_edges = []\n for index, edge in enumerate(self.edges):\n if re.match(exit_edge_pattern, edge):\n exit_edges.append(edge)\n return exit_edges\n\n def get_exiting_nodes(self, node):\n \"\"\"Returns all nodes that have an edge leaving the specified node\"\"\"\n exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\\w+)_(?P<iterator>\\w+)'.format(node))\n exit_nodes = []\n for index, edge in enumerate(self.edges):\n exit_match = re.match(exit_edge_pattern, edge)\n if exit_match:\n exit_node = exit_match.groupdict()['end_node']\n exit_nodes.append(exit_node)\n return exit_nodes\n\n def get_path(self, first_node, last_node, **options):\n \"\"\"Returns the first path found between first node and last node, uses a breadth first search algorithm\"\"\"\n defaults = {\"debug\": False, \"method\": \"BreadthFirst\"}\n self.get_path_options = {}\n for key, value in defaults.items():\n self.get_path_options[key] = value\n for key, value in options.items():\n self.get_path_options[key] = value\n unvisited_nodes = self.node_names[:]\n unvisited_nodes.remove(first_node)\n visited_nodes = [first_node]\n node_history = []\n edge_history = []\n path_queue = []\n possible_paths = []\n queue = []\n current_edge = []\n queue.append(first_node)\n path = {first_node: []}\n while queue:\n # first remove the current node from the front of the queue\n current_node = queue.pop(0)\n if path_queue != []:\n current_edge = path_queue.pop(0)\n edge_history.append(current_edge)\n node_history.append(current_node)\n if self.get_path_options[\"debug\"]:\n print((\"current_node is {0}\".format(current_node)))\n print((\"current_edge is {0}\".format(current_edge)))\n # if this node is the destination exit returning the path\n if current_node == last_node:\n if self.get_path_options[\"debug\"]:\n print((\"node_history is {0}\".format(node_history)))\n print((\"edge_history is {0}\".format(edge_history)))\n print((\"{0} is {1}\".format(\"path\", path)))\n return path[last_node][::-1]\n\n adjacent_nodes = self.get_exiting_nodes(current_node)\n adjacent_paths = self.get_exiting_edges(current_node)\n if self.get_path_options[\"debug\"]:\n print((\"{0} are {1}\".format(\"adjacent_nodes\", adjacent_nodes)))\n print((\"{0} are 
{1}\".format(\"adjacent_paths\", adjacent_paths)))\n current_history = edge_history\n for node_index, node in enumerate(adjacent_nodes):\n if node not in visited_nodes:\n queue.append(node)\n path_queue.append(adjacent_paths[node_index])\n visited_nodes.append(node)\n path[node] = [adjacent_paths[node_index]] + path[current_node]\n path[node]\n # possible_paths.append(current_path.append(node))\n if self.get_path_options[\"debug\"]:\n print((\"{0} is {1}\".format(\"path_queue\", path_queue)))\n\n def move_to_node(self, node):\n \"\"\"Moves from current_node to the specified node\"\"\"\n path = self.get_path(self.current_node, node)\n self.move_to(path)\n\n def check_closed_path(self):\n \"\"\"Checks that data is not changed for the first closed path found. Returns True if data==data after\n moving around the closed path, False otherwise. Starting point is current_node \"\"\"\n temp_data = self.data\n path = self.get_path(self.current_node, self.current_node)\n if self.is_path_valid(path):\n pass\n else:\n print(\"Path is not valid, graph definition is broken\")\n raise\n out = temp_data == self.data\n out_list = [self.current_node, path, out]\n print((\"The assertion that the data remains unchanged,\\n\"\n \"for node {0} following path {1} is {2}\".format(*out_list)))\n return out\n\n def is_graph_isomorphic(self):\n \"\"\"Returns True if all nodes have closed paths that preserve the data, False otherwise\"\"\"\n out = True\n for node in self.node_names:\n self.move_to_node(node)\n if not self.check_closed_path:\n out = False\n return out\n\n def show(self, **options):\n \"\"\"Shows the graph using matplotlib and networkx\"\"\"\n # Should be seperated to allow for fixed presentation?\n defaults = {\"descriptions\": False, \"edge_descriptions\": False, \"save_plot\": False,\n \"path\": None, \"active_node\": True, \"directory\": None,\n \"specific_descriptor\": self.graph_name.replace(\" \", \"_\"),\n \"general_descriptor\": \"plot\", \"file_name\": None,\n \"arrows\": True, \"node_size\": 1000, \"font_size\": 10, \"fix_layout\": True}\n show_options = {}\n for key, value in defaults.items():\n show_options[key] = value\n for key, value in options.items():\n show_options[key] = value\n if show_options[\"directory\"] is None:\n show_options[\"directory\"] = os.getcwd()\n if show_options[\"active_node\"]:\n node_colors = []\n for node in self.display_graph.nodes():\n if node == self.current_node:\n node_colors.append('b')\n else:\n if node in self.node_names:\n node_colors.append('r')\n elif node in self.external_node_names:\n node_colors.append('g')\n else:\n node_colors = ['r' for node in self.node_names] + ['g' for node in self.node_names]\n # print(\"{0} is {1}\".format('node_colors',node_colors))\n if show_options[\"descriptions\"]:\n node_labels = {node: self.node_descriptions[index] for index,\n node in enumerate(self.node_names)}\n if self.external_node_names:\n for index, node in enumerate(self.external_node_names):\n node_labels[node] = self.external_node_descriptions[index]\n networkx.draw_networkx(self.display_graph, arrows=show_options[\"arrows\"],\n labels=node_labels, node_color=node_colors,\n node_size=show_options[\"node_size\"], font_size=show_options[\"font_size\"],\n pos=self.display_layout)\n # print(\"{0} is {1}\".format('node_labels',node_labels))\n else:\n networkx.draw_networkx(self.display_graph, arrows=show_options[\"arrows\"], node_color=node_colors,\n node_size=show_options[\"node_size\"], font_size=show_options[\"font_size\"],\n pos=self.display_layout)\n 
plt.axis('off')\n plt.suptitle(self.options[\"graph_name\"])\n if show_options[\"file_name\"] is None:\n file_name = auto_name(specific_descriptor=show_options[\"specific_descriptor\"],\n general_descriptor=show_options[\"general_descriptor\"],\n directory=show_options[\"directory\"], extension='png', padding=3)\n else:\n file_name = show_options[\"file_name\"]\n if show_options[\"save_plot\"]:\n # print file_name\n if show_options[\"path\"]:\n plt.savefig(show_options[\"path\"])\n else:\n plt.savefig(os.path.join(show_options[\"directory\"], file_name))\n else:\n plt.show()\n fig = plt.gcf()\n return fig\n\n\n\nclass StringGraph(Graph):\n \"\"\"String Graph is a graph relating different string forms\"\"\"\n def __init__(self,**options):\n \"\"\"Initializes the StringGraph Class by defining nodes and edges\"\"\"\n defaults={\"graph_name\":\"StringGraph\",\n \"node_names\":['String','StringList'],\n \"node_descriptions\":[\"A plain string\",\n \"A list of strings with no \\\\n, created with string.splitlines()\"],\n \"current_node\":'String',\n \"state\":[1,0],\n \"data\":\"This is a test string\\n it has to have multiple lines \\n and many characters 34%6\\n^\",\n \"edge_2_to_1\":edge_2_to_1,\n \"edge_1_to_2\":edge_1_to_2\n }\n self.options={}\n for key,value in defaults.items():\n self.options[key]=value\n for key,value in options.items():\n self.options[key]=value\n Graph.__init__(self,**self.options)\n self.add_node(\"File\",\"String\",String_to_File,\"String\",File_to_String,node_description=\"Plain File\")\n self.add_node(\"CStringIo\",\"String\",String_to_CStringIo,\"String\",CStringIo_to_String,node_description=\"C File Like Object\")\n self.add_node(\"StringIo\",\"String\",String_to_StringIo,\"String\",StringIo_to_String,node_description=\"File Like Object\")\n self.add_edge(begin_node=\"StringList\",end_node=\"File\",edge_function=StringList_to_File)\n\n\n# Changed from ColumnModeledGraph to TableGraph 12/14/2016 by AWS\nclass TableGraph(Graph):\n \"\"\"Class that transforms column modeled data (table) from one format to another; use set_state to initialize it to\n your data.\n #!python\n defaults={\"graph_name\":\"Table Graph\",\n \"node_names\":['DataFrame','AsciiDataTable'],\n \"node_descriptions\":[\"Pandas Data Frame\",\"AsciiDataTable\"],\n \"current_node\":'DataFrame',\n \"state\":[1,0],\n \"data\":pandas.DataFrame([[1,2,3],[3,4,5]],columns=[\"a\",\"b\",\"c\"]),\n \"edge_2_to_1\":AsciiDataTable_to_DataFrame,\n \"edge_1_to_2\":DataFrame_to_AsciiDataTable}\n \"\"\"\n def __init__(self,**options):\n defaults={\"graph_name\":\"Table Graph\",\n \"node_names\":['DataFrame','AsciiDataTable'],\n \"node_descriptions\":[\"Pandas Data Frame\",\"AsciiDataTable\"],\n \"current_node\":'DataFrame',\n \"state\":[1,0],\n \"data\":pandas.DataFrame([[1,2,3],[3,4,5]],columns=[\"a\",\"b\",\"c\"]),\n \"edge_2_to_1\":AsciiDataTable_to_DataFrame,\n \"edge_1_to_2\":DataFrame_to_AsciiDataTable}\n self.options={}\n for key,value in defaults.items():\n self.options[key]=value\n for key,value in options.items():\n self.options[key]=value\n Graph.__init__(self,**self.options)\n self.add_node(\"HdfFile\",\"DataFrame\",DataFrame_to_HdfFile,\n \"DataFrame\",HdfFile_to_DataFrame,\n node_description=\"HDF File\")\n self.add_node(\"XmlDataTable\",\"AsciiDataTable\",AsciiDataTable_to_XmlDataTable,\n \"AsciiDataTable\",XmlDataTable_to_AsciiDataTable,\n node_description=\"XML Data Table\")\n # Need to add XML File and Html File using save and save_HTML()\n 
self.add_node(\"ExcelFile\",\"DataFrame\",DataFrame_to_ExcelFile,\n \"DataFrame\",ExcelFile_to_DataFrame,\n node_description=\"Excel File\")\n\n self.add_node(\"OdsFile\",\"ExcelFile\",ExcelFile_to_OdsFile,\n \"ExcelFile\",OdsFile_to_ExcelFile,\"Open Office Spreadsheet\")\n\n self.add_node(\"HtmlString\",\"DataFrame\",DataFrame_to_HtmlString,\n \"DataFrame\",HtmlString_to_DataFrame,\n node_description=\"HTML String\")\n # Note a lot of the pandas reading and writing cause float64 round off errors\n # applymap(lambda x: np.around(x,10) any all float fields will fix this\n # also the column names move about in order\n self.add_node(\"JsonFile\",\"DataFrame\",DataFrame_to_JsonFile,\n \"DataFrame\",JsonFile_to_DataFrame,\n node_description=\"JSON File\")\n self.add_node(\"JsonString\",\"DataFrame\",DataFrame_to_JsonString,\n \"DataFrame\",JsonString_to_DataFrame,\n node_description=\"JSON String\")\n self.add_node(\"CsvFile\",\"DataFrame\",DataFrame_to_CsvFile,\n \"DataFrame\",CsvFile_to_DataFrame,\n node_description=\"CSV File\")\n self.add_node(\"MatFile\",\"AsciiDataTable\",AsciiTable_to_MatFile,\n \"AsciiDataTable\",MatFile_to_AsciiTable,\n node_description=\"Matlab File\")\n self.add_node(\"XmlFile\",\"XmlDataTable\",XmlDataTable_to_XmlFile,\n \"XmlDataTable\",XmlFile_to_XmlDataTable,\n node_description=\"XML DataTable Saved As a File\")\n self.add_node(\"HtmlFile\",\"HtmlString\",HtmlString_to_HtmlFile,\n \"HtmlString\",HtmlFile_to_HtmlString,\n node_description=\"HTML File\")\n self.add_edge(\"DataFrame\",\"HtmlFile\",DataFrame_to_HtmlFile)\n self.add_edge(\"JsonFile\",\"XmlDataTable\",JsonFile_to_XmlDataTable)\n self.add_external_node(\"XsltResultString\",\"XmlDataTable\",XmlBase_to_XsltResultString,\n external_node_description=\"XSLT Results String\")\n self.add_external_node(\"XsltResultFile\",\"XmlDataTable\",XmlBase_to_XsltResultFile,\n external_node_description=\"XSLT Results File\")\nclass ImageGraph(Graph):\n \"\"\"A transformation graph for images node types are image formats and external nodes are\n common image processing functions\n #!python\n defaults={\"graph_name\":\"Image Graph\",\n \"node_names\":['Image','png'],\n \"node_descriptions\":[\"PIL Image\",\"png\"],\n \"current_node\":'Image',\n \"state\":[1,0],\n \"data\":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),\n \"edge_2_to_1\":File_to_Image,\n \"edge_1_to_2\":lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"png\")}\n \"\"\"\n def __init__(self,**options):\n defaults={\"graph_name\":\"Image Graph\",\n \"node_names\":['Image','Png'],\n \"node_descriptions\":[\"PIL Image\",\"Png\"],\n \"current_node\":'Image',\n \"state\":[1,0],\n \"data\":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),\n \"edge_2_to_1\":File_to_Image,\n \"edge_1_to_2\":lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"png\")}\n self.options={}\n for key,value in defaults.items():\n self.options[key]=value\n for key,value in options.items():\n self.options[key]=value\n Graph.__init__(self,**self.options)\n self.add_node(\"Jpg\",\"Image\",lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"jpg\"),\n \"Image\",File_to_Image,node_description=\"Jpg File\")\n self.add_node(\"Tiff\",\"Image\",lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"tiff\"),\n \"Image\",File_to_Image,node_description=\"Tif File\")\n self.add_node(\"Gif\",\"Image\",lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"gif\"),\n \"Image\",File_to_Image,node_description=\"Gif File\")\n 
self.add_node(\"Bmp\",\"Image\",lambda x: Image_to_FileType(x,file_path=\"test\",extension=\"bmp\"),\n \"Image\",File_to_Image,node_description=\"BMP File\")\n self.add_node(\"Base64\",\"Png\",PngFile_to_Base64,\n \"Png\",Base64_to_PngFile,node_description=\"Base 64 PNG\")\n self.add_node(\"EmbeddedHtml\",\"Base64\",Base64Png_to_EmbeddedHtmlString,\n \"Base64\",EmbeddedHtmlString_to_Base64Png,node_description=\"Embedded HTML of PNG\")\n self.add_node(\"Ndarray\",\"Png\",PngFile_to_Ndarray,\n \"Png\",Ndarray_to_PngFile,node_description=\"Numpy Array\")\n self.add_node(\"MatplotlibFigure\",\"Ndarray\",Ndarray_to_MatplotlibFigure,\n \"Png\",MatplotlibFigure_to_PngFile,node_description=\"MatplotlibFigure\")\n self.add_external_node(\"Thumbnail\",\"Image\",Image_to_ThumbnailFile,external_node_description=\"JPEG Thumbnail\")\n self.add_external_node(\"Matplotlib\",\"Ndarray\",Ndarray_to_Matplotlib,\n external_node_description=\"Matplotlib Plot\")\n\nclass MetadataGraph(Graph):\n \"\"\"Metadata Graph is a graph representing the content of key,value metadata\"\"\"\n def __init__(self,**options):\n \"\"\"Intializes the metadata graph class\"\"\"\n defaults={\"graph_name\":\"Metadata Graph\",\n \"node_names\":['Dictionary','JsonString'],\n \"node_descriptions\":[\"Python Dictionary\",\"Json string\"],\n \"current_node\":'Dictionary',\n \"state\":[1,0],\n \"data\":{\"a\":\"First\",\"b\":\"Second\"},\n \"edge_2_to_1\":JsonString_to_Dictionary,\n \"edge_1_to_2\":Dictionary_to_JsonString}\n self.options={}\n for key,value in defaults.items():\n self.options[key]=value\n for key,value in options.items():\n self.options[key]=value\n Graph.__init__(self,**self.options)\n self.add_node(\"JsonFile\",\"JsonString\",JsonString_to_JsonFile,\n \"JsonString\",JsonFile_to_JsonString,node_description=\"JSON File\")\n self.add_node(\"XmlString\",\"Dictionary\",Dictionary_to_XmlString,\n \"Dictionary\",XmlString_to_Dictionary,node_description=\"XML string\")\n self.add_node(\"HtmlMetaString\",\"Dictionary\",Dictionary_to_HtmlMetaString,\n \"Dictionary\",HtmlMetaString_to_Dictionary,node_description=\"HTML meta tags\")\n self.add_node(\"XmlTupleString\",\"Dictionary\",Dictionary_to_XmlTupleString,\n \"Dictionary\",XmlTupleString_to_Dictionary,node_description=\"Tuple Line\")\n self.add_node(\"PickleFile\",\"Dictionary\",Dictionary_to_PickleFile,\n \"Dictionary\",PickleFile_to_Dictionary,node_description=\"Pickled File\")\n self.add_node(\"ListList\",\"Dictionary\",Dictionary_to_ListList,\n \"Dictionary\",ListList_to_Dictionary,node_description=\"List of lists\")\n self.add_node(\"HeaderList\",\"Dictionary\",Dictionary_to_HeaderList,\n \"Dictionary\",HeaderList_to_Dictionary,node_description=\"Header List\")\n self.add_node(\"DataFrame\",\"Dictionary\",Dictionary_to_DataFrame,\n \"Dictionary\",DataFrame_to_Dictionary,node_description=\"Pandas DataFrame\")\n self.add_node(\"AsciiDataTable\",\"DataFrame\",DataFrame_to_AsciiDataTable,\n \"DataFrame\",AsciiDataTable_to_DataFrame,node_description=\"AsciiDataTable\")\n self.add_node(\"MatFile\",\"AsciiDataTable\",AsciiTable_to_MatFile,\n \"AsciiDataTable\",MatFile_to_AsciiDataTableKeyValue,node_description=\"Matlab\")\n self.add_node(\"ExcelFile\",\"DataFrame\",DataFrame_to_ExcelFile,\n \"DataFrame\",ExcelFile_to_DataFrame,node_description=\"excel\")\n self.add_node(\"HdfFile\",\"DataFrame\",DataFrame_to_HdfFile,\n \"DataFrame\",HdfFile_to_DataFrame,node_description=\"hdf file\")\n self.add_node(\"CsvFile\",\"DataFrame\",DataFrame_to_CsvFile,\n 
\"DataFrame\",CsvFile_to_DataFrame,node_description=\"CSV File\")\n self.add_node(\"HtmlFile\",\"DataFrame\",DataFrame_to_HtmlFile,\n \"DataFrame\",HtmlFile_to_DataFrame,node_description=\"HTML Table File\")\n self.add_node(\"HtmlTableString\",\"HtmlFile\",HtmlFile_to_HtmlString,\n \"HtmlFile\",HtmlString_to_HtmlFile,node_description=\"HTML Table String\")\nclass TwoPortParameterGraph(Graph):\n \"\"\"TwoPortParamterGraph is a content graph for two-port parameters,\n it transforms between S,T,Y,Z,ABCD and H parameters and matrix versions.\n #!python\n defaults={\"graph_name\":\"Two Port Parameter Graph\",\n \"node_names\":[\"SFrequencyList\",'SFrequencyMatrixList'],\n \"node_descriptions\":[\"S Parameters\",\"S Parameters in a Matrix\"],\n \"current_node\":'SFrequencyList',\n \"state\":[1,0],\n \"data\":[[1.0,.9,.436,.436,.9]],\n \"edge_2_to_1\":FrequencyMatrixList_to_FrequencyList,\n \"edge_1_to_2\":FrequencyList_to_FrequencyMatrixList,\n \"frequency_units\":\"GHz\",\n \"Z01\":50,\n \"Z02\":50 }\n\"\"\"\n def __init__(self,**options):\n\n defaults={\"graph_name\":\"Two Port Parameter Graph\",\n \"node_names\":[\"SFrequencyList\",'SFrequencyMatrixList'],\n \"node_descriptions\":[\"S Parameters\",\"S Parameters in a Matrix\"],\n \"current_node\":'SFrequencyList',\n \"state\":[1,0],\n \"data\":[[1.0,.9,.436,.436,.9]],\n \"edge_2_to_1\":FrequencyMatrixList_to_FrequencyList,\n \"edge_1_to_2\":FrequencyList_to_FrequencyMatrixList,\n \"frequency_units\":\"GHz\",\n \"Z01\":50,\n \"Z02\":50 }\n graph_options={}\n for key,value in defaults.items():\n graph_options[key]=value\n for key,value in options.items():\n graph_options[key]=value\n Graph.__init__(self,**graph_options)\n\n self.add_node(\"TFrequencyMatrixList\",\n \"SFrequencyMatrixList\",SFrequencyMatrixList_to_TFrequencyMatrixList,\n \"SFrequencyMatrixList\",TFrequencyMatrixList_to_SFrequencyMatrixList,\n \"T Parameters in a Matrix\")\n\n self.add_node(\"TFrequencyList\",\n \"TFrequencyMatrixList\",FrequencyMatrixList_to_FrequencyList,\n \"TFrequencyMatrixList\",FrequencyList_to_FrequencyMatrixList,\n \"T Parameters\")\n\n self.add_node(\"ZFrequencyList\",\n \"SFrequencyList\",SFrequencyList_to_ZFrequencyList,\n \"TFrequencyList\",ZFrequencyList_to_TFrequencyList,\n \"Z Parameters\")\n\n self.add_node(\"ZFrequencyMatrixList\",\n \"ZFrequencyList\",FrequencyList_to_FrequencyMatrixList,\n \"ZFrequencyList\",FrequencyMatrixList_to_FrequencyList,\n \"Z Parameters in a matrix\")\n\n self.add_node(\"ABCDFrequencyList\",\n \"ZFrequencyList\",ZFrequencyList_to_ABCDFrequencyList,\n \"ZFrequencyList\",ABCDFrequencyList_to_ZFrequencyList,\n \"ABCD Parameters\")\n\n self.add_node(\"ABCDFrequencyMatrixList\",\n \"ABCDFrequencyList\",FrequencyList_to_FrequencyMatrixList,\n \"ABCDFrequencyList\",FrequencyMatrixList_to_FrequencyList,\n \"ABCD Parameters in a matrix\")\n\n self.add_node(\"HFrequencyList\",\n \"ABCDFrequencyList\",ABCDFrequencyList_to_HFrequencyList,\n \"ZFrequencyList\",HFrequencyList_to_ZFrequencyList,\n \"h Parameters\")\n\n self.add_node(\"HFrequencyMatrixList\",\n \"HFrequencyList\",FrequencyList_to_FrequencyMatrixList,\n \"HFrequencyList\",FrequencyMatrixList_to_FrequencyList,\n \"H Parameters in a matrix\")\n self.add_node(\"YFrequencyList\",\n \"ABCDFrequencyList\",ABCDFrequencyList_to_YFrequencyList,\n \"HFrequencyList\",YFrequencyList_to_HFrequencyList,\n \"Y Parameters\")\n\n self.add_node(\"YFrequencyMatrixList\",\n \"YFrequencyList\",FrequencyList_to_FrequencyMatrixList,\n 
\"YFrequencyList\",FrequencyMatrixList_to_FrequencyList,\n \"Y Parameters in a matrix\")\n\n self.add_edge(begin_node=\"ZFrequencyMatrixList\",\n end_node=\"YFrequencyMatrixList\",\n edge_function=ZFrequencyMatrixList_to_YFrequencyMatrixList)\n\n self.add_edge(begin_node=\"SFrequencyMatrixList\",\n end_node=\"ZFrequencyMatrixList\",\n edge_function=SFrequencyMatrixList_to_ZFrequencyMatrixList)\n\n self.add_edge(begin_node=\"ZFrequencyMatrixList\",\n end_node=\"TFrequencyMatrixList\",\n edge_function=ZFrequencyMatrixList_to_TFrequencyMatrixList)\n\n self.add_edge(begin_node=\"ABCDFrequencyList\",\n end_node=\"SFrequencyList\",\n edge_function=ABCDFrequencyList_to_SFrequencyList)\nclass DataTableGraph(Graph):\n \"\"\" Class that transforms a row modelled header and metadata to several different data types\n #!python\n defaults={\"graph_name\":\"Data Table Graph\",\n \"node_names\":['DataFrameDictionary','AsciiDataTable'],\n \"node_descriptions\":[\"Pandas Data Frame Dictionary\",\"AsciiDataTable\"],\n \"current_node\":'DataFrameDictionary',\n \"state\":[1,0],\n \"data\":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),\n \"edge_2_to_1\":AsciiDataTable_to_DataFrameDictionary,\n \"edge_1_to_2\":DataFrameDictionary_to_AsciiDataTable\n }\n \"\"\"\n def __init__(self,**options):\n\n defaults={\"graph_name\":\"Data Table Graph\",\n \"node_names\":['DataFrameDictionary','AsciiDataTable'],\n \"node_descriptions\":[\"Pandas Data Frame Dictionary\",\"AsciiDataTable\"],\n \"current_node\":'DataFrameDictionary',\n \"state\":[1,0],\n \"data\":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),\n \"edge_2_to_1\":AsciiDataTable_to_DataFrameDictionary,\n \"edge_1_to_2\":DataFrameDictionary_to_AsciiDataTable\n }\n graph_options={}\n for key,value in defaults.items():\n graph_options[key]=value\n for key,value in options.items():\n graph_options[key]=value\n Graph.__init__(self, **graph_options)\n\n self.add_node(\"ExcelFile\", \"DataFrameDictionary\", DataFrameDictionary_to_ExcelFile,\n \"DataFrameDictionary\", ExcelFile_to_DataFrameDictionary,\n node_description=\"Excel Workbook\")\n self.add_node(\"HdfFile\", \"DataFrameDictionary\", DataFrameDictionary_to_HdfFile,\n \"DataFrameDictionary\", HdfFile_to_DataFrameDictionary, node_description=\"HD5 File\")\n self.add_node(\"CsvFile\", \"AsciiDataTable\", AsciiDataTable_to_CsvFile,\n \"AsciiDataTable\", File_to_AsciiDataTable, node_description=\"CSV File\")\n self.add_node(\"HpFile\", \"AsciiDataTable\", AsciiDataTable_to_HpFile,\n \"AsciiDataTable\", File_to_AsciiDataTable, node_description=\"hp format File\")\n self.add_external_node(external_node_name=\"XMLDataTable\", jump_into_node_begin=\"AsciiDataTable\",\n jump_into_node_function=AsciiDataTable_to_XmlDataTable,\n external_node_description=\"XMLDataTable\")\n#-----------------------------------------------------------------------------\n# Module Scripts\n#TODO: Add test_Graph script currently lives in jupyter-notebooks\n\n#-----------------------------------------------------------------------------\n# Module Runner\nif __name__ == '__main__':\n pass" ]
[ [ "numpy.concatenate", "numpy.matrix" ] ]
mdietrichstein/skpredict
[ "f15416b61f5fc2693b4c85c690d664fbbb008f8b" ]
[ "tests/svm/conftest.py" ]
[ "import pytest\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nTEST_SIZE = 0.33\nRANDOM_STATE = 42\n\n\n@pytest.fixture(scope=\"module\")\ndef binary_dataset():\n df = pd.read_csv(\"./resources/heart.csv\")\n features = df.iloc[0:, :-1]\n labels = df.iloc[0:, -1].values.ravel()\n\n X_train, X_test, y_train, y_test = train_test_split(\n features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE\n )\n\n return X_train, X_test, y_train\n\n\n@pytest.fixture(scope=\"module\")\ndef multiclass_dataset():\n df = pd.read_csv(\"./resources/glass.csv\")\n features = df.iloc[0:, :-1]\n labels = df.iloc[0:, -1].values.ravel()\n\n X_train, X_test, y_train, _ = train_test_split(\n features, labels, test_size=TEST_SIZE, random_state=RANDOM_STATE\n )\n\n return X_train, X_test, y_train\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
Michellemingxuan/stanford_cs231n
[ "b1d0a5a4a3b2fe5d685e34a4ebd810cbc56ec143", "b1d0a5a4a3b2fe5d685e34a4ebd810cbc56ec143" ]
[ "assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py", "assignments/2021/assignment1/cs231n/classifiers/softmax.py" ]
[ "from builtins import range\nfrom builtins import object\nimport numpy as np\nfrom past.builtins import xrange\n\n\nclass KNearestNeighbor(object):\n \"\"\" a kNN classifier with L2 distance \"\"\"\n\n def __init__(self):\n pass\n\n def train(self, X, y):\n \"\"\"\n Train the classifier. For k-nearest neighbors this is just\n memorizing the training data.\n\n Inputs:\n - X: A numpy array of shape (num_train, D) containing the training data\n consisting of num_train samples each of dimension D.\n - y: A numpy array of shape (N,) containing the training labels, where\n y[i] is the label for X[i].\n \"\"\"\n self.X_train = X\n self.y_train = y\n\n def predict(self, X, k=1, num_loops=0):\n \"\"\"\n Predict labels for test data using this classifier.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data consisting\n of num_test samples each of dimension D.\n - k: The number of nearest neighbors that vote for the predicted labels.\n - num_loops: Determines which implementation to use to compute distances\n between training points and testing points.\n\n Returns:\n - y: A numpy array of shape (num_test,) containing predicted labels for the\n test data, where y[i] is the predicted label for the test point X[i].\n \"\"\"\n if num_loops == 0:\n dists = self.compute_distances_no_loops(X)\n elif num_loops == 1:\n dists = self.compute_distances_one_loop(X)\n elif num_loops == 2:\n dists = self.compute_distances_two_loops(X)\n else:\n raise ValueError(\"Invalid value %d for num_loops\" % num_loops)\n\n return self.predict_labels(dists, k=k)\n\n def compute_distances_two_loops(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using a nested loop over both the training data and the\n test data.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data.\n\n Returns:\n - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n is the Euclidean distance between the ith test point and the jth training\n point.\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n #####################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and the jth #\n # training point, and store the result in dists[i, j]. You should #\n # not use a loop over dimension, nor use np.linalg.norm(). #\n #####################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dists[i, j] = np.sqrt(sum((X[i, ] - self.X_train[j, ]) ** 2))\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return dists\n\n def compute_distances_one_loop(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using a single loop over the test data.\n\n Input / Output: Same as compute_distances_two_loops\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n #######################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and all training #\n # points, and store the result in dists[i, :]. #\n # Do not use np.linalg.norm(). 
#\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n dists[i, :] = np.sqrt(np.sum((self.X_train - X[i, :]) ** 2, 1))\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return dists\n\n def compute_distances_no_loops(self, X):\n \"\"\"\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n \"\"\"\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n #########################################################################\n # TODO: #\n # Compute the l2 distance between all test points and all training #\n # points without using any explicit loops, and store the result in #\n # dists. #\n # #\n # You should implement this function using only basic array operations; #\n # in particular you should not use functions from scipy, #\n # nor use np.linalg.norm(). #\n # #\n # HINT: Try to formulate the l2 distance using matrix multiplication #\n # and two broadcast sums. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # expand ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y using matrix\n # multiplication and two broadcast sums, as the hint suggests\n dists = np.sqrt(np.sum(X ** 2, axis=1)[:, np.newaxis]\n + np.sum(self.X_train ** 2, axis=1)\n - 2 * X.dot(self.X_train.T))\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return dists\n\n def predict_labels(self, dists, k=1):\n \"\"\"\n Given a matrix of distances between test points and training points,\n predict a label for each test point.\n\n Inputs:\n - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n gives the distance between the ith test point and the jth training point.\n\n Returns:\n - y: A numpy array of shape (num_test,) containing predicted labels for the\n test data, where y[i] is the predicted label for the test point X[i].\n \"\"\"\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n #########################################################################\n # TODO: #\n # Use the distance matrix to find the k nearest neighbors of the ith #\n # testing point, and use self.y_train to find the labels of these #\n # neighbors. Store these labels in closest_y. #\n # Hint: Look up the function numpy.argsort. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n closest_y = self.y_train[dists[i, ].argsort()[:k]]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n #########################################################################\n # TODO: #\n # Now that you have found the labels of the k nearest neighbors, you #\n # need to find the most common label in the list closest_y of labels. #\n # Store this label in y_pred[i]. Break ties by choosing the smaller #\n # label. 
#\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # np.unique returns sorted labels, so argmax's first-hit rule breaks\n # ties toward the smaller label, as required\n unique, counts = np.unique(closest_y, return_counts=True)\n y_pred[i] = unique[np.argmax(counts)]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return y_pred\n", "from builtins import range\nimport numpy as np\nfrom random import shuffle\nfrom past.builtins import xrange\n\n\ndef softmax_loss_naive(W, X, y, reg):\n \"\"\"\n Softmax loss function, naive implementation (with loops)\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_training = X.shape[0]\n num_class = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n for i in np.arange(num_training):\n inner = X[i, :].dot(W)\n inner -= np.max(inner) # shift scores for numeric stability; softmax is shift-invariant\n temp = np.sum(np.exp(inner))\n loss += - np.log(np.exp(inner[y[i]]) / temp)\n for j in np.arange(num_class):\n if j == y[i]:\n dW[:, j] -= X[i]\n dW[:, j] += np.exp(inner[j]) / temp * X[i]\n loss = loss / num_training + reg * np.sum(W * W)\n dW = dW / num_training + 2 * reg * W\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW\n\n\ndef softmax_loss_vectorized(W, X, y, reg):\n \"\"\"\n Softmax loss function, vectorized version.\n\n Inputs and outputs are the same as softmax_loss_naive.\n \"\"\"\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_training = X.shape[0]\n num_class = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n scores = X.dot(W)\n scores -= np.max(scores, axis=1, keepdims=True) # shift scores for numeric stability\n mask = np.stack([np.arange(num_class)] *\n num_training) == y.reshape((num_training, 1))\n loss = - np.sum(scores * mask)\n loss += np.sum(np.log(np.sum(np.exp(scores), axis=1)))\n loss = loss / num_training + reg * np.sum(W * W)\n probs = np.exp(scores) / np.sum(np.exp(scores), axis=1)[:, np.newaxis]\n dW = - X.T.dot(mask) + X.T.dot(probs)\n dW = dW / num_training + 2 * reg * W\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW\n" ]
[ [ "numpy.sum", "numpy.argmax", "numpy.zeros", "numpy.unique" ], [ "numpy.sum", "numpy.zeros_like", "numpy.arange", "numpy.exp" ] ]